repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/std_api.rs | src/std_api.rs | // This file was derived from rust's own libstd/process.rs with the following
// copyright:
//
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
use std::ffi::OsStr;
use std::default::Default;
use std::collections::HashMap;
use std::collections::HashSet;
use std::env;
use std::path::Path;
use libc::{uid_t, gid_t};
use crate::ffi_util::ToCString;
use crate::{Command, Stdio, Fd};
impl Command {
/// Constructs a new `Command` for launching the program at
/// path `program`, with the following default configuration:
///
/// * No arguments to the program
/// * Inherit the current process's environment
/// * Inherit the current process's working directory
/// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
///
/// Builder methods are provided to change these defaults and
/// otherwise configure the process.
pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
Command {
filename: program.to_cstring(),
args: vec![program.to_cstring()],
environ: None,
config: Default::default(),
chroot_dir: None,
pivot_root: None,
fds: vec![
(0, Fd::inherit()),
(1, Fd::inherit()),
(2, Fd::inherit()),
].into_iter().collect(),
close_fds: Vec::new(),
id_map_commands: None,
pid_env_vars: HashSet::new(),
keep_caps: None,
before_unfreeze: None,
pre_exec: None,
}
}
/// Add an argument to pass to the program.
pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
self.args.push(arg.to_cstring());
self
}
/// Add multiple arguments to pass to the program.
pub fn args<S: AsRef<OsStr>>(&mut self, args: &[S]) -> &mut Command {
self.args.extend(args.iter().map(ToCString::to_cstring));
self
}
// TODO(tailhook) It's only public for our run module any better way?
// TODO(tailhook) make it private
#[doc(hidden)]
pub fn init_env_map(&mut self) {
if self.environ.is_none() {
self.environ = Some(env::vars_os().collect());
}
}
/// Inserts or updates an environment variable mapping.
pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
where K: AsRef<OsStr>, V: AsRef<OsStr>
{
self.init_env_map();
self.environ.as_mut().unwrap().insert(
key.as_ref().to_os_string(),
val.as_ref().to_os_string());
self.pid_env_vars.remove(key.as_ref());
self
}
/// Inserts or updates multiple environment variable mappings.
pub fn envs<I, K, V>(&mut self, vars: I)-> &mut Command
where I: IntoIterator<Item=(K, V)>, K: AsRef<OsStr>, V: AsRef<OsStr>
{
for (ref key, ref val) in vars {
self.init_env_map();
self.environ.as_mut().unwrap().insert(
key.as_ref().to_os_string(),
val.as_ref().to_os_string());
self.pid_env_vars.remove(key.as_ref());
}
self
}
/// Removes an environment variable mapping.
pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
self.init_env_map();
self.environ.as_mut().unwrap().remove(key.as_ref());
self.pid_env_vars.remove(key.as_ref());
self
}
/// Clears the entire environment map for the child process.
pub fn env_clear(&mut self) -> &mut Command {
self.environ = Some(HashMap::new());
self.pid_env_vars = HashSet::new();
self
}
/// Sets the working directory for the child process.
///
/// Note: in case of `chroot` or `pivot_root` the working directory is
/// always set to something inside the new root. Algorithm is following:
///
/// 1. If path is set to absolute path, current dir is this path *inside*
/// the chroot
/// 2. Check if chroot dir is prefix of `env::current_dir()`. If it is
/// set current directory to the suffix. Otherwise set current directory
/// to the new root dir.
/// 3. If `current_dir` is specified (and relative) set working directory
/// to the value (i.e. relative to the dir set in #2)
///
/// The `pivot_root` is treated just the same as `chroot`. I.e. we will
/// not try to set working directory inside the `old_root`, unless path
/// inside is set explicitly by this method.
///
/// At the end of the day, the ``cmd.current_dir(env::current_dir())`` is
/// not no-op if using chroot/pivot_root.
pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
{
self.config.work_dir = Some(dir.as_ref().to_cstring());
self
}
/// Configuration for the child process's stdin handle (file descriptor 0).
pub fn stdin(&mut self, cfg: Stdio) -> &mut Command {
self.fds.insert(0, cfg.to_fd(false));
self
}
/// Configuration for the child process's stdout handle (file descriptor 1).
pub fn stdout(&mut self, cfg: Stdio) -> &mut Command {
self.fds.insert(1, cfg.to_fd(true));
self
}
/// Configuration for the child process's stderr handle (file descriptor 2).
pub fn stderr(&mut self, cfg: Stdio) -> &mut Command {
self.fds.insert(2, cfg.to_fd(true));
self
}
/// Set user id of the new process. Note that it works only for root
/// process or if you also set up user namespace
pub fn uid(&mut self, id: uid_t) -> &mut Command {
self.config.uid = Some(id);
self
}
/// Set primary group id of the new process. Note that it works only for
/// root process or if you also set up user namespace
pub fn gid(&mut self, id: gid_t) -> &mut Command {
self.config.gid = Some(id);
self
}
/// Set supplementary group ids. Note that it works only for root process
/// or if you also set up user namespace
pub fn groups(&mut self, ids: Vec<gid_t>) -> &mut Command {
self.config.supplementary_gids = Some(ids);
self
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/lib.rs | src/lib.rs | //! The `Command` has mostly same API as `std::process::Command` except where
//! is absolutely needed.
//!
//! In addition `Command` contains methods to configure linux namespaces,
//! chroots and more linux stuff.
//!
//! We have diverged from ``std::process::Command`` in the following
//! major things:
//!
//! 1. Error handling. Since sometimes we have long chains of system calls
//! involved, we need to give user some way to find out which call failed
//! with an error, so `io::Error` is not an option. We have
//! ``error::Error`` class which describes the error as precisely as
//! possible
//!
//! 2. We set ``PDEATHSIG`` to ``SIGKILL`` by default. I.e. child process will
//! die when parent is dead. This is what you want most of the time. If you
//! want to allow child process to daemonize explicitly call the
//! ``allow_daemonize`` method (but look at documentation of
//! ``Command::set_parent_death_signal`` first).
//!
//! 3. We don't search for `program` in `PATH`. It's hard to do right in all
//! cases of `chroot`, `pivot_root`, user and mount namespaces. So we expect
//! its easier to do for your specific container setup.
//!
//! Anyway this is low-level interface. You may want to use some higher level
//! abstraction which mounts filesystems, sets network and monitors processes.
//!
#![warn(missing_docs)]
extern crate libc;
extern crate nix;
#[cfg(test)] extern crate rand;
mod caps;
mod namespace;
mod idmap;
mod chroot;
mod ffi_util;
mod std_api;
mod config;
mod error;
mod pipe;
mod child;
mod callbacks;
mod linux;
mod fds;
mod run;
mod status;
mod wait;
mod stdio;
mod debug;
mod zombies;
pub use crate::error::Error;
pub use crate::status::ExitStatus;
pub use crate::stdio::{Stdio, Fd};
pub use crate::pipe::{PipeReader, PipeWriter};
pub use crate::namespace::{Namespace};
pub use crate::idmap::{UidMap, GidMap};
pub use crate::zombies::{reap_zombies, child_events, ChildEvent};
pub use nix::sys::signal::Signal;
pub use crate::debug::{Style, Printer};
pub use crate::caps::{Capability};
use std::ffi::{CString, OsString};
use std::path::PathBuf;
use std::os::unix::io::RawFd;
use std::collections::{HashMap, HashSet};
use std::io;
use crate::pipe::PipeHolder;
use libc::{pid_t};
type BoxError = Box<dyn (::std::error::Error) + Send + Sync + 'static>;
/// Main class for running processes. Works in the spirit of builder pattern.
pub struct Command {
    // Path of the program to execute (also the default argv[0])
    filename: CString,
    // Full argument vector; element 0 is argv[0] (see `arg0`)
    args: Vec<CString>,
    // Child environment; `None` means "inherit parent env at spawn time"
    environ: Option<HashMap<OsString, OsString>>,
    // Misc process settings (uid/gid, namespaces, workdir, signals, ...)
    config: config::Config,
    // File-descriptor setup for the child, keyed by target fd number
    // (0/1/2 are pre-populated as `Fd::inherit()` by `Command::new`)
    fds: HashMap<RawFd, Fd>,
    // NOTE(review): looks like pairs of fd-range bounds to close in the
    // child -- confirm against the run/fds modules
    close_fds: Vec<(RawFd, RawFd)>,
    // Root for chroot(2), set by `chroot_dir` (absolute path only)
    chroot_dir: Option<PathBuf>,
    // (new_root, put_old, unmount_old) for pivot_root(2), see `pivot_root`
    pivot_root: Option<(PathBuf, PathBuf, bool)>,
    // Paths to `newuidmap`/`newgidmap` binaries, see `set_id_map_commands`
    id_map_commands: Option<(PathBuf, PathBuf)>,
    // Env var names whose value should be the child's pid
    // (see `env_var_with_pid`)
    pid_env_vars: HashSet<OsString>,
    // Capability bitmask (2 x 32 bits) built by `keep_caps`
    keep_caps: Option<[u32; 2]>,
    // Parent-side callback run after fork but before child continues
    before_unfreeze: Option<Box<dyn FnMut(u32) -> Result<(), BoxError>>>,
    // Child-side callback run right before execve
    pre_exec: Option<Box<dyn Fn() -> Result<(), io::Error>>>,
}
/// The reference to the running child
#[derive(Debug)]
pub struct Child {
    // OS process id of the child
    pid: pid_t,
    // Cached exit status -- presumably filled in once the child has been
    // waited for, so it is not reaped twice; TODO confirm in wait module
    status: Option<ExitStatus>,
    // Parent-side ends of any pipes created for non-stdio descriptors
    fds: HashMap<RawFd, PipeHolder>,
    /// Stdin of a child if it is a pipe
    pub stdin: Option<PipeWriter>,
    /// Stdout of a child if it is a pipe
    pub stdout: Option<PipeReader>,
    /// Stderr of a child if it is a pipe
    pub stderr: Option<PipeReader>,
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/ffi_util.rs | src/ffi_util.rs | use std::ffi::{CString, OsStr};
use std::os::unix::ffi::OsStrExt;
/// Infallible conversion of string-like values into `CString`.
///
/// Panics (via `unwrap`) if the value contains an interior NUL byte.
pub trait ToCString {
    fn to_cstring(&self) -> CString;
}

// Blanket implementation: anything that can be viewed as an `OsStr`
// (str, String, OsStr, Path, ...) gets `to_cstring` for free.
impl<T: AsRef<OsStr>> ToCString for T {
    fn to_cstring(&self) -> CString {
        let bytes = self.as_ref().as_bytes();
        CString::new(bytes).unwrap()
    }
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/stdio.rs | src/stdio.rs | use std::io;
use std::os::unix::io::{RawFd, AsRawFd, IntoRawFd};
use nix;
use nix::fcntl::{fcntl, FcntlArg};
use libc;
/// An enumeration that is used to configure stdio file descriptors
///
/// The enumeration members might be non-stable, it's better to use
/// one of the constructors to create an instance
pub enum Stdio {
    /// This fd will use a pipe to/from the application
    Pipe,
    /// This fd will be inherited from the parent application
    Inherit,
    /// This fd will open /dev/null in read or write mode
    Null,
    /// This is an fd passed by the application (and closed by `unshare`)
    Fd(Closing),
}
/// An enumeration that is used to configure non-stdio file descriptors. It
/// differs from the stdio one because we must differentiate between readable
/// and writable file descriptors for things opened by the library
///
/// The enumeration members might be non-stable, it's better to use
/// one of the constructors to create an instance
// TODO(tailhook) should this object be hidden?
pub enum Fd {
    /// This fd is a reading end of a pipe
    ReadPipe,
    /// This fd is a writing end of a pipe
    WritePipe,
    /// This fd is inherited from parent (current) process
    Inherit,
    /// This fd is redirected from `/dev/null`
    ReadNull,
    /// This fd is redirected to `/dev/null`
    WriteNull,
    /// This is an fd passed by the application (and closed by `unshare`)
    Fd(Closing),
}
// Owning wrapper around a raw file descriptor: the fd is closed when the
// wrapper is dropped (see the `Drop` impl below).
pub struct Closing(RawFd);
pub fn dup_file_cloexec<F: AsRawFd>(file: &F) -> io::Result<Closing> {
match fcntl(file.as_raw_fd(), FcntlArg::F_DUPFD_CLOEXEC(3)) {
Ok(fd) => Ok(Closing::new(fd)),
Err(nix::Error::Sys(errno)) => {
return Err(io::Error::from_raw_os_error(errno as i32));
}
Err(nix::Error::InvalidPath) => unreachable!(),
Err(nix::Error::InvalidUtf8) => unreachable!(),
Err(nix::Error::UnsupportedOperation) => {
return Err(io::Error::new(io::ErrorKind::Other,
"nix error: unsupported operation"));
}
}
}
impl Stdio {
/// Pipe is created for child process
pub fn piped() -> Stdio { Stdio::Pipe }
/// The child inherits file descriptor from the parent process
pub fn inherit() -> Stdio { Stdio::Inherit }
/// Stream is attached to `/dev/null`
pub fn null() -> Stdio { Stdio::Null }
/// Converts stdio definition to file descriptor definition
/// (mostly needed internally)
pub fn to_fd(self, write: bool) -> Fd {
match (self, write) {
(Stdio::Fd(x), _) => Fd::Fd(x),
(Stdio::Pipe, false) => Fd::ReadPipe,
(Stdio::Pipe, true) => Fd::WritePipe,
(Stdio::Inherit, _) => Fd::Inherit,
(Stdio::Null, false) => Fd::ReadNull,
(Stdio::Null, true) => Fd::WriteNull,
}
}
/// A simpler helper method for `from_raw_fd`, that does dup of file
/// descriptor, so is actually safe to use (but can fail)
pub fn dup_file<F: AsRawFd>(file: &F) -> io::Result<Stdio> {
dup_file_cloexec(file).map(|f| Stdio::Fd(f))
}
/// A simpler helper method for `from_raw_fd`, that consumes file
///
/// Note: we assume that file descriptor **already has** the `CLOEXEC`
/// flag. This is by default for all files opened by rust.
pub fn from_file<F: IntoRawFd>(file: F) -> Stdio {
Stdio::Fd(Closing(file.into_raw_fd()))
}
}
impl Fd {
/// Create a pipe so that child can read from it
pub fn piped_read() -> Fd { Fd::ReadPipe }
/// Create a pipe so that child can write to it
pub fn piped_write() -> Fd { Fd::WritePipe }
/// Inherit the child descriptor from parent
///
/// Not very useful for custom file descriptors better use `from_file()`
pub fn inherit() -> Fd { Fd::Inherit }
/// Create a readable pipe that always has end of file condition
pub fn read_null() -> Fd { Fd::ReadNull }
/// Create a writable pipe that ignores all the input
pub fn write_null() -> Fd { Fd::WriteNull }
/// A simpler helper method for `from_raw_fd`, that does dup of file
/// descriptor, so is actually safe to use (but can fail)
pub fn dup_file<F: AsRawFd>(file: &F) -> io::Result<Fd> {
dup_file_cloexec(file).map(|f| Fd::Fd(f))
}
/// A simpler helper method for `from_raw_fd`, that consumes file
pub fn from_file<F: IntoRawFd>(file: F) -> Fd {
Fd::Fd(Closing(file.into_raw_fd()))
}
}
impl Closing {
pub fn new(fd: RawFd) -> Closing {
Closing(fd)
}
}
impl AsRawFd for Closing {
    /// Returns the wrapped raw descriptor without transferring ownership.
    // Idiom fix: trailing expression instead of an explicit `return ...;`.
    fn as_raw_fd(&self) -> RawFd {
        self.0
    }
}
impl Drop for Closing {
    fn drop(&mut self) {
        // SAFETY: `Closing` owns its descriptor (constructors either dup it
        // or take it via `into_raw_fd`), so closing it here cannot invalidate
        // another live handle created by this library. Errors from close(2)
        // are deliberately ignored -- a destructor has no way to report them.
        unsafe {
            libc::close(self.0);
        }
    }
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/caps.rs | src/caps.rs | #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[allow(missing_docs, non_camel_case_types)]
/// Linux capability bits, one variant per `CAP_*` constant.
///
/// The discriminant values mirror the kernel's capability numbers
/// (see `man 7 capabilities`); `keep_caps` relies on this when building
/// its bitmask.
pub enum Capability {
    CAP_CHOWN = 0,
    CAP_DAC_OVERRIDE = 1,
    CAP_DAC_READ_SEARCH = 2,
    CAP_FOWNER = 3,
    CAP_FSETID = 4,
    CAP_KILL = 5,
    CAP_SETGID = 6,
    CAP_SETUID = 7,
    CAP_SETPCAP = 8,
    CAP_LINUX_IMMUTABLE = 9,
    CAP_NET_BIND_SERVICE = 10,
    CAP_NET_BROADCAST = 11,
    CAP_NET_ADMIN = 12,
    CAP_NET_RAW = 13,
    CAP_IPC_LOCK = 14,
    CAP_IPC_OWNER = 15,
    CAP_SYS_MODULE = 16,
    CAP_SYS_RAWIO = 17,
    CAP_SYS_CHROOT = 18,
    CAP_SYS_PTRACE = 19,
    CAP_SYS_PACCT = 20,
    CAP_SYS_ADMIN = 21,
    CAP_SYS_BOOT = 22,
    CAP_SYS_NICE = 23,
    CAP_SYS_RESOURCE = 24,
    CAP_SYS_TIME = 25,
    CAP_SYS_TTY_CONFIG = 26,
    CAP_MKNOD = 27,
    CAP_LEASE = 28,
    CAP_AUDIT_WRITE = 29,
    CAP_AUDIT_CONTROL = 30,
    CAP_SETFCAP = 31,
    CAP_MAC_OVERRIDE = 32,
    CAP_MAC_ADMIN = 33,
    CAP_SYSLOG = 34,
    CAP_WAKE_ALARM = 35,
    CAP_BLOCK_SUSPEND = 36,
    CAP_AUDIT_READ = 37,
    // Placeholder so the enum cannot be matched exhaustively downstream
    #[doc(hidden)]
    __NonExhaustive,
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/linux.rs | src/linux.rs | use std::ffi::OsStr;
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::Path;
use nix::sys::signal::{Signal};
use crate::ffi_util::ToCString;
use crate::{Command, Namespace};
use crate::idmap::{UidMap, GidMap};
use crate::stdio::dup_file_cloexec;
use crate::namespace::to_clone_flag;
use crate::caps::Capability;
impl Command {
    /// Allow child process to daemonize. By default we run equivalent of
    /// `set_parent_death_signal(SIGKILL)`. See the `set_parent_death_signal`
    /// for better explanation.
    pub fn allow_daemonize(&mut self) -> &mut Command {
        self.config.death_sig = None;
        self
    }
    /// Set a signal that is sent to a process when it's parent is dead.
    /// This is by default set to `SIGKILL`. And you should keep it that way
    /// unless you know what you are doing.
    ///
    /// Particularly you should consider the following choices:
    ///
    /// 1. Instead of setting ``PDEATHSIG`` to some other signal, send signal
    ///    yourself and wait until child gracefully finishes.
    ///
    /// 2. Instead of daemonizing use ``systemd``/``upstart``/whatever system
    ///    init script to run your service
    ///
    /// Another issue with this option is that it works only with immediate
    /// child. To better control all descendant processes you may need the
    /// following:
    ///
    /// 1. The `prctl(PR_SET_CHILD_SUBREAPER..)` in parent which allows to
    ///    "catch" descendant processes.
    ///
    /// 2. The pid namespaces
    ///
    /// The former is out of scope of this library. The latter works by
    /// ``cmd.unshare(Namespace::Pid)``, but you may need to setup mount points
    /// and other important things (which are out of scope too).
    ///
    /// To reset this behavior use ``allow_daemonize()``.
    ///
    pub fn set_parent_death_signal(&mut self, sig: Signal) -> &mut Command {
        self.config.death_sig = Some(sig);
        self
    }
    /// Set chroot dir. Only absolute path is supported
    ///
    /// This method has a non-standard security feature: even if current_dir
    /// is unspecified we set it to the directory inside the new root.
    /// see more details in the description of `Command::current_dir`.
    ///
    /// Note that if both chroot dir and pivot_root specified. The chroot dir
    /// is applied after pivot root. If chroot dir is relative it's relative
    /// to either suffix of the current directory with stripped off pivot dir
    /// or the pivot dir itself (if old workdir is not prefixed by pivot dir)
    ///
    /// # Panics
    ///
    /// If directory is not absolute
    pub fn chroot_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command
    {
        let dir = dir.as_ref();
        if !dir.is_absolute() {
            panic!("Chroot dir must be absolute");
        }
        self.chroot_dir = Some(dir.to_path_buf());
        self
    }
    /// Moves the root of the file system to the directory `put_old` and
    /// makes `new_root` the new root file system. Also it's optionally
    /// unmount `new_root` mount point after moving root (but it must exist
    /// anyway).
    ///
    /// The documentation says that `put_old` must be underneath the
    /// `new_root`. Currently we have a restriction that both must be absolute
    /// and `new_root` be prefix of `put_old`, but we may lift it later.
    ///
    /// **Warning** if you don't unshare the mount namespace you will get
    /// moved filesystem root for *all processes running in that namespace*
    /// including parent (currently running) process itself. If you don't
    /// run equivalent to ``mount --make-private`` for the old root filesystem
    /// and set ``unmount`` to true, you may get unmounted filesystem for
    /// running processes too.
    ///
    /// See `man 2 pivot` for further details
    ///
    /// Note that if both chroot dir and pivot_root specified. The chroot dir
    /// is applied after pivot root.
    ///
    /// # Panics
    ///
    /// Panics if either path is not absolute or new_root is not a prefix of
    /// put_old.
    pub fn pivot_root<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
        new_root: A, put_old: B, unmount: bool)
        -> &mut Command
    {
        let new_root = new_root.as_ref();
        let put_old = put_old.as_ref();
        if !new_root.is_absolute() {
            panic!("New root must be absolute");
        };
        if !put_old.is_absolute() {
            panic!("The `put_old` dir must be absolute");
        }
        // Every component of `new_root` must match the corresponding
        // component of `put_old`. (A plain `zip` here would silently accept
        // a `put_old` that is *shorter* than `new_root`, e.g.
        // new_root=/a/b with put_old=/a.)
        let mut old_cmp = put_old.components();
        for n in new_root.components() {
            if old_cmp.next() != Some(n) {
                panic!("The new_root is not a prefix of put old");
            }
        }
        self.pivot_root = Some((new_root.to_path_buf(), put_old.to_path_buf(),
            unmount));
        self
    }
    /// Unshare given namespaces
    ///
    /// Note: each namespace have some consequences on how new process will
    /// work, some of them are described in the `Namespace` type documentation.
    pub fn unshare<'x>(&mut self, iter: impl IntoIterator<Item=&'x Namespace>)
        -> &mut Command
    {
        for ns in iter {
            self.config.namespaces |= to_clone_flag(*ns);
        }
        self
    }
    /// Reassociate child process with a namespace specified by a file
    /// descriptor
    ///
    /// `file` argument is an open file referring to a namespace
    ///
    /// 'ns' is a namespace type
    ///
    /// See `man 2 setns` for further details
    ///
    /// Note: using `unshare` and `setns` for the same namespace is meaningless.
    pub fn set_namespace<F: AsRawFd>(&mut self, file: &F, ns: Namespace)
        -> io::Result<&mut Command>
    {
        let fd = dup_file_cloexec(file)?;
        self.config.setns_namespaces.insert(ns, fd);
        Ok(self)
    }
    /// Sets user id and group id mappings for new process
    ///
    /// This automatically enables `User` namespace. You should also set `uid`
    /// and `gid` with respective methods for the new process.
    ///
    /// Note there are basically two ways to enable id maps:
    ///
    /// 1. Write them directly
    /// 2. Invoke a `newuidmap`, `newgidmap` commands
    ///
    /// First option works either if current process is root or if resulting
    /// map only contains current user in the mapping.
    ///
    /// The library will not try to guess the behavior. By default it will
    /// write directly. You need to call the `set_id_map_commands` when you
    /// want non-default behavior.
    ///
    /// See `man 7 user_namespaces` for more info
    pub fn set_id_maps(&mut self, uid_map: Vec<UidMap>, gid_map: Vec<GidMap>)
        -> &mut Command
    {
        self.unshare(&[Namespace::User]);
        self.config.id_maps = Some((uid_map, gid_map));
        self
    }
    /// Set path to command-line utilities for writing uid/gid maps
    ///
    /// The utilities provided must obey same interface as `newuidmap` and
    /// `newgidmap` from `shadow` (or sometimes `uidmap`) package. To get it
    /// working you usually need to setup `/etc/subuid` and `/etc/subgid`
    /// files.
    ///
    /// See `man 1 newuidmap`, `man 1 newgidmap` for details
    ///
    /// This method is no-op unless `set_id_maps` is called.
    pub fn set_id_map_commands<A: AsRef<Path>, B: AsRef<Path>>(&mut self,
        newuidmap: A, newgidmap: B)
        -> &mut Command
    {
        self.id_map_commands = Some((
            newuidmap.as_ref().to_path_buf(),
            newgidmap.as_ref().to_path_buf()));
        self
    }
    /// Keep signal mask intact after executing child, keeps also ignored
    /// signals
    ///
    /// By default signal mask is empty and all signals are reset to the
    /// `SIG_DFL` value right before `execve()` syscall.
    ///
    /// This is only useful if started process is aware of the issue and sets
    /// sigmasks to some reasonable value. When used wisely it may avoid some
    /// race conditions when signal is sent after child is cloned but before
    /// child have been able to establish it's state.
    pub fn keep_sigmask(&mut self) -> &mut Command {
        self.config.restore_sigmask = false;
        self
    }
    /// Set the argument zero for the process
    ///
    /// By default argument zero is same as path to the program to run. You
    /// may set it to a short name of the command or to something else to
    /// pretend there is a symlink to a program (for example to run `gzip` as
    /// `gunzip`).
    pub fn arg0<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
        self.args[0] = arg.to_cstring();
        self
    }
    /// Makes child process a group leader
    ///
    /// If child process is being launched as a foreground job,
    /// the child process group needs to be put into the foreground on
    /// the controlling terminal using `tcsetpgrp`. To request status
    /// information from stopped child process you should call `waitpid` with
    /// `WUNTRACED` flag. And then check status with `WIFSTOPPED` macro.
    /// After giving child process group access to the controlling terminal
    /// you should send the SIGCONT signal to the child process group.
    pub fn make_group_leader(&mut self, make_group_leader: bool) -> &mut Command {
        self.config.make_group_leader = make_group_leader;
        self
    }
    /// Inserts a magic environment variable that will contain pid of spawned
    /// process
    ///
    /// This is usually needed to avoid accidental propagation of the
    /// environment variables targeted only at this specific process.
    ///
    /// # Example
    ///
    /// This is how you can encode [systemd activation] protocol:
    ///
    /// ```rust,ignore
    /// cmd.env_var_with_pid("LISTEN_PID");
    /// cmd.env("LISTEN_FDS", "1");
    /// ```
    ///
    /// [systemd activation]: https://www.freedesktop.org/software/systemd/man/sd_listen_fds.html
    pub fn env_var_with_pid<K>(&mut self, key: K) -> &mut Command
        where K: AsRef<OsStr>,
    {
        self.init_env_map();
        // A pid-substituted var and an explicit value are mutually exclusive
        self.environ.as_mut().unwrap().remove(key.as_ref());
        self.pid_env_vars.insert(key.as_ref().to_os_string());
        self
    }
    /// Drop all capabilities, but keep only ones set by this setter
    ///
    /// This method sets three or four sets of capabilities:
    /// * Permitted
    /// * Inherited
    /// * Effective
    /// * Ambient (if supported)
    ///
    /// This works both when uid changes (from 0 to other) and when it
    /// isn't changed, but requires process to have all capabilities
    /// granted by this method.
    ///
    /// This method replaces whole capability mask on each invocation
    pub fn keep_caps<'x>(&mut self,
        caps: impl IntoIterator<Item=&'x Capability>)
        -> &mut Command
    {
        let mut buf = [0u32; 2];
        for item in caps {
            let item = *item as u32;
            // Set bit (item % 32) of word (item / 32); capability numbers
            // match the kernel's, see the `Capability` enum.
            buf[(item >> 5) as usize] |= 1 << (item & 31);
        }
        self.keep_caps = Some(buf);
        // Return `&mut self` for consistency with every other builder
        // method (backward-compatible: previous return type was `()`).
        self
    }
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/callbacks.rs | src/callbacks.rs | use std::io;
use crate::{Command, BoxError};
impl Command {
    /// Set a callback to run when child is already forked but not yet run
    ///
    /// When starting a child we sometimes need more setup from the parent,
    /// for example: to configure pid namespaces for the unprivileged
    /// process (child) by the privileged process (parent).
    ///
    /// This callback runs in the **parent** process after all built-in setup
    /// is done (setting uid namespaces). It always runs before the
    /// ``pre_exec`` callback in the child.
    ///
    /// If the callback returns an error, the process is shut down.
    ///
    /// Each invocation **replaces** the callback,
    /// so only one of them will ever be called.
    ///
    pub fn before_unfreeze(
        &mut self,
        f: impl FnMut(u32) -> Result<(), BoxError> + 'static,
    ) -> &mut Self {
        self.before_unfreeze = Some(Box::new(f));
        self
    }
    /// Set a callback to run in the child before calling exec
    ///
    /// The callback is executed right before the `execve` system call.
    /// All other modifications of the environment are already applied
    /// at this moment. It always runs after ``before_unfreeze`` in the parent.
    ///
    /// **Warning** this callback must not do any memory (de)allocations or
    /// use mutexes, otherwise the process may crash or deadlock. Only bare
    /// syscalls are allowed (use `libc` crate).
    ///
    /// The closure is allowed to return an I/O error whose
    /// OS error code will be communicated back to the parent
    /// and returned as an error from when the spawn was requested.
    ///
    /// Note: unlike the same method in stdlib,
    /// each invocation of this method **replaces** the callback,
    /// so only one of them will ever be called.
    pub unsafe fn pre_exec(
        &mut self,
        f: impl Fn() -> io::Result<()> + Send + Sync + 'static,
    ) -> &mut Self {
        self.pre_exec = Some(Box::new(f));
        self
    }
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/status.rs | src/status.rs | use std::fmt;
use crate::{Signal};
/// The exit status of a process
///
/// Returned either by `reap_zombies()` or by `child_events()`
/// or by `Child::wait()`
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExitStatus {
    /// Process exited normally with some exit code
    // Note: stored as i8, so only the low byte of the exit code fits
    Exited(i8),
    /// Process was killed by a signal (bool flag is true when core is dumped)
    Signaled(Signal, /* core dumped */bool)
}
impl ExitStatus {
    /// Returns `true` if this exit status means successful exit
    pub fn success(&self) -> bool {
        match *self {
            ExitStatus::Exited(0) => true,
            _ => false,
        }
    }
    /// Returns exit code if the process has exited normally
    pub fn code(&self) -> Option<i32> {
        match *self {
            ExitStatus::Exited(code) => Some(code as i32),
            ExitStatus::Signaled(..) => None,
        }
    }
    /// Returns signal number if the process was killed by a signal
    pub fn signal(&self) -> Option<i32> {
        match *self {
            ExitStatus::Exited(_) => None,
            ExitStatus::Signaled(sig, _) => Some(sig as i32),
        }
    }
}
impl fmt::Display for ExitStatus {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::ExitStatus::*;
match self {
&Exited(c) => write!(fmt, "exited with code {}", c),
&Signaled(sig, false) => {
write!(fmt, "killed by signal {:?}[{}]",
sig, sig as i32)
}
&Signaled(sig, true) => {
write!(fmt, "killed by signal {:?}[{}] (core dumped)",
sig, sig as i32)
}
}
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/chroot.rs | src/chroot.rs | use std::ffi::CString;
// Resolved parameters for a pivot_root(2)-based root switch (internal).
pub struct Pivot {
    // The `new_root` argument of pivot_root(2)
    pub new_root: CString,
    // The `put_old` argument of pivot_root(2)
    pub put_old: CString,
    // NOTE(review): presumably the path of the old root as seen from inside
    // the new root -- confirm against the child/run modules
    pub old_inside: CString,
    // Working directory to enter after the switch
    pub workdir: CString,
    // Whether to unmount the old root after pivoting (see
    // `Command::pivot_root`'s `unmount` flag)
    pub unmount_old_root: bool,
}
// Resolved parameters for a plain chroot(2) switch (internal).
pub struct Chroot {
    // Directory that becomes the new root
    pub root: CString,
    // Working directory to enter after the chroot
    pub workdir: CString,
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/error.rs | src/error.rs | use std::io;
use std::fmt;
use crate::status::ExitStatus;
use nix;
// Numeric codes, one per errno-carrying `Error` variant.
// NOTE(review): these look like the values the child reports back to the
// parent over the signalling pipe -- confirm against the child module.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorCode {
    CreatePipe = 1,
    Fork = 2,
    Exec = 3,
    Chdir = 4,
    ParentDeathSignal = 5,
    PipeError = 6,
    StdioError = 7,
    SetUser = 8,
    ChangeRoot = 9,
    SetIdMap = 10,
    SetPGid = 11,
    SetNs = 12,
    CapSet = 13,
    PreExec = 14,
}
/// Error running process
///
/// This type has very large number of options and it's enum only to be
/// compact. Probably you shouldn't match on the error cases but just format
/// it for user into string.
#[derive(Debug)]
pub enum Error {
    /// Unknown nix error
    ///
    /// Frankly, this error should not happen when running process. We just
    /// keep it here in case `nix` returns this error, which should not happen.
    NixError, // Not sure it's possible, but it is here to convert from
              // nix::Error safer
    /// Some invalid error code received from child application
    UnknownError,
    /// Error happened when we were trying to create pipe. The pipes used for
    /// two purposes: (a) for the process's stdio (`Stdio::pipe()` or
    /// `Stdio::null()`), (b) internally to wake up child process and return
    /// error back to the parent.
    // TODO(tailhook) should create pipe be split into PipeError and StdioError
    CreatePipe(i32),
    /// Error when forking/cloning process
    Fork(i32),
    /// Error when running execve() systemcall
    Exec(i32),
    /// Error when setting working directory specified by user
    Chdir(i32),
    /// Unable to set death signal (probably signal number invalid)
    ParentDeathSignal(i32),
    /// Error reading/writing through one of the two signal pipes
    PipeError(i32),
    /// Error waiting for process (for some functions only, for example
    /// ``Command::status()``). It probably means someone already waited for
    /// the process, for example it might be other thread, or signal handler.
    WaitError(i32),
    /// Error setting up stdio for process
    StdioError(i32),
    /// Could not set supplementary groups, group id or user id for the
    /// process
    SetUser(i32),
    /// Error changing root, it explains `chroot`, `pivot_root` system calls
    /// and setting working directory inside new root. Also includes unmounting
    /// old file system for pivot_root case.
    ChangeRoot(i32),
    /// Error setting uid or gid map. May be either problem running
    /// `newuidmap`/`newgidmap` command or writing the mapping file directly
    SetIdMap(i32),
    /// Auxiliary command failed
    ///
    /// There are two auxiliary commands for now: `newuidmap` and `newgidmap`.
    /// They run only when uid mappings (user namespaces) are enabled.
    ///
    /// Note that failing to run the binary results in `SetIdMap(sys_errno)`;
    /// this error contains the status code of a command that was successfully
    /// spawned.
    AuxCommandExited(i32),
    /// Auxiliary command was killed by signal
    ///
    /// Similar to `AuxCommandExited` but when command was killed
    AuxCommandKilled(i32),
    /// Error when calling setpgid function
    SetPGid(i32),
    /// Error when calling setns syscall
    SetNs(i32),
    /// Error when calling capset syscall
    CapSet(i32),
    /// Before unfreeze callback error
    BeforeUnfreeze(Box<dyn (::std::error::Error) + Send + Sync + 'static>),
    /// Before exec callback error
    PreExec(i32),
}
impl Error {
/// Similarly to `io::Error` returns bare error code
pub fn raw_os_error(&self) -> Option<i32> {
use self::Error::*;
match self {
&UnknownError => None,
&NixError => None,
&CreatePipe(x) => Some(x),
&Fork(x) => Some(x),
&Exec(x) => Some(x),
&Chdir(x) => Some(x),
&ParentDeathSignal(x) => Some(x),
&PipeError(x) => Some(x),
&WaitError(x) => Some(x),
&StdioError(x) => Some(x),
&SetUser(x) => Some(x),
&ChangeRoot(x) => Some(x),
&SetIdMap(x) => Some(x),
&AuxCommandExited(..) => None,
&AuxCommandKilled(..) => None,
&SetPGid(x) => Some(x),
&SetNs(x) => Some(x),
&CapSet(x) => Some(x),
&BeforeUnfreeze(..) => None,
&PreExec(x) => Some(x),
}
}
}
impl Error {
fn title(&self) -> &'static str {
use self::Error::*;
match self {
&UnknownError => "unexpected value received via signal pipe",
&NixError => "some unknown nix error",
&CreatePipe(_) => "can't create pipe",
&Fork(_) => "error when forking",
&Exec(_) => "error when executing",
&Chdir(_) => "error when setting working directory",
&ParentDeathSignal(_) => "error when death signal",
&PipeError(_) => "error in signalling pipe",
&WaitError(_) => "error in waiting for child",
&StdioError(_) => "error setting up stdio for child",
&SetUser(_) => "error setting user or groups",
&ChangeRoot(_) => "error changing root directory",
&SetIdMap(_) => "error setting uid/gid mappings",
&AuxCommandExited(_) => "aux command exited with non-zero code",
&AuxCommandKilled(_) => "aux command was killed by signal",
&SetPGid(_) => "error when calling setpgid",
&SetNs(_) => "error when calling setns",
&CapSet(_) => "error when setting capabilities",
&BeforeUnfreeze(_) => "error in before_unfreeze callback",
&PreExec(_) => "error in pre_exec callback",
}
}
}
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use crate::Error::*;
if let Some(code) = self.raw_os_error() {
let errno = nix::errno::from_i32(code);
if let nix::errno::Errno::UnknownErrno = errno {
// May be OS knows error name better
write!(fmt, "{}: {}", self.title(),
io::Error::from_raw_os_error(code))
} else {
// Format similar to that of std::io::Error
write!(fmt, "{}: {} (os error {})", self.title(),
errno.desc(), code)
}
} else {
match self {
BeforeUnfreeze(err) => {
write!(fmt, "{}: {}", self.title(), err)
}
_ => write!(fmt, "{}", self.title()),
}
}
}
}
#[inline]
pub fn result<T, E: IntoError>(code: ErrorCode, r: Result<T, E>)
-> Result<T, Error>
{
r.map_err(|e| e.into_error(code))
}
#[inline]
pub fn cmd_result<E: IntoError>(def_code: ErrorCode, r: Result<ExitStatus, E>)
-> Result<(), Error>
{
match r.map_err(|e| e.into_error(def_code))? {
ExitStatus::Exited(0) => Ok(()),
ExitStatus::Exited(x) => Err(Error::AuxCommandExited(x as i32)),
ExitStatus::Signaled(x, _) => Err(Error::AuxCommandKilled(x as i32)),
}
}
pub trait IntoError {
fn into_error(self, code: ErrorCode) -> Error;
}
impl IntoError for nix::Error {
fn into_error(self, code: ErrorCode) -> Error {
match self {
nix::Error::Sys(x) => code.wrap(x as i32),
_ => Error::NixError,
}
}
}
impl IntoError for io::Error {
fn into_error(self, code: ErrorCode) -> Error {
code.wrap(self.raw_os_error().unwrap_or(-1))
}
}
impl IntoError for Error {
fn into_error(self, code: ErrorCode) -> Error {
code.wrap(self.raw_os_error().unwrap_or(-1))
}
}
impl ErrorCode {
pub fn wrap(self, errno: i32) -> Error {
use self::ErrorCode as C;
use self::Error as E;
match self {
C::CreatePipe => E::CreatePipe(errno),
C::Fork => E::Fork(errno),
C::Exec => E::Exec(errno),
C::Chdir => E::Chdir(errno),
C::ParentDeathSignal => E::ParentDeathSignal(errno),
C::PipeError => E::PipeError(errno),
C::StdioError => E::StdioError(errno),
C::SetUser => E::SetUser(errno),
C::ChangeRoot => E::ChangeRoot(errno),
C::SetIdMap => E::SetIdMap(errno),
C::SetPGid => E::SetPGid(errno),
C::SetNs => E::SetNs(errno),
C::CapSet => E::CapSet(errno),
C::PreExec => E::PreExec(errno),
}
}
pub fn from_i32(code: i32, errno: i32) -> Error {
use self::ErrorCode as C;
use self::Error as E;
match code {
c if c == C::CreatePipe as i32 => E::CreatePipe(errno),
c if c == C::Fork as i32 => E::Fork(errno),
c if c == C::Exec as i32 => E::Exec(errno),
c if c == C::Chdir as i32 => E::Chdir(errno),
c if c == C::ParentDeathSignal as i32
=> E::ParentDeathSignal(errno),
c if c == C::PipeError as i32 => E::PipeError(errno),
c if c == C::StdioError as i32 => E::StdioError(errno),
c if c == C::SetUser as i32 => E::SetUser(errno),
c if c == C::ChangeRoot as i32 => E::ChangeRoot(errno),
c if c == C::SetIdMap as i32 => E::SetIdMap(errno),
c if c == C::SetPGid as i32 => E::SetPGid(errno),
c if c == C::SetNs as i32 => E::SetNs(errno),
c if c == C::CapSet as i32 => E::CapSet(errno),
// no BeforeUnfreeze, because can't be in a child
c if c == C::PreExec as i32 => E::PreExec(errno),
_ => E::UnknownError,
}
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/debug.rs | src/debug.rs | use std::fmt::{self, Display};
use nix::sched::CloneFlags;
use crate::Command;
/// This is a builder for various settings of how command may be printed
///
/// Use `format!("{}", cmd.display(style))` to actually print a command.
#[derive(Clone, Debug)]
pub struct Style {
cmd_only: bool,
print_env: bool,
show_path: bool,
}
/// A temporary value returned from `Command::display` for the sole purpose
/// of being `Display`'ed.
pub struct Printer<'a>(&'a Command, &'a Style);
impl Style {
/// Create a new style object that matches to how `fmt::Debug` works for
/// the command
pub fn debug() -> Style {
Style {
cmd_only: false,
print_env: true,
show_path: true,
}
}
/// Create a simple clean user-friendly display of the command
///
/// Note: this kind of pretty-printing omit many important parts of command
/// and may be ambiguous.
pub fn short() -> Style {
Style {
cmd_only: true,
print_env: false,
show_path: false,
}
}
/// Toggle printing of environment
///
/// When `false` is passed we only show `environ[12]`, i.e. a number of
/// environment variables. Default is `true` for `Style::debug`
/// constructor.
///
/// This method does nothing when using `Style::short` construtor
pub fn env(mut self, enable: bool) -> Style {
self.print_env = enable;
self
}
/// Toggle printing of full path to the executable
///
/// By default we don't print full executable path in `Style::short` mode.
///
/// Note: if this flag is disabled (default) we only show a name from
/// `arg0`, instead of executable path. When flag is
/// enabled, the `arg0` is shown alongside with executable path in
/// parethesis if the values differ.
///
/// This method does nothing when using `Style::debug` constructor
pub fn path(mut self, enable: bool) -> Style {
self.show_path = enable;
self
}
}
impl<'a> fmt::Display for Printer<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let Printer(cmd, opt) = *self;
if opt.cmd_only {
if opt.show_path {
write!(fmt, "{:?}", cmd.filename)?;
if cmd.args[0] != cmd.filename {
write!(fmt, " ({:?})", &cmd.args[0])?;
}
} else {
let path = if cmd.args[0] != cmd.filename {
&cmd.args[0]
} else {
&cmd.filename
};
let last_slash = path.as_bytes().iter()
.rposition(|&x| x == b'/');
if let Some(off) = last_slash {
write!(fmt, "{:?}",
&String::from_utf8_lossy(&path.as_bytes()[off+1..]))?;
} else {
write!(fmt, "{:?}", path)?;
}
}
for arg in cmd.args[1..].iter() {
write!(fmt, " {:?}", arg)?;
}
} else {
write!(fmt, "<Command {:?}", cmd.filename)?;
if cmd.args[0] != cmd.filename {
write!(fmt, " ({:?})", &cmd.args[0])?;
}
for arg in cmd.args[1..].iter() {
write!(fmt, " {:?}", arg)?;
}
if opt.print_env {
if let Some(ref env) = cmd.environ {
write!(fmt, "; environ: {{")?;
for (ref k, ref v) in env.iter() {
write!(fmt, "{:?}={:?},", k, v)?;
}
write!(fmt, "}}")?;
}
} else {
if let Some(ref env) = cmd.environ {
write!(fmt, "; environ[{}]", env.len())?;
}
}
if let Some(ref dir) = cmd.chroot_dir {
write!(fmt, "; chroot={:?}", dir)?;
}
if let Some((ref new, ref old, unmount)) = cmd.pivot_root {
write!(fmt, "; pivot_root=({:?};{:?};{})", new, old, unmount)?;
}
if cmd.config.namespaces != CloneFlags::empty() {
// TODO(tailhook)
}
if let Some(ref dir) = cmd.config.work_dir {
write!(fmt, "; work-dir={:?}", dir)?;
}
if let Some((ref uidm, ref gidm)) = cmd.config.id_maps {
write!(fmt, "; uid_map={:?}", uidm)?;
write!(fmt, "; gid_map={:?}", gidm)?;
}
if let Some(ref uid) = cmd.config.uid {
write!(fmt, "; uid={}", uid)?;
}
if let Some(ref gid) = cmd.config.gid {
write!(fmt, "; gid={}", gid)?;
}
if let Some(ref gids) = cmd.config.supplementary_gids {
write!(fmt, "; gids={:?}", gids)?;
}
// TODO(tailhook) stdio, sigchld, death_sig,
// sigmask, id-map-commands
write!(fmt, ">")?
}
Ok(())
}
}
impl Command {
/// Returns the object that implements Display
pub fn display<'a>(&'a self, style: &'a Style) -> Printer<'a> {
Printer(self, style)
}
}
impl fmt::Debug for Command {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
Printer(self, &Style::debug()).fmt(fmt)
}
}
#[cfg(test)]
mod test {
use crate::{Command, Style};
#[test]
fn test_debug() {
let mut cmd = Command::new("/bin/hello");
cmd.env_clear();
cmd.env("A", "B");
assert_eq!(&format!("{:?}", cmd),
r#"<Command "/bin/hello"; environ: {"A"="B",}>"#);
}
#[test]
fn test_comprehensive() {
let mut cmd = Command::new("/bin/hello");
cmd.env_clear();
cmd.env("A", "B");
assert_eq!(&format!("{}", cmd.display(&Style::debug())),
r#"<Command "/bin/hello"; environ: {"A"="B",}>"#);
}
#[test]
fn test_pretty() {
let mut cmd = Command::new("/bin/hello");
cmd.env_clear();
cmd.arg("world!");
assert_eq!(&format!("{}", cmd.display(&Style::short())),
r#""hello" "world!""#);
}
#[test]
fn test_no_env() {
let mut cmd = Command::new("/bin/hello");
cmd.env_clear();
cmd.env("A", "B");
assert_eq!(&format!("{}", cmd.display(&Style::debug().env(false))),
r#"<Command "/bin/hello"; environ[1]>"#);
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/runtime.rs | src/runtime.rs | rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false | |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/wait.rs | src/wait.rs | use std::io;
use std::os::unix::io::RawFd;
use nix::Error;
use nix::unistd::Pid;
use nix::sys::wait::waitpid;
use nix::sys::signal::{Signal, SIGKILL, kill};
use nix::errno::Errno::EINTR;
use libc::pid_t;
use crate::pipe::PipeHolder;
use crate::{Child, ExitStatus, PipeReader, PipeWriter};
impl Child {
/// Returns pid of the process (a mirror of std method)
pub fn id(&self) -> u32 {
self.pid as u32
}
/// Returns pid of process with correct pid_t type
pub fn pid(&self) -> pid_t {
self.pid
}
/// Synchronously wait for child to complete and return exit status
pub fn wait(&mut self) -> Result<ExitStatus, io::Error> {
if let Some(x) = self.status {
return Ok(x);
}
let status = self._wait()?;
self.status = Some(status);
Ok(status)
}
fn _wait(&mut self) -> Result<ExitStatus, io::Error> {
use nix::sys::wait::WaitStatus::*;
loop {
match waitpid(Some(Pid::from_raw(self.pid)), None) {
Ok(PtraceEvent(..)) => {}
Ok(PtraceSyscall(..)) => {}
Ok(Exited(x, status)) => {
assert!(i32::from(x) == self.pid);
return Ok(ExitStatus::Exited(status as i8));
}
Ok(Signaled(x, sig, core)) => {
assert!(i32::from(x) == self.pid);
return Ok(ExitStatus::Signaled(sig, core));
}
Ok(Stopped(_, _)) => unreachable!(),
Ok(Continued(_)) => unreachable!(),
Ok(StillAlive) => unreachable!(),
Err(Error::Sys(EINTR)) => continue,
Err(Error::InvalidPath) => unreachable!(),
Err(Error::InvalidUtf8) => unreachable!(),
Err(Error::UnsupportedOperation) => {
return Err(io::Error::new(io::ErrorKind::Other,
"nix error: unsupported operation"));
}
Err(Error::Sys(x)) => {
return Err(io::Error::from_raw_os_error(x as i32))
}
}
}
}
/// Send arbitrary unix signal to the process
pub fn signal(&self, signal: Signal) -> Result<(), io::Error> {
// This prevents (somewhat not-reliable) killing some other process
// with same pid
if self.status.is_some() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"invalid argument: can't kill an exited process",
))
}
kill(Pid::from_raw(self.pid), signal)
.map_err(|e| match e {
Error::Sys(x) => io::Error::from_raw_os_error(x as i32),
Error::InvalidPath => unreachable!(),
Error::InvalidUtf8 => unreachable!(),
Error::UnsupportedOperation => {
io::Error::new(io::ErrorKind::Other,
"nix error: unsupported operation")
}
})
}
/// Kill process with SIGKILL signal
pub fn kill(&self) -> Result<(), io::Error> {
self.signal(SIGKILL)
}
/// Returns pipe reader for a pipe declared with `file_descriptor()`
///
/// Returns None for wrong configuration or when called twice for same
/// descriptor
pub fn take_pipe_reader(&mut self, fd: RawFd) -> Option<PipeReader> {
match self.fds.remove(&fd) {
Some(PipeHolder::Reader(x)) => Some(x),
_ => None,
}
}
/// Returns pipe writer for a pipe declared with `file_descriptor()`
///
/// Returns None for wrong configuration or when called twice for same
/// descriptor
pub fn take_pipe_writer(&mut self, fd: RawFd) -> Option<PipeWriter> {
match self.fds.remove(&fd) {
Some(PipeHolder::Writer(x)) => Some(x),
_ => None,
}
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/idmap.rs | src/idmap.rs | use libc::{uid_t, gid_t};
/// Entry (row) in the uid map
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct UidMap {
/// First uid inside the guest namespace
pub inside_uid: uid_t,
/// First uid in external (host) namespace
pub outside_uid: uid_t,
/// Number of uids that this entry allows starting from inside/outside uid
pub count: uid_t,
}
/// Entry (row) in the gid map
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct GidMap {
/// First gid inside the guest namespace
pub inside_gid: gid_t,
/// First gid in external (host) namespace
pub outside_gid: gid_t,
/// Number of gids that this entry allows starting from inside/outside gid
pub count: gid_t,
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/run.rs | src/run.rs | use std::collections::HashMap;
use std::env::current_dir;
use std::ffi::CString;
use std::fs::File;
use std::io::{self, Read, Write};
use std::iter::repeat;
use std::os::unix::ffi::{OsStrExt};
use std::os::unix::io::{RawFd, AsRawFd};
use std::path::{Path, PathBuf};
use std::ptr;
use libc::{c_char, close};
use nix;
use nix::errno::Errno::EINTR;
use nix::fcntl::{fcntl, FcntlArg, open};
use nix::fcntl::OFlag;
use nix::sched::{clone, CloneFlags};
use nix::sys::signal::{SIGKILL, SIGCHLD, kill};
use nix::sys::stat::Mode;
use nix::sys::wait::waitpid;
use nix::unistd::{setpgid, Pid};
use crate::child;
use crate::config::Config;
use crate::{Command, Child, ExitStatus};
use crate::error::{Error, result, cmd_result};
use crate::error::ErrorCode as Err;
use crate::pipe::{Pipe, PipeReader, PipeWriter, PipeHolder};
use crate::stdio::{Fd, Closing};
use crate::chroot::{Pivot, Chroot};
use crate::ffi_util::ToCString;
use crate::namespace::to_clone_flag;
pub const MAX_PID_LEN: usize = 12;
pub struct ChildInfo<'a> {
pub filename: *const c_char,
pub args: &'a [*const c_char],
// this is mut because we write pid to environ
pub environ: &'a [*mut c_char],
pub cfg: &'a Config,
pub chroot: &'a Option<Chroot>,
pub pivot: &'a Option<Pivot>,
pub wakeup_pipe: RawFd,
pub error_pipe: RawFd,
pub fds: &'a [(RawFd, RawFd)],
/// This map may only be used for lookup but not for iteration!
pub fd_lookup: &'a HashMap<RawFd, RawFd>,
pub close_fds: &'a [(RawFd, RawFd)],
pub setns_namespaces: &'a [(CloneFlags, RawFd)],
pub pid_env_vars: &'a [(usize, usize)],
pub keep_caps: &'a Option<[u32; 2]>,
pub pre_exec: &'a Option<Box<dyn Fn() -> Result<(), io::Error>>>,
}
fn raw_with_null(arr: &Vec<CString>) -> Vec<*const c_char> {
let mut vec = Vec::with_capacity(arr.len() + 1);
for i in arr {
vec.push(i.as_ptr());
}
vec.push(ptr::null());
return vec;
}
fn raw_with_null_mut(arr: &mut Vec<Vec<u8>>) -> Vec<*mut c_char> {
let mut vec = Vec::with_capacity(arr.len() + 1);
for i in arr {
vec.push(i.as_mut_ptr() as *mut c_char);
}
vec.push(ptr::null_mut());
return vec;
}
fn relative_to<A:AsRef<Path>, B:AsRef<Path>>(dir: A, rel: B, absolute: bool)
-> Option<PathBuf>
{
let dir = dir.as_ref();
let rel = rel.as_ref();
let mut dircmp = dir.components();
for (dc, rc) in rel.components().zip(dircmp.by_ref()) {
if dc != rc {
return None;
}
}
if absolute {
Some(Path::new("/").join(dircmp.as_path()))
} else {
Some(dircmp.as_path().to_path_buf())
}
}
fn prepare_descriptors(fds: &HashMap<RawFd, Fd>)
-> Result<(HashMap<RawFd, RawFd>, HashMap<RawFd, PipeHolder>,
Vec<Closing>), Error>
{
let mut inner = HashMap::new();
let mut outer = HashMap::new();
let mut guards = Vec::new();
for (&dest_fd, fdkind) in fds.iter() {
let mut fd = match fdkind {
&Fd::ReadPipe => {
let (rd, wr) = Pipe::new()?.split();
let fd = rd.into_fd();
guards.push(Closing::new(fd));
outer.insert(dest_fd, PipeHolder::Writer(wr));
fd
}
&Fd::WritePipe => {
let (rd, wr) = Pipe::new()?.split();
let fd = wr.into_fd();
guards.push(Closing::new(fd));
outer.insert(dest_fd, PipeHolder::Reader(rd));
fd
}
&Fd::ReadNull => {
// Need to keep fd with cloexec, until we are in child
let fd = result(Err::CreatePipe,
open(Path::new("/dev/null"),
OFlag::O_CLOEXEC|OFlag::O_RDONLY,
Mode::empty()))?;
guards.push(Closing::new(fd));
fd
}
&Fd::WriteNull => {
// Need to keep fd with cloexec, until we are in child
let fd = result(Err::CreatePipe,
open(Path::new("/dev/null"),
OFlag::O_CLOEXEC|OFlag::O_WRONLY,
Mode::empty()))?;
guards.push(Closing::new(fd));
fd
}
&Fd::Inherit => {
dest_fd
}
&Fd::Fd(ref x) => {
x.as_raw_fd()
}
};
// The descriptor must not clobber the descriptors that are passed to
// a child
while fd != dest_fd && fds.contains_key(&fd) {
fd = result(Err::CreatePipe,
fcntl(fd, FcntlArg::F_DUPFD_CLOEXEC(3)))?;
guards.push(Closing::new(fd));
}
inner.insert(dest_fd, fd);
}
Ok((inner, outer, guards))
}
impl Command {
/// Run the command and return exit status
pub fn status(&mut self) -> Result<ExitStatus, Error> {
// TODO(tailhook) stdin/stdout/stderr
self.spawn()?
.wait()
.map_err(|e| Error::WaitError(e.raw_os_error().unwrap_or(-1)))
}
/// Spawn the command and return a handle that can be waited for
pub fn spawn(&mut self) -> Result<Child, Error> {
// TODO(tailhook) We need mutable self only for init_env_map. Probably
// we might do this internally and don't modify Command. That would
// be more clear and also allow to print Display command easily in
// error handler
self.init_env_map();
unsafe { self.spawn_inner() }
}
unsafe fn spawn_inner(&mut self) -> Result<Child, Error> {
// TODO(tailhook) add RAII for pipes
let (wakeup_rd, wakeup) = Pipe::new()?.split();
let (errpipe, errpipe_wr) = Pipe::new()?.split();
let c_args = raw_with_null(&self.args);
let mut environ: Vec<_> = self.environ.as_ref().unwrap()
.iter().map(|(k, v)| {
let mut pair = k[..].as_bytes().to_vec();
pair.push(b'=');
pair.extend(v.as_bytes());
pair.push(0);
pair
}).collect();
let mut pid_env_vars = Vec::new();
for var_name in &self.pid_env_vars {
let mut pair = var_name[..].as_bytes().to_vec();
pair.push(b'=');
let (index, offset) = (environ.len(), pair.len());
pair.extend(repeat(0).take(MAX_PID_LEN+1));
environ.push(pair);
pid_env_vars.push((index, offset));
}
let c_environ: Vec<_> = raw_with_null_mut(&mut environ);
let (int_fds, ext_fds, _guards) = prepare_descriptors(&self.fds)?;
let pivot = self.pivot_root.as_ref().map(|&(ref new, ref old, unmnt)| {
Pivot {
new_root: new.to_cstring(),
put_old: old.to_cstring(),
old_inside: relative_to(old, new, true).unwrap().to_cstring(),
workdir: current_dir().ok()
.and_then(|cur| relative_to(cur, new, true))
.unwrap_or(PathBuf::from("/"))
.to_cstring(),
unmount_old_root: unmnt,
}
});
let chroot = self.chroot_dir.as_ref().map(|dir| {
let wrk_rel = if let Some((ref piv, _, _)) = self.pivot_root {
piv.join(relative_to(dir, "/", false).unwrap())
} else {
dir.to_path_buf()
};
Chroot {
root: dir.to_cstring(),
workdir: current_dir().ok()
.and_then(|cur| relative_to(cur, wrk_rel, true))
.unwrap_or(PathBuf::from("/"))
.to_cstring()
,
}
});
let mut nstack = [0u8; 4096];
let mut wakeup = Some(wakeup);
let mut wakeup_rd = Some(wakeup_rd);
let mut errpipe_wr = Some(errpipe_wr);
let args_slice = &c_args[..];
let environ_slice = &c_environ[..];
// We transform all hashmaps into vectors, because iterating over
// hash map involves closure which crashes in the child in unoptimized
// build
let fds = int_fds.iter().map(|(&x, &y)| (x, y)).collect::<Vec<_>>();
let close_fds = self.close_fds.iter().cloned().collect::<Vec<_>>();
let setns_ns = self.config.setns_namespaces.iter()
.map(|(ns, fd)| (to_clone_flag(*ns), fd.as_raw_fd()))
.collect::<Vec<_>>();
let pid = result(Err::Fork, clone(Box::new(|| -> isize {
// Note: mo memory allocations/deallocations here
close(wakeup.take().unwrap().into_fd());
let child_info = ChildInfo {
filename: self.filename.as_ptr(),
args: args_slice,
environ: environ_slice,
cfg: &self.config,
chroot: &chroot,
pivot: &pivot,
wakeup_pipe: wakeup_rd.take().unwrap().into_fd(),
error_pipe: errpipe_wr.take().unwrap().into_fd(),
fds: &fds,
fd_lookup: &int_fds,
close_fds: &close_fds,
setns_namespaces: &setns_ns,
pid_env_vars: &pid_env_vars,
keep_caps: &self.keep_caps,
pre_exec: &self.pre_exec,
};
child::child_after_clone(&child_info);
}), &mut nstack[..], self.config.namespaces, Some(SIGCHLD as i32)))?;
drop(wakeup_rd);
drop(errpipe_wr); // close pipe so we don't wait for ourself
if let Err(e) = self.after_start(pid, wakeup.unwrap(), errpipe) {
kill(pid, SIGKILL).ok();
loop {
match waitpid(pid, None) {
Err(nix::Error::Sys(EINTR)) => continue,
_ => break,
}
}
return Err(e);
}
let mut outer_fds = ext_fds;
Ok(Child {
pid: pid.into(),
status: None,
stdin: outer_fds.remove(&0).map(|x| {
match x {
PipeHolder::Writer(x) => x,
_ => unreachable!(),
}}),
stdout: outer_fds.remove(&1).map(|x| {
match x {
PipeHolder::Reader(x) => x,
_ => unreachable!(),
}}),
stderr: outer_fds.remove(&2).map(|x| {
match x {
PipeHolder::Reader(x) => x,
_ => unreachable!(),
}}),
fds: outer_fds,
})
}
fn after_start(&mut self, pid: Pid,
mut wakeup: PipeWriter, mut errpipe: PipeReader)
-> Result<(), Error>
{
if self.config.make_group_leader {
result(Err::SetPGid, setpgid(pid, pid))?;
}
if let Some(&(ref uids, ref gids)) = self.config.id_maps.as_ref() {
if let Some(&(ref ucmd, ref gcmd)) = self.id_map_commands.as_ref()
{
let mut cmd = Command::new(ucmd);
cmd.arg(format!("{}", pid));
for map in uids {
cmd.arg(format!("{}", map.inside_uid));
cmd.arg(format!("{}", map.outside_uid));
cmd.arg(format!("{}", map.count));
}
cmd_result(Err::SetIdMap, cmd.status())?;
let mut cmd = Command::new(gcmd);
cmd.arg(format!("{}", pid));
for map in gids {
cmd.arg(format!("{}", map.inside_gid));
cmd.arg(format!("{}", map.outside_gid));
cmd.arg(format!("{}", map.count));
}
cmd_result(Err::SetIdMap, cmd.status())?;
} else {
let mut buf = Vec::new();
for map in uids {
writeln!(&mut buf, "{} {} {}",
map.inside_uid, map.outside_uid, map.count).unwrap();
}
result(Err::SetIdMap,
File::create(format!("/proc/{}/uid_map", pid))
.and_then(|mut f| f.write_all(&buf[..])))?;
let mut buf = Vec::new();
for map in gids {
writeln!(&mut buf, "{} {} {}",
map.inside_gid, map.outside_gid, map.count).unwrap();
}
result(Err::SetIdMap,
File::create(format!("/proc/{}/gid_map", pid))
.and_then(|mut f| f.write_all(&buf[..])))?;
}
}
if let Some(ref mut callback) = self.before_unfreeze {
callback(i32::from(pid) as u32).map_err(Error::BeforeUnfreeze)?;
}
result(Err::PipeError, wakeup.write_all(b"x"))?;
let mut err = [0u8; 6];
match result(Err::PipeError, errpipe.read(&mut err))? {
0 => {} // Process successfully execve'd or dead
5 => {
let code = err[0];
let errno = ((err[1] as i32) << 24) | ((err[2] as i32) << 16) |
((err[3] as i32) << 8) | (err[4] as i32);
return Err(Err::from_i32(code as i32, errno))
}
_ => { return Err(Error::UnknownError); }
}
Ok(())
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/child.rs | src/child.rs | use std::os::unix::io::RawFd;
use std::mem;
use std::ptr;
use libc;
use nix;
use libc::{c_void, c_ulong, sigset_t, size_t};
use libc::{kill, signal};
use libc::{F_GETFD, F_SETFD, F_DUPFD_CLOEXEC, FD_CLOEXEC, MNT_DETACH};
use libc::{SIG_DFL, SIG_SETMASK};
use crate::run::{ChildInfo, MAX_PID_LEN};
use crate::error::ErrorCode as Err;
// And at this point we've reached a special time in the life of the
// child. The child must now be considered hamstrung and unable to
// do anything other than syscalls really.
//
// ESPECIALLY YOU CAN NOT DO MEMORY (DE)ALLOCATIONS
//
// See better explanation at:
// https://github.com/rust-lang/rust/blob/c1e865c/src/libstd/sys/unix/process.rs#L202
//
// In particular ChildInfo is passed by refernce here to avoid
// deallocating (parts of) it.
pub unsafe fn child_after_clone(child: &ChildInfo) -> ! {
let mut epipe = child.error_pipe;
child.cfg.death_sig.as_ref().map(|&sig| {
if libc::prctl(ffi::PR_SET_PDEATHSIG, sig as c_ulong, 0, 0, 0) != 0 {
fail(Err::ParentDeathSignal, epipe);
}
});
// Now we must wait until parent set some environment for us. It's mostly
// for uid_map/gid_map. But also used for attaching debugger and maybe
// other things
let mut wbuf = [0u8];
loop {
// TODO(tailhook) put some timeout on this pipe?
let rc = libc::read(child.wakeup_pipe,
(&mut wbuf).as_ptr() as *mut c_void, 1);
if rc == 0 {
// Parent already dead presumably before we had a chance to
// set PDEATHSIG, so just send signal ourself in that case
if let Some(sig) = child.cfg.death_sig {
kill(libc::getpid(), sig as i32);
libc::_exit(127);
} else {
// In case we wanted to daemonize, just continue
//
// TODO(tailhook) not sure it's best thing to do. Maybe parent
// failed to setup uid/gid map for us. Do we want to check
// specific options? Or should we just always die?
break;
}
} else if rc < 0 {
let errno = nix::errno::errno();
if errno == libc::EINTR as i32 ||
errno == libc::EAGAIN as i32
{
continue;
} else {
fail(Err::PipeError, errno);
}
} else {
// Do we need to check that exactly one byte is received?
break;
}
}
// Move error pipe file descriptors in case they clobber stdio
while epipe < 3 {
let nerr = libc::fcntl(epipe, F_DUPFD_CLOEXEC, 3);
if nerr < 0 {
fail(Err::CreatePipe, epipe);
}
epipe = nerr;
}
for &(nstype, fd) in child.setns_namespaces {
if libc::setns(fd, nstype.bits()) != 0 {
fail(Err::SetNs, epipe);
}
}
if !child.pid_env_vars.is_empty() {
let mut buf = [0u8; MAX_PID_LEN+1];
let data = format_pid_fixed(&mut buf, libc::getpid());
for &(index, offset) in child.pid_env_vars {
// we know that there are at least MAX_PID_LEN+1 bytes in buffer
child.environ[index].offset(offset as isize)
.copy_from(data.as_ptr() as *const libc::c_char, data.len());
}
}
child.pivot.as_ref().map(|piv| {
if ffi::pivot_root(piv.new_root.as_ptr(), piv.put_old.as_ptr()) != 0 {
fail(Err::ChangeRoot, epipe);
}
if libc::chdir(piv.workdir.as_ptr()) != 0 {
fail(Err::ChangeRoot, epipe);
}
if piv.unmount_old_root {
if libc::umount2(piv.old_inside.as_ptr(), MNT_DETACH) != 0 {
fail(Err::ChangeRoot, epipe);
}
}
});
child.chroot.as_ref().map(|chroot| {
if libc::chroot(chroot.root.as_ptr()) != 0 {
fail(Err::ChangeRoot, epipe);
}
if libc::chdir(chroot.workdir.as_ptr()) != 0 {
fail(Err::ChangeRoot, epipe);
}
});
child.keep_caps.as_ref().map(|_| {
// Don't use securebits because on older systems it doesn't work
if libc::prctl(libc::PR_SET_KEEPCAPS, 1, 0, 0, 0) != 0 {
fail(Err::CapSet, epipe);
}
});
child.cfg.gid.as_ref().map(|&gid| {
if libc::setgid(gid) != 0 {
fail(Err::SetUser, epipe);
}
});
child.cfg.supplementary_gids.as_ref().map(|groups| {
if libc::setgroups(groups.len() as size_t, groups.as_ptr()) != 0 {
fail(Err::SetUser, epipe);
}
});
child.cfg.uid.as_ref().map(|&uid| {
if libc::setuid(uid) != 0 {
fail(Err::SetUser, epipe);
}
});
child.keep_caps.as_ref().map(|caps| {
let header = ffi::CapsHeader {
version: ffi::CAPS_V3,
pid: 0,
};
let data = ffi::CapsData {
effective_s0: caps[0],
permitted_s0: caps[0],
inheritable_s0: caps[0],
effective_s1: caps[1],
permitted_s1: caps[1],
inheritable_s1: caps[1],
};
if libc::syscall(libc::SYS_capset, &header, &data) != 0 {
fail(Err::CapSet, epipe);
}
for idx in 0..caps.len()*32 {
if caps[(idx >> 5) as usize] & (1 << (idx & 31)) != 0 {
let rc = libc::prctl(
libc::PR_CAP_AMBIENT,
libc::PR_CAP_AMBIENT_RAISE,
idx, 0, 0);
if rc != 0 && nix::errno::errno() == libc::ENOTSUP {
// no need to iterate if ambient caps are notsupported
break;
}
}
}
});
child.cfg.work_dir.as_ref().map(|dir| {
if libc::chdir(dir.as_ptr()) != 0 {
fail(Err::Chdir, epipe);
}
});
for &(dest_fd, src_fd) in child.fds {
if src_fd == dest_fd {
let flags = libc::fcntl(src_fd, F_GETFD);
if flags < 0 ||
libc::fcntl(src_fd, F_SETFD, flags & !FD_CLOEXEC) < 0
{
fail(Err::StdioError, epipe);
}
} else {
if libc::dup2(src_fd, dest_fd) < 0 {
fail(Err::StdioError, epipe);
}
}
}
for &(start, end) in child.close_fds {
if start < end {
for fd in start..end {
if child.fds.iter().find(|&&(cfd, _)| cfd == fd).is_none() {
// Close may fail with ebadf, and it's okay
libc::close(fd);
}
}
}
}
if child.cfg.restore_sigmask {
let mut sigmask: sigset_t = mem::zeroed();
libc::sigemptyset(&mut sigmask);
libc::pthread_sigmask(SIG_SETMASK, &sigmask, ptr::null_mut());
for sig in 1..32 {
signal(sig, SIG_DFL);
}
}
if let Some(callback) = child.pre_exec {
if let Err(e) = callback() {
fail_errno(Err::PreExec,
e.raw_os_error().unwrap_or(10873289),
epipe);
}
}
libc::execve(child.filename,
child.args.as_ptr(),
// cancelling mutability, it should be fine
child.environ.as_ptr() as *const *const libc::c_char);
fail(Err::Exec, epipe);
}
unsafe fn fail(code: Err, output: RawFd) -> ! {
fail_errno(code, nix::errno::errno(), output)
}
unsafe fn fail_errno(code: Err, errno: i32, output: RawFd) -> ! {
let bytes = [
code as u8,
(errno >> 24) as u8,
(errno >> 16) as u8,
(errno >> 8) as u8,
(errno >> 0) as u8,
// TODO(tailhook) rustc adds a special sentinel at the end of error
// code. Do we really need it? Assuming our pipes are always cloexec'd.
];
// Writes less than PIPE_BUF should be atomic. It's also unclear what
// to do if error happened anyway
libc::write(output, bytes.as_ptr() as *const c_void, 5);
libc::_exit(127);
}
fn format_pid_fixed<'a>(buf: &'a mut [u8], pid: libc::pid_t) -> &'a [u8] {
buf[buf.len()-1] = 0;
if pid == 0 {
buf[buf.len()-2] = b'0';
return &buf[buf.len()-2..]
} else {
let mut tmp = pid;
// can't use stdlib function because that can allocate
for n in (0..buf.len()-1).rev() {
buf[n] = (tmp % 10) as u8 + b'0';
tmp /= 10;
if tmp == 0 {
return &buf[n..];
}
}
unreachable!("can't format pid");
};
}
/// We don't use functions from nix here because they may allocate memory
/// which we can't to this this module.
mod ffi {
use libc::{c_char, c_int};
pub const PR_SET_PDEATHSIG: c_int = 1;
pub const CAPS_V3: u32 = 0x20080522;
#[repr(C)]
pub struct CapsHeader {
pub version: u32,
pub pid: i32,
}
#[repr(C)]
pub struct CapsData {
pub effective_s0: u32,
pub permitted_s0: u32,
pub inheritable_s0: u32,
pub effective_s1: u32,
pub permitted_s1: u32,
pub inheritable_s1: u32,
}
extern {
pub fn pivot_root(new_root: *const c_char, put_old: *const c_char)
-> c_int;
}
}
#[cfg(test)]
mod test {
use rand::{thread_rng, Rng};
use crate::run::MAX_PID_LEN;
use std::ffi::CStr;
use super::format_pid_fixed;
fn fmt_normal(val: i32) -> String {
let mut buf = [0u8; MAX_PID_LEN+1];
let slice = format_pid_fixed(&mut buf, val);
return CStr::from_bytes_with_nul(slice).unwrap()
.to_string_lossy().to_string();
}
#[test]
fn test_format() {
assert_eq!(fmt_normal(0), "0");
assert_eq!(fmt_normal(1), "1");
assert_eq!(fmt_normal(7), "7");
assert_eq!(fmt_normal(79), "79");
assert_eq!(fmt_normal(254), "254");
assert_eq!(fmt_normal(1158), "1158");
assert_eq!(fmt_normal(77839), "77839");
}
#[test]
fn test_random() {
for _ in 0..100000 {
let x = thread_rng().gen();
if x < 0 { continue; }
assert_eq!(fmt_normal(x), format!("{}", x));
}
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/pipe.rs | src/pipe.rs | use std::io;
use std::mem;
use std::os::unix::io::{RawFd};
use nix::unistd::pipe2;
use nix::fcntl::OFlag;
use libc;
use libc::{c_void, size_t};
use crate::error::{result, Error};
use crate::error::ErrorCode::CreatePipe;
/// A pipe used to communicate with subprocess
#[derive(Debug)]
pub struct Pipe(RawFd, RawFd);
/// A reading end of `Pipe` object after `Pipe::split`
#[derive(Debug)]
pub struct PipeReader(RawFd);
/// A writing end of `Pipe` object after `Pipe::split`
#[derive(Debug)]
pub struct PipeWriter(RawFd);
#[derive(Debug)]
pub enum PipeHolder {
Reader(PipeReader),
Writer(PipeWriter),
}
impl Pipe {
pub fn new() -> Result<Pipe, Error> {
let (rd, wr) = result(CreatePipe, pipe2(OFlag::O_CLOEXEC))?;
Ok(Pipe(rd, wr))
}
pub fn split(self) -> (PipeReader, PipeWriter) {
let Pipe(rd, wr) = self;
mem::forget(self);
(PipeReader(rd), PipeWriter(wr))
}
}
impl Drop for Pipe {
fn drop(&mut self) {
let Pipe(x, y) = *self;
unsafe {
libc::close(x);
libc::close(y);
}
}
}
impl PipeReader {
/// Extract file descriptor from pipe reader without closing
// TODO(tailhook) implement IntoRawFd here
pub fn into_fd(self) -> RawFd {
let PipeReader(fd) = self;
mem::forget(self);
return fd;
}
}
impl PipeWriter {
/// Extract file descriptor from pipe reader without closing
// TODO(tailhook) implement IntoRawFd here
pub fn into_fd(self) -> RawFd {
let PipeWriter(fd) = self;
mem::forget(self);
return fd;
}
}
impl Drop for PipeReader {
fn drop(&mut self) {
unsafe { libc::close(self.0) };
}
}
impl Drop for PipeWriter {
fn drop(&mut self) {
unsafe { libc::close(self.0) };
}
}
impl io::Read for PipeReader {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let ret = unsafe {
libc::read(self.0,
buf.as_mut_ptr() as *mut c_void,
buf.len() as size_t)
};
if ret < 0 {
return Err(io::Error::last_os_error());
}
Ok(ret as usize)
}
}
impl io::Write for PipeWriter {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let ret = unsafe {
libc::write(self.0,
buf.as_ptr() as *const c_void,
buf.len() as size_t)
};
if ret < 0 {
return Err(io::Error::last_os_error());
}
Ok(ret as usize)
}
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/fds.rs | src/fds.rs | use std::mem::zeroed;
use std::ops::{Range, RangeTo, RangeFrom, RangeFull};
use std::os::unix::io::RawFd;
use nix::errno::errno;
use libc::getrlimit;
use libc::RLIMIT_NOFILE;
use crate::stdio::{Fd};
use crate::Command;
/// This is just a temporary enum to coerce `std::ops::Range*` variants
/// into single value for convenience. Used in `close_fds` method.
pub enum AnyRange {
RangeFrom(RawFd),
Range(RawFd, RawFd),
}
impl Command {
/// Configuration for any other file descriptor (panics for fds < 3) use
/// stdin/stdout/stderr for them
///
/// Rust creates file descriptors with CLOEXEC flag by default, so no
/// descriptors are inherited except ones specifically configured here
/// (and stdio which is inherited by default)
pub fn file_descriptor(&mut self, target_fd: RawFd, cfg: Fd)
-> &mut Command
{
if target_fd <= 2 {
panic!("Stdio file descriptors must be configured with respective \
methods instead of passing fd {} to `file_descritor()`",
target_fd)
}
self.fds.insert(target_fd, cfg);
self
}
/// Close a range of file descriptors as soon as process forks
///
/// Subsequent calls to this method add additional range. Use `reset_fds`
/// to remove all the ranges.
///
/// File descriptors that never closed are:
///
/// * the stdio file descriptors
/// * descriptors configured using `file_descriptor`/`file_descriptor_raw`
/// methods
/// * internal file descriptors used for parent child notification by
/// unshare crate itself (they are guaranteed to have CLOEXEC)
///
/// You should avoid this method if possilble and rely on CLOEXEC to
/// do the work. But sometimes it's inevitable:
///
/// 1. If you need to ensure closing descriptors for security reasons
/// 2. If you have some bad library out of your control which doesn't
/// set CLOEXEC on owned the file descriptors
///
/// Ranges obey the following rules:
///
/// * Range like `..12` is transformed into `3..12`
/// * Range with undefined upper bound `3..` is capped at current ulimit
/// for file descriptors **at the moment of calling the method**
/// * The full range `..` is an alias to `3..`
/// * Multiple overlapping ranges are closed multiple times which is
/// both harmless and useless
///
/// # Panics
///
/// Panics when can't get rlimit for range without upper bound. Should
/// never happen in practice.
///
/// Panics when lower range of fd is < 3 (stdio file descriptors)
///
pub fn close_fds<A: Into<AnyRange>>(&mut self, range: A)
-> &mut Command
{
self.close_fds.push(match range.into() {
AnyRange::Range(x, y) => {
assert!(x >= 3);
(x, y)
}
AnyRange::RangeFrom(x) => unsafe {
assert!(x >= 3);
let mut rlim = zeroed();
let rc = getrlimit(RLIMIT_NOFILE, &mut rlim);
if rc < 0 {
panic!("Can't get rlimit: errno {}", errno());
}
(x, rlim.rlim_cur as RawFd)
}
});
self
}
/// Reset file descriptor including stdio to the initial state
///
/// Initial state is inherit all the stdio and do nothing to other fds.
pub fn reset_fds(&mut self) -> &mut Command {
self.fds = vec![
(0, Fd::inherit()),
(1, Fd::inherit()),
(2, Fd::inherit()),
].into_iter().collect();
self.close_fds.clear();
self
}
}
impl Into<AnyRange> for Range<RawFd> {
fn into(self) -> AnyRange {
return AnyRange::Range(self.start, self.end);
}
}
impl Into<AnyRange> for RangeTo<RawFd> {
fn into(self) -> AnyRange {
return AnyRange::Range(3, self.end);
}
}
impl Into<AnyRange> for RangeFrom<RawFd> {
fn into(self) -> AnyRange {
return AnyRange::RangeFrom(self.start);
}
}
impl Into<AnyRange> for RangeFull {
fn into(self) -> AnyRange {
return AnyRange::RangeFrom(3);
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/namespace.rs | src/namespace.rs | use nix::sched::CloneFlags;
/// Namespace name to unshare
///
/// See `man 7 namespaces` for more information
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub enum Namespace {
/// Unshare the mount namespace. It basically means that you can now mount
/// and unmount folders without touching parent mount points.
///
/// But note that you also have to make all your mountpoints non-shareable
/// or changes will be propagated to parent namespace anyway.
///
/// This is always needed if you want `pivot_root` (but not enforced by
/// library)
Mount,
/// Unshare the UTS namespace. This allows you to change hostname of the
/// new container.
Uts,
/// Unshare the IPC namespace. This creates new namespace for System V IPC
/// POSIX message queues and similar.
Ipc,
/// Unshare user namespace. This allows unprivileged user to be root
/// user in new namespace and/or change mappings between real (outer)
/// user namespace and the inner one.
///
/// This one is required if you want to unshare any other namespace without
/// root privileges (it's not enforced by kernel not the library)
///
/// See `man 7 user_namespaces` for more information.
User,
/// Unshare pid namespace. The child process becomes PID 1 (inside
/// container) with the following rough list of consequences:
///
/// 1. All daemon processes are reparented to the process
/// 2. All signal dispositions are set to `Ignore`. E.g. process doesn't
/// get killed by `SIGINT` (Ctrl+C), unless signal handler is explicitly
/// set
/// 3. If the process is dead, all its children are killed by `SIGKILL`
/// (i.e. can't catch the death signal)
///
/// All this means that most of the time the new process having this
/// namespace must be some kind of process supervisor.
///
/// Also take a note that `/proc` is not automatically changed. So you
/// should also unshare `Mount` namespace and mount new `/proc` inside the
/// PID namespace.
///
/// See `man 7 pid_namespaces` for more information
Pid,
/// Unshare network namespace
///
/// New namespace is empty and has no conectivity, even localhost network,
/// unless some setup is done afterwards.
///
/// Note that unix sockets continue to work, but "abstract unix sockets"
/// are isolated as a result of this option. The availability of unix
/// sockets might also mean that libc is able to resolve DNS names by using
/// NSCD. You may isolate unix sockets by using any kind of filesystem
/// isolation.
Net,
/// Cgroup namespace
///
/// Creates a new namespace for CGroups.
///
/// See `man 7 cgroup_namespaces` for more information
Cgroup,
}
/// Convert namespace to a clone flag passed to syscalls
// TODO(tailhook) should this method be private?
pub fn to_clone_flag(ns: Namespace) -> CloneFlags {
match ns {
Namespace::Mount => CloneFlags::CLONE_NEWNS,
Namespace::Uts => CloneFlags::CLONE_NEWUTS,
Namespace::Ipc => CloneFlags::CLONE_NEWIPC,
Namespace::User => CloneFlags::CLONE_NEWUSER,
Namespace::Pid => CloneFlags::CLONE_NEWPID,
Namespace::Net => CloneFlags::CLONE_NEWNET,
Namespace::Cgroup => CloneFlags::CLONE_NEWCGROUP,
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/src/zombies.rs | src/zombies.rs | use std::marker::PhantomData;
use libc::pid_t;
use nix::sys::wait::{waitpid};
use nix::sys::wait::WaitPidFlag;
use nix::errno::Errno::{EINTR, ECHILD};
use nix::Error;
use crate::{ExitStatus, Signal};
/// A non-blocking iteration over zombie processes
///
/// Use `reap_zombies()` to create one, and read docs there
pub struct ZombieIterator(PhantomData<u8>);
impl Iterator for ZombieIterator {
type Item = (pid_t, ExitStatus);
fn next(&mut self) -> Option<(pid_t, ExitStatus)> {
use nix::sys::wait::WaitStatus::*;
loop {
match waitpid(None, Some(WaitPidFlag::WNOHANG)) {
Ok(PtraceEvent(..)) => {}
Ok(PtraceSyscall(..)) => {}
Ok(Exited(pid, status)) => {
return Some((pid.into(), ExitStatus::Exited(status as i8)));
}
Ok(Signaled(pid, sig, core)) => {
return Some((pid.into(), ExitStatus::Signaled(sig, core)));
}
Ok(Stopped(_, _)) => continue,
Ok(Continued(_)) => continue,
Ok(StillAlive) => return None,
Err(Error::Sys(EINTR)) => continue,
Err(Error::Sys(ECHILD)) => return None,
Err(e) => {
panic!("Unexpected waitpid error: {:?}", e);
}
}
}
}
}
/// Creates iterator over zombie processes
///
/// On each iteration it calls `waitpid()` and returns child pid and exit
/// status if there is zombie process. The operation is non-blocking. The
/// iterator is exhausted when there are no zombie process at the moment,
///
/// Alternatively see a more comprehensive `child_events()` function.
///
/// # Example
///
/// So waiting for all processes to finish may look like this:
///
/// ```ignore
/// while alive.len() > 0 {
/// sigwait()
/// for (pid, status) in zombies() {
/// alive.remove(pid);
/// }
/// }
/// ```
///
/// # Important Notes
///
/// * If you are using this function you can't reliably use `Child::wait`
/// any more.
/// * If you got `SIGCHLD` you *must* exhaust this iterator until waiting for
/// next signal, or you will have zombie processes around
pub fn reap_zombies() -> ZombieIterator { ZombieIterator(PhantomData) }
/// The event returned from `child_events()` iterator
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ChildEvent {
/// Child is dead, similar to what returned by `reap_zombies()`
Death(pid_t, ExitStatus),
/// Child is stopped on a signal Signal
Stop(pid_t, Signal),
/// Child is continued (SIGCONT sent)
Continue(pid_t),
}
/// A non-blocking iteration over zombies and child stops
///
/// Use `child_events()` to create one, and read docs there
pub struct ChildEventsIterator(PhantomData<u8>);
impl Iterator for ChildEventsIterator {
type Item = ChildEvent;
fn next(&mut self) -> Option<ChildEvent> {
use self::ChildEvent::*;
use nix::sys::wait::WaitStatus::*;
let flags = WaitPidFlag::WNOHANG | WaitPidFlag::WUNTRACED |
WaitPidFlag::WCONTINUED;
loop {
match waitpid(None, Some(flags)) {
Ok(PtraceEvent(..)) => {}
Ok(PtraceSyscall(..)) => {}
Ok(Exited(pid, status)) => {
return Some(Death(pid.into(),
ExitStatus::Exited(status as i8)));
}
Ok(Signaled(pid, sig, core)) => {
return Some(Death(pid.into(),
ExitStatus::Signaled(sig, core)));
}
Ok(Stopped(pid, sig)) => return Some(Stop(pid.into(), sig)),
Ok(Continued(pid)) => return Some(Continue(pid.into())),
Ok(StillAlive) => return None,
Err(Error::Sys(EINTR)) => continue,
Err(Error::Sys(ECHILD)) => return None,
Err(e) => {
panic!("Unexpected waitpid error: {:?}", e);
}
}
}
}
}
/// Creates iterator over child events
///
/// On each iteration it calls `waitpid()` and returns one of the
/// events described in `ChildEvent`.
///
/// The operation is non-blocking. The iterator is exhausted when there are no
/// zombie process at the moment.
///
/// Alternatively see a simpler `reap_zombies()` function.
///
/// # Example
///
/// So waiting for all processes to finish may look like this:
///
/// ```ignore
/// while alive.len() > 0 {
/// sigwait()
/// for event in zombies() {
/// match event {
/// Death(pid, _) => alive.remove(pid),
/// Stop(..) => {}
/// Continue(..) => {}
/// }
/// }
/// ```
///
/// # Important Notes
///
/// * If you are using this function you can't reliably use `Child::wait`
/// any more.
/// * If you got `SIGCHLD` you *must* exhaust this iterator until waiting for
/// next signal, or you will have zombie processes around
pub fn child_events() -> ChildEventsIterator {
ChildEventsIterator(PhantomData)
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/examples/echo.rs | examples/echo.rs | extern crate unshare;
use std::process::exit;
fn main() {
let mut cmd = unshare::Command::new("/bin/echo");
cmd.arg("hello");
cmd.arg("world!");
match cmd.status().unwrap() {
// propagate signal
unshare::ExitStatus::Exited(x) => exit(x as i32),
unshare::ExitStatus::Signaled(x, _) => exit((128+x as i32) as i32),
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
tailhook/unshare | https://github.com/tailhook/unshare/blob/6cdc15d97aca90f59d1427e01da4c461184d0fe4/examples/runcmd.rs | examples/runcmd.rs | extern crate unshare;
extern crate argparse;
extern crate libc;
use std::io::{stderr, Write, Read};
use std::process::exit;
use std::path::PathBuf;
use unshare::Namespace;
use libc::{uid_t, gid_t};
use argparse::{ArgumentParser, Store, StoreOption, Collect, StoreTrue};
use argparse::{ParseOption, PushConst};
fn main() {
let mut command = "".to_string();
let mut args: Vec<String> = Vec::new();
let mut alias = None::<String>;
let mut workdir = None::<String>;
let mut verbose = false;
let mut escape_stdout = false;
let mut uid = None::<uid_t>;
let mut gid = None::<gid_t>;
let mut chroot = None::<PathBuf>;
let mut namespaces = Vec::<Namespace>::new();
let mut groups = Vec::<gid_t>::new();
let mut pid_env_var = None::<String>;
{ // this block limits scope of borrows by ap.refer() method
let mut ap = ArgumentParser::new();
ap.set_description("Run command with changed process state");
ap.refer(&mut command)
.add_argument("command", Store, "Command to run")
.required();
ap.refer(&mut args)
.add_argument("arg", Collect, "Arguments for the command")
.required();
ap.refer(&mut workdir)
.add_option(&["--work-dir"], StoreOption, "
Set working directory of the command");
ap.refer(&mut verbose)
.add_option(&["-v", "--verbose"], StoreTrue, "
Enable verbose mode (prints command, pid, exit status)");
ap.refer(&mut escape_stdout)
.add_option(&["--escape-stdout"], StoreTrue, "
Read data written by the utility to stdout and print it back
as a quoted string with binary data escaped");
ap.refer(&mut uid)
.add_option(&["-U", "--uid"], StoreOption, "
Set user id for the target process");
ap.refer(&mut gid)
.add_option(&["-G", "--gid"], StoreOption, "
Set group id for the target process");
ap.refer(&mut groups)
.add_option(&["--add-group"], Collect, "
Add supplementary group id");
ap.refer(&mut chroot)
.add_option(&["--chroot"], ParseOption, "
Chroot to directory before running command");
ap.refer(&mut alias)
.add_option(&["--alias", "--arg0"], ParseOption, "
Set alias of the command
(passed as `argv[0]` to the program)");
ap.refer(&mut pid_env_var)
.add_option(&["--env-var-with-pid"], ParseOption, "
Add environment variable with pid")
.metavar("ENV_VAR_NAME");
ap.refer(&mut namespaces)
.add_option(&["--unshare-pid"], PushConst(Namespace::Pid),
"Unshare pid namespace")
.add_option(&["--unshare-net"], PushConst(Namespace::Net),
"Unshare net namespace")
.add_option(&["--unshare-mount"], PushConst(Namespace::Mount),
"Unshare mount namespace")
.add_option(&["--unshare-uts"], PushConst(Namespace::Uts),
"Unshare UTS namespace")
.add_option(&["--unshare-ipc"], PushConst(Namespace::Ipc),
"Unshare IPC namespace")
.add_option(&["--unshare-user"], PushConst(Namespace::User),
"Unshare user namespace");
ap.stop_on_first_argument(true);
ap.parse_args_or_exit();
}
let mut cmd = unshare::Command::new(&command);
cmd.args(&args[..]);
alias.map(|x| cmd.arg0(x));
workdir.map(|dir| cmd.current_dir(dir));
gid.map(|gid| cmd.gid(gid));
uid.map(|uid| cmd.uid(uid));
chroot.map(|dir| cmd.chroot_dir(dir));
cmd.unshare(&namespaces);
cmd.close_fds(..);
if groups.len() > 0 { cmd.groups(groups); }
if escape_stdout {
cmd.stdout(unshare::Stdio::piped());
}
if let Some(var) = pid_env_var {
cmd.env_var_with_pid(var);
}
if verbose {
// TODO(tailhook) implement display/debug in Command itself
writeln!(&mut stderr(), "Command {:?}", cmd).ok();
}
let mut child = match cmd.spawn() {
Ok(child) => { child }
Err(e) => {
writeln!(&mut stderr(), "Error: {}", e).ok();
exit(127);
}
};
if verbose {
writeln!(&mut stderr(), "Child pid {}", child.id()).ok();
}
if escape_stdout {
let mut buf = Vec::new();
child.stdout.take().unwrap().read_to_end(&mut buf).unwrap();
writeln!(&mut stderr(), "{:?}",
String::from_utf8_lossy(&buf[..])).unwrap();
}
let res = child.wait().unwrap();
if verbose {
writeln!(&mut stderr(), "[pid {}] {}", child.id(), res).ok();
}
}
| rust | Apache-2.0 | 6cdc15d97aca90f59d1427e01da4c461184d0fe4 | 2026-01-04T20:21:52.548549Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/examples/rust/lib.rs | examples/rust/lib.rs | pub mod getting_started;
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/examples/rust/snippets/getting_started.rs | examples/rust/snippets/getting_started.rs | use anyhow::{Result};
use bip39::{Language, Mnemonic};
use gl_client::{
bitcoin::Network,
credentials::{Device, Nobody},
node::ClnClient,
pb::{cln, cln::{amount_or_any, Amount, AmountOrAny}},
scheduler::Scheduler,
signer::Signer,
};
use rand::RngCore;
use std::{env, fs, path::PathBuf};
use tokio;
const NETWORK: Network = Network::Regtest;
const TEST_NODE_DATA_DIR: &str = "/tmp/gltests/node2";
fn save_to_file(file_name: &str, data: &[u8]) -> Result<()> {
let path = PathBuf::from(TEST_NODE_DATA_DIR).join(file_name);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)?;
}
fs::write(&path, data)?;
Ok(())
}
fn read_file(file_name: &str) -> Result<Vec<u8>> {
let path = PathBuf::from(TEST_NODE_DATA_DIR).join(file_name);
Ok(fs::read(path)?)
}
async fn upgrade_device_certs_to_creds(
scheduler: &Scheduler<Nobody>,
signer: &Signer,
creds_path: &str,
) -> Result<Device> {
// ---8<--- [start: upgrade_device_certs_to_creds]
let device = Device::from_path(creds_path);
let upgraded = device.upgrade(scheduler, signer).await?;
save_to_file("credentials_upgraded.gfs", &upgraded.to_bytes())?;
// ---8<--- [end: upgrade_device_certs_to_creds]
Ok(upgraded)
}
fn create_seed() -> Result<Vec<u8>> {
// ---8<--- [start: create_seed]
let mut rng = rand::thread_rng();
let mut entropy = [0u8; 32];
rng.fill_bytes(&mut entropy);
// Seed phrase for user
let mnemonic = Mnemonic::from_entropy_in(Language::English, &entropy)?;
let _phrase = mnemonic.words().collect::<Vec<_>>().join(" ");
const EMPTY_PASSPHRASE: &str = "";
let seed = &mnemonic.to_seed(EMPTY_PASSPHRASE)[0..32]; // Only need the first 32 bytes
// Store the seed on the filesystem, or secure configuration system
save_to_file("hsm_secret", seed)?;
// ---8<--- [end: create_seed]
Ok(seed.to_vec())
}
fn load_developer_creds() -> Result<Nobody> {
// ---8<--- [start: dev_creds]
let developer_cert_path = env::var("GL_NOBODY_CRT")?;
let developer_key_path = env::var("GL_NOBODY_KEY")?;
let developer_cert = std::fs::read(developer_cert_path).unwrap_or_default();
let developer_key = std::fs::read(developer_key_path).unwrap_or_default();
let developer_creds = Nobody {
cert: developer_cert,
key: developer_key,
..Nobody::default()
};
// ---8<--- [end: dev_creds]
Ok(developer_creds)
}
async fn register_node(seed: Vec<u8>, developer_creds: Nobody) -> Result<(Scheduler<Nobody>, Device, Signer)> {
// ---8<--- [start: init_signer]
let signer = Signer::new(seed.clone(), NETWORK, developer_creds.clone())?;
// ---8<--- [end: init_signer]
// ---8<--- [start: register_node]
let scheduler = Scheduler::new(NETWORK, developer_creds).await?;
// Passing in the signer is required because the client needs to prove
// ownership of the `node_id`
let registration_response = scheduler.register(&signer, None).await?;
// ---8<--- [start: device_creds]
let device_creds = Device::from_bytes(registration_response.creds);
save_to_file("credentials.gfs", &device_creds.to_bytes())?;
// ---8<--- [end: device_creds]
// ---8<--- [end: register_node]
Ok((scheduler, device_creds, signer))
}
async fn get_node(scheduler: &Scheduler<Device>) -> Result<ClnClient> {
// ---8<--- [start: get_node]
let node = scheduler.node().await?;
// ---8<--- [end: get_node]
Ok(node)
}
async fn start_node(device_creds_file_path: &str) -> Result<(cln::GetinfoResponse, cln::ListpeersResponse, cln::InvoiceResponse)> {
// ---8<--- [start: start_node]
let creds = Device::from_path(device_creds_file_path);
let scheduler = Scheduler::new(NETWORK, creds.clone()).await?;
let mut node: ClnClient = scheduler.node().await?;
// ---8<--- [end: start_node]
// ---8<--- [start: list_peers]
let info = node.getinfo(cln::GetinfoRequest::default()).await?;
let info = info.into_inner();
let peers = node.list_peers(cln::ListpeersRequest::default()).await?;
let peers = peers.into_inner();
// ---8<--- [end: list_peers]
// ---8<--- [start: start_signer]
let seed = read_file("hsm_secret")?;
let signer = Signer::new(seed, NETWORK, creds.clone())?;
let (_tx, rx) = tokio::sync::mpsc::channel(1);
tokio::spawn(async move {
signer.run_forever(rx).await.unwrap();
});
// ---8<--- [end: start_signer]
// ---8<--- [start: create_invoice]
let amount = AmountOrAny {
value: Some(amount_or_any::Value::Amount(Amount { msat: 10_000 })),
};
let invoice = node
.invoice(cln::InvoiceRequest {
amount_msat: Some(amount),
description: format!("desc_{}", rand::random::<u32>()),
label: format!("label_{}", rand::random::<u32>()),
..Default::default()
})
.await?;
let invoice = invoice.into_inner();
// ---8<--- [end: create_invoice]
Ok((info, peers, invoice))
}
async fn recover_node(dev_creds: Nobody) -> Result<(Scheduler<Nobody>, Device, Signer)> {
// ---8<--- [start: recover_node]
let seed = read_file("hsm_secret")?;
let signer = Signer::new(seed.clone(), NETWORK, dev_creds.clone())?;
let scheduler = Scheduler::new(NETWORK, dev_creds).await?;
let recover_response = scheduler.recover(&signer).await?;
// ---8<--- [end: recover_node]
let device_creds = Device::from_bytes(recover_response.creds);
save_to_file("credentials.gfs", &device_creds.to_bytes())?;
Ok((scheduler, device_creds, signer))
}
#[tokio::main]
async fn main() -> Result<()> {
println!("Creating seed...");
let seed = create_seed()?;
println!("Loading developer credentials...");
let developer_creds = load_developer_creds()?;
println!("Registering node...");
let (scheduler, device_creds, signer) = register_node(seed, developer_creds.clone()).await?;
println!("Node Registered!");
println!("Getting node information...");
let device_scheduler = Scheduler::new(NETWORK, device_creds.clone()).await?;
let _gl_node = get_node(&device_scheduler).await?;
let (info, peers, invoice) = start_node(&format!("{TEST_NODE_DATA_DIR}/credentials.gfs")).await?;
println!("Node pubkey: {}", hex::encode(info.id));
println!("Peers list: {:?}", peers.peers);
println!("Invoice created: {}", invoice.bolt11);
println!("Upgrading certs...");
let _upgraded = upgrade_device_certs_to_creds(&scheduler, &signer, &format!("{TEST_NODE_DATA_DIR}/credentials.gfs")).await?;
println!("Recovering node...");
let (_scheduler2, _device_creds2, _signer2) = recover_node(developer_creds.clone()).await?;
println!("Node Recovered!");
let (info, _peers, _invoice) = start_node(&format!("{TEST_NODE_DATA_DIR}/credentials.gfs")).await?;
println!("Node pubkey: {}", hex::encode(info.id));
println!("All steps completed successfully!");
Ok(())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/signer.rs | libs/gl-client-py/src/signer.rs | use crate::credentials::Credentials;
use gl_client::bitcoin::Network;
use log::warn;
use pyo3::{exceptions::PyValueError, prelude::*};
use tokio::sync::mpsc;
#[pyclass]
#[derive(Clone)]
pub struct Signer {
pub(crate) inner: gl_client::signer::Signer,
}
#[pymethods]
impl Signer {
#[new]
fn new(secret: Vec<u8>, network: String, creds: Credentials) -> PyResult<Signer> {
let network: Network = match network.parse() {
Ok(network) => network,
Err(_) => {
return Err(pyo3::exceptions::PyValueError::new_err(format!(
"Unknown / unsupported network {}",
network
)))
}
};
let inner = match gl_client::signer::Signer::new(secret, network, creds.inner) {
Ok(v) => v,
Err(e) => {
return Err(pyo3::exceptions::PyValueError::new_err(format!(
"Error initializing Signer: {}",
e
)))
}
};
Ok(Signer { inner })
}
fn run_in_thread(&mut self) -> PyResult<SignerHandle> {
trace!("Starting a new thread for signer");
let inner = self.inner.clone();
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
let (tx, rx) = mpsc::channel(1);
std::thread::spawn(move || {
runtime.block_on(async move {
if let Err(e) = inner.run_forever(rx).await {
log::error!("Error running signer in thread: {e}")
}
})
});
Ok(SignerHandle { signal: tx })
}
fn run_in_foreground(&self) -> PyResult<()> {
trace!("Running signer in foreground thread");
let (_tx, rx) = mpsc::channel(1);
let res = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(async { self.inner.run_forever(rx).await });
match res {
Ok(_) => Ok(()),
Err(e) => Err(pyo3::exceptions::PyValueError::new_err(format!(
"Error running Signer: {}",
e
))),
}
}
fn node_id(&self) -> Vec<u8> {
self.inner.node_id()
}
fn init(&self) -> Vec<u8> {
self.inner.get_init()
}
fn bip32_key(&self) -> Vec<u8> {
self.inner.get_init()[35..].to_vec()
}
fn sign_challenge(&self, challenge: Vec<u8>) -> PyResult<Vec<u8>> {
match self.inner.sign_challenge(challenge) {
Ok(v) => Ok(v),
Err(e) => Err(pyo3::exceptions::PyValueError::new_err(e.to_string())),
}
}
fn version(&self) -> PyResult<&'static str> {
Ok(self.inner.version())
}
fn create_rune(&self, restrictions: Vec<Vec<&str>>, rune: Option<&str>) -> PyResult<String> {
self.inner
.create_rune(rune, restrictions)
.map_err(|e| PyValueError::new_err(e.to_string()))
}
}
#[pyclass]
#[derive(Clone)]
pub struct SignerHandle {
pub(crate) signal: mpsc::Sender<()>,
}
#[pymethods]
impl SignerHandle {
fn shutdown(&self) -> PyResult<()> {
if let Err(e) = self.signal.try_send(()) {
warn!("Failed to send shutdown signal, signer may already be stopped: {e}");
}
Ok(())
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/tls.rs | libs/gl-client-py/src/tls.rs | use gl_client::tls;
use pyo3::exceptions::PyFileNotFoundError;
use pyo3::prelude::*;
#[pyclass]
#[derive(Clone)]
pub struct TlsConfig {
pub(crate) inner: tls::TlsConfig,
}
#[pymethods]
impl TlsConfig {
#[new]
fn new() -> PyResult<TlsConfig> {
let inner = tls::TlsConfig::new();
Ok(Self { inner })
}
fn identity(&self, cert_pem: Vec<u8>, key_pem: Vec<u8>) -> Self {
Self {
inner: self.inner.clone().identity(cert_pem, key_pem),
}
}
fn identity_from_path(&self, path: &str) -> Result<Self, PyErr> {
let result = Self {
inner: self
.inner
.clone()
.identity_from_path(path)
.map_err(|_| PyFileNotFoundError::new_err(String::from(path)))?,
};
return Ok(result);
}
fn with_ca_certificate(&self, ca: Vec<u8>) -> TlsConfig {
Self {
inner: self.inner.clone().ca_certificate(ca),
}
}
fn ca_certificate(&self) -> Vec<u8> {
self.inner.ca.clone()
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/node.rs | libs/gl-client-py/src/node.rs | use crate::credentials::Credentials;
use crate::runtime::exec;
use crate::scheduler::convert;
use gl_client as gl;
use gl_client::pb;
use prost::Message;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use tonic::{Code, Status};
#[pyclass]
/// Python handle to a running Greenlight node.
///
/// Holds three gRPC clients created against the same node endpoint: the
/// Greenlight service client, the generic byte-oriented client used for
/// pass-through `call`, and the CLN client.
pub struct Node {
    client: gl::node::Client,
    gclient: gl::node::GClient,
    // Kept alive alongside the other clients; not used by the methods
    // visible in this file.
    cln_client: gl::node::ClnClient,
}
#[pymethods]
impl Node {
    /// Connects to the node identified by `node_id` at `grpc_uri`.
    ///
    /// Requires device credentials; raises `ValueError` if the credentials
    /// are not of the device variant or the node handle cannot be built.
    #[new]
    fn new(node_id: Vec<u8>, grpc_uri: String, creds: Credentials) -> PyResult<Self> {
        creds.ensure_device()?;
        let inner = gl::node::Node::new(node_id, creds.inner)
            .map_err(|s| PyValueError::new_err(s.to_string()))?;
        node_from_inner(inner, grpc_uri)
    }
    /// Generic pass-through: invokes `method` with an already-serialized
    /// protobuf `payload` and returns the raw response bytes.
    fn call(&self, method: &str, payload: Vec<u8>) -> PyResult<Vec<u8>> {
        exec(self.gclient.clone().call(method, payload))
            .map(|x| x.into_inner().to_vec())
            .map_err(|s| PyValueError::new_err(format!("Error calling {}: {}", method, s)))
    }
    /// Opens a server-side stream of node log entries.
    ///
    /// `args` is a serialized `StreamLogRequest`.
    fn stream_log(&self, args: &[u8]) -> PyResult<LogStream> {
        let req = pb::StreamLogRequest::decode(args).map_err(error_decoding_request)?;
        let stream = exec(self.client.clone().stream_log(req))
            .map(|x| x.into_inner())
            .map_err(error_starting_stream)?;
        Ok(LogStream { inner: stream })
    }
    /// Opens a server-side stream of incoming payments.
    ///
    /// `args` is a serialized `StreamIncomingFilter`.
    fn stream_incoming(&self, args: &[u8]) -> PyResult<IncomingStream> {
        let req = pb::StreamIncomingFilter::decode(args).map_err(error_decoding_request)?;
        let stream = exec(self.client.clone().stream_incoming(req))
            .map(|x| x.into_inner())
            .map_err(error_starting_stream)?;
        Ok(IncomingStream { inner: stream })
    }
    /// Opens a server-side stream of custom peer messages.
    ///
    /// `args` is a serialized `StreamCustommsgRequest`.
    fn stream_custommsg(&self, args: &[u8]) -> PyResult<CustommsgStream> {
        let req = pb::StreamCustommsgRequest::decode(args).map_err(error_decoding_request)?;
        let stream = exec(self.client.clone().stream_custommsg(req))
            .map(|x| x.into_inner())
            .map_err(error_starting_stream)?;
        Ok(CustommsgStream { inner: stream })
    }
    /// Pays a bolt11 invoice via a trampoline node.
    ///
    /// Optional arguments fall back to the protobuf default value
    /// (0 / empty string) when `None`, matching proto3 semantics.
    /// Returns the serialized response message.
    fn trampoline_pay(
        &self,
        bolt11: String,
        trampoline_node_id: Vec<u8>,
        amount_msat: Option<u64>,
        label: Option<String>,
        maxfeepercent: Option<f32>,
        maxdelay: Option<u32>,
        description: Option<String>,
    ) -> PyResult<Vec<u8>> {
        let req = pb::TrampolinePayRequest {
            bolt11,
            trampoline_node_id,
            amount_msat: amount_msat.unwrap_or_default(),
            label: label.unwrap_or_default(),
            maxfeepercent: maxfeepercent.unwrap_or_default(),
            maxdelay: maxdelay.unwrap_or_default(),
            description: description.unwrap_or_default(),
        };
        let res = exec(async { self.client.clone().trampoline_pay(req).await })
            .map_err(error_calling_remote_method)?
            .into_inner();
        convert(Ok(res))
    }
    /// Pushes a serialized `GlConfig` to the node.
    fn configure(&self, payload: &[u8]) -> PyResult<()> {
        let req = pb::GlConfig::decode(payload).map_err(error_decoding_request)?;
        exec(self.client.clone().configure(req))
            .map(|x| x.into_inner())
            .map_err(error_calling_remote_method)?;
        return Ok(());
    }
    /// Requests an invoice from the LSP.
    ///
    /// `lsp_id` is intentionally left empty; optional arguments fall back
    /// to the protobuf defaults. Returns the serialized response.
    fn lsps_invoice(
        &self,
        label: String,
        description: String,
        amount_msat: Option<u64>,
        token: Option<String>,
    ) -> PyResult<Vec<u8>> {
        let req = pb::LspInvoiceRequest {
            amount_msat: amount_msat.unwrap_or_default(),
            description: description,
            label: label,
            lsp_id: "".to_owned(),
            token: token.unwrap_or_default(),
        };
        let res = exec(async { self.client.clone().lsp_invoice(req).await })
            .map_err(error_calling_remote_method)
            .map(|x| x.into_inner())?;
        convert(Ok(res))
    }
}
/// Maps a protobuf decode failure to a Python `ValueError`.
fn error_decoding_request<D: core::fmt::Display>(e: D) -> PyErr {
    PyValueError::new_err(format!("error decoding request: {}", e))
}
/// Maps a failed remote gRPC call to a Python `ValueError`.
pub fn error_calling_remote_method<D: core::fmt::Display>(e: D) -> PyErr {
    PyValueError::new_err(format!("error calling remote method: {}", e))
}
/// Maps a failure to open a server-side stream to a Python `ValueError`.
fn error_starting_stream<D: core::fmt::Display>(e: D) -> PyErr {
    PyValueError::new_err(format!("Error starting stream: {}", e))
}
#[pyclass]
/// Iterator-like wrapper over the node's log-entry stream.
struct LogStream {
    inner: tonic::codec::Streaming<pb::LogEntry>,
}
#[pymethods]
impl LogStream {
    /// Blocks for the next log entry; `None` signals end of stream.
    fn next(&mut self) -> PyResult<Option<Vec<u8>>> {
        convert_stream_entry(exec(async { self.inner.message().await }))
    }
}
#[pyclass]
/// Iterator-like wrapper over the node's incoming-payment stream.
struct IncomingStream {
    inner: tonic::codec::Streaming<pb::IncomingPayment>,
}
#[pymethods]
impl IncomingStream {
    /// Blocks for the next incoming payment; `None` signals end of stream.
    fn next(&mut self) -> PyResult<Option<Vec<u8>>> {
        convert_stream_entry(exec(async { self.inner.message().await }))
    }
}
#[pyclass]
/// Iterator-like wrapper over the node's custom-message stream.
struct CustommsgStream {
    inner: tonic::codec::Streaming<pb::Custommsg>,
}
#[pymethods]
impl CustommsgStream {
    /// Blocks for the next custom message; `None` signals end of stream.
    fn next(&mut self) -> PyResult<Option<Vec<u8>>> {
        convert_stream_entry(exec(async { self.inner.message().await }))
    }
}
/// Converts one stream poll result into the Python-facing form.
///
/// Returns the serialized message, `None` at end of stream, or raises a
/// `ValueError` for stream errors. `Code::Unknown` is treated as a normal
/// end of stream (see inline comment).
fn convert_stream_entry<T: Message>(r: Result<Option<T>, Status>) -> PyResult<Option<Vec<u8>>> {
    let res = match r {
        Ok(Some(v)) => v,
        Ok(None) => return Ok(None),
        Err(e) => match e.code() {
            Code::Unknown => {
                // Unknown most likely just means we lost the
                // connection. This is due to a shutdown and shouldn't
                // be as noisy as other errors.
                return Ok(None);
            }
            _ => {
                log::warn!("ERROR {:?}", e);
                return Err(error_calling_remote_method(e));
            }
        },
    };
    // Encoding into a pre-sized Vec cannot fail, hence the unwrap.
    let mut buf = Vec::with_capacity(res.encoded_len());
    res.encode(&mut buf).unwrap();
    Ok(Some(buf))
}
/// Builds a [`Node`] by establishing the three client connections.
///
/// Raises `ValueError` if any of the connections fails.
fn node_from_inner(inner: gl::node::Node, grpc_uri: String) -> PyResult<Node> {
    // Connect all three interfaces in parallel to avoid tripling the
    // startup time. The three spawned calls look identical; each task's
    // concrete client type is picked by inference from the tuple below.
    // TODO: Could be massively simplified by using a scoped task
    // from tokio_scoped to a
    let (client, gclient, cln_client) = exec(async {
        let i = inner.clone();
        let u = grpc_uri.clone();
        let h1 = tokio::spawn(async move { i.connect(u).await });
        let i = inner.clone();
        let u = grpc_uri.clone();
        let h2 = tokio::spawn(async move { i.connect(u).await });
        let i = inner.clone();
        let u = grpc_uri.clone();
        let h3 = tokio::spawn(async move { i.connect(u).await });
        Ok::<(gl::node::Client, gl::node::GClient, gl::node::ClnClient), anyhow::Error>((
            h1.await??,
            h2.await??,
            h3.await??,
        ))
    })
    .map_err(|e| {
        pyo3::exceptions::PyValueError::new_err(format!("could not connect to node: {}", e))
    })?;
    Ok(Node {
        client,
        gclient,
        cln_client,
    })
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/lib.rs | libs/gl-client-py/src/lib.rs | use gl_client::{bitcoin, export::decrypt_with_seed};
use pyo3::prelude::*;
#[macro_use]
extern crate log;
mod credentials;
mod node;
mod pairing;
mod runtime;
mod scheduler;
mod signer;
mod tls;
pub use node::Node;
pub use scheduler::Scheduler;
pub use signer::{Signer, SignerHandle};
pub use tls::TlsConfig;
#[pyfunction]
/// Decrypts a backup blob with the node seed.
///
/// `seed` must be a valid secp256k1 secret key; raises `ValueError` on an
/// invalid seed or on decryption failure.
pub fn backup_decrypt_with_seed(encrypted: Vec<u8>, seed: Vec<u8>) -> PyResult<Vec<u8>> {
    use pyo3::exceptions::PyValueError;
    // Copy the ciphertext into a BytesMut as required by the decryptor.
    let mut bytes = bytes::BytesMut::zeroed(encrypted.len());
    bytes.clone_from_slice(&encrypted);
    let seed = bitcoin::secp256k1::SecretKey::from_slice(&seed)
        .map_err(|e| PyValueError::new_err(format!("error decoding secret: {}", e)))?;
    let res = decrypt_with_seed(bytes, &seed)
        .map_err(|e| PyValueError::new_err(format!("error decrypting: {}", e)))?;
    Ok(res[..].into())
}
/// A Python module implemented in Rust.
///
/// Registers every pyclass and free function exported by the bindings and
/// initializes logging once at import time.
#[pymodule]
fn glclient(_py: Python, m: &PyModule) -> PyResult<()> {
    env_logger::init();
    m.add_class::<Signer>()?;
    m.add_class::<SignerHandle>()?;
    m.add_class::<Node>()?;
    m.add_class::<Scheduler>()?;
    m.add_class::<TlsConfig>()?;
    m.add_class::<credentials::Credentials>()?;
    m.add_class::<pairing::NewDeviceClient>()?;
    m.add_class::<pairing::AttestationDeviceClient>()?;
    m.add_function(wrap_pyfunction!(backup_decrypt_with_seed, m)?)?;
    Ok(())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/pairing.rs | libs/gl-client-py/src/pairing.rs | use crate::credentials::{self, Credentials};
use crate::runtime::exec;
use bytes::BufMut;
use gl_client::pairing::{attestation_device, new_device, PairingSessionData};
use gl_client::pb::scheduler::GetPairingDataResponse;
use prost::Message;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use tokio::sync::mpsc;
#[pyclass]
/// Pairing client for the device that wants to be added (the "new" side).
pub struct NewDeviceClient {
    inner: new_device::Client<new_device::Connected>,
}
#[pymethods]
impl NewDeviceClient {
    /// Connects a new-device pairing client, optionally overriding the
    /// scheduler URI.
    #[new]
    fn new(creds: Credentials, uri: Option<String>) -> Result<Self> {
        let mut client = new_device::Client::new(creds.inner);
        if let Some(uri) = uri {
            client = client.with_uri(uri);
        }
        let inner = exec(client.connect())?;
        Ok(Self { inner })
    }
    /// Starts a pairing session and returns an iterable channel of
    /// pairing session events.
    fn pair_device(
        &self,
        name: &str,
        description: &str,
        restrictions: &str,
    ) -> Result<PyPairingChannelWrapper> {
        let inner = exec(self.inner.pair_device(name, description, restrictions))?;
        Ok(PyPairingChannelWrapper { inner })
    }
}
#[pyclass]
/// Pairing client for the already-trusted device that attests a new one.
pub struct AttestationDeviceClient {
    inner: attestation_device::Client<attestation_device::Connected, credentials::PyCredentials>,
}
#[pymethods]
impl AttestationDeviceClient {
    /// Connects an attestation-device pairing client, optionally
    /// overriding the scheduler URI.
    #[new]
    fn new(creds: Credentials, uri: Option<String>) -> Result<Self> {
        let mut client = attestation_device::Client::new(creds.inner)?;
        if let Some(uri) = uri {
            client = client.with_uri(uri);
        }
        let inner = exec(client.connect())?;
        Ok(AttestationDeviceClient { inner })
    }
    /// Fetches the pairing data for `device_id`, serialized as protobuf.
    fn get_pairing_data(&self, device_id: &str) -> Result<Vec<u8>> {
        Ok(convert(exec(async move {
            self.inner.get_pairing_data(device_id).await
        }))?)
    }
    /// Approves the pairing request, serialized response as protobuf.
    fn approve_pairing(
        &self,
        device_id: &str,
        device_name: &str,
        restrs: &str,
    ) -> Result<Vec<u8>> {
        Ok(convert(exec(async move {
            self.inner
                .approve_pairing(device_id, device_name, restrs)
                .await
        }))?)
    }
    /// Verifies a serialized `GetPairingDataResponse` received earlier.
    fn verify_pairing_data(&self, data: Vec<u8>) -> Result<()> {
        let pd = GetPairingDataResponse::decode(&data[..])?;
        Ok(attestation_device::Client::<
            attestation_device::Connected,
            credentials::PyCredentials,
        >::verify_pairing_data(pd)?)
    }
}
/// A wrapper class to return an iterable from a mpsc channel.
#[pyclass]
pub struct PyPairingChannelWrapper {
    inner: mpsc::Receiver<PairingSessionData>,
}
#[pymethods]
impl PyPairingChannelWrapper {
    fn __iter__(slf: PyRef<Self>) -> PyRef<Self> {
        slf
    }
    /// Iterator protocol: errors from `recv` are deliberately swallowed
    /// (`.ok()`) so a channel error simply ends the iteration.
    fn __next__(mut slf: PyRefMut<'_, Self>) -> Option<Vec<u8>> {
        slf.recv().ok()
    }
    /// Blocks for the next pairing session event.
    ///
    /// Returns a type-tagged serialized message (see `convert_pairing`);
    /// raises `ValueError` on a pairing error or when the channel closes.
    fn recv(&mut self) -> PyResult<Vec<u8>> {
        let receiver = &mut self.inner;
        exec(async move {
            match receiver.recv().await {
                Some(data) => match data {
                    PairingSessionData::PairingResponse(d) => convert_pairing(d, 1),
                    PairingSessionData::PairingQr(d) => convert_pairing(d, 2),
                    PairingSessionData::PairingError(d) => {
                        debug!("pairing returned a PairingError {}", d);
                        Err(PyValueError::new_err(d.to_string()))
                    }
                },
                None => Err(PyValueError::new_err("channel error")),
            }
        })
    }
}
// Serializes `msg` with a single leading tag byte so the Python side can
// tell the message variant apart (1 = PairingResponse, 2 = PairingQr).
pub fn convert_pairing<T: Message>(msg: T, typ: u8) -> PyResult<Vec<u8>> {
    let mut out = Vec::with_capacity(msg.encoded_len() + 1);
    out.put_u8(typ);
    match msg.encode(&mut out) {
        Ok(()) => Ok(out),
        Err(e) => Err(PyValueError::new_err(e.to_string())),
    }
}
// Serializes a successful protobuf message into its byte representation,
// passing any error through unchanged.
fn convert<T: Message, E>(r: Result<T, E>) -> Result<Vec<u8>, E> {
    r.map(|msg| {
        let mut encoded = Vec::with_capacity(msg.encoded_len());
        // Writing into a Vec cannot fail.
        msg.encode(&mut encoded).unwrap();
        encoded
    })
}
// Local Result alias defaulting to the module's error wrapper.
type Result<T, E = ErrorWrapper> = std::result::Result<T, E>;
/// Unified error type for the pairing bindings, convertible to a Python
/// `ValueError` via the `From` impl below.
#[derive(thiserror::Error, Debug)]
pub enum ErrorWrapper {
    #[error("{}", .0)]
    PairingError(#[from] gl_client::pairing::Error),
    #[error("{}", .0)]
    ProtoError(#[from] prost::DecodeError),
}
impl From<ErrorWrapper> for pyo3::PyErr {
    fn from(value: ErrorWrapper) -> Self {
        PyErr::new::<PyValueError, _>(value.to_string())
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/lsps.rs | libs/gl-client-py/src/lsps.rs | use crate::runtime::exec;
use gl_client::lsps::client::LspClient as LspClientInner;
use gl_client::lsps::error::LspsError;
use gl_client::lsps::json_rpc::{generate_random_rpc_id, JsonRpcResponse};
use gl_client::lsps::message as lsps_message;
use gl_client::node::{Client, ClnClient};
use pyo3::exceptions::{PyBaseException, PyConnectionError, PyTimeoutError, PyValueError};
use pyo3::prelude::*;
use pyo3::types::PyBytes;
use pyo3::PyErr;
use hex::ToHex;
#[pyclass]
/// Python-facing wrapper around the LSPS JSON-RPC client.
pub struct LspClient {
    lsp_client: LspClientInner,
}
impl LspClient {
    /// Builds the wrapper from the two node clients it multiplexes over.
    pub fn new(client: Client, cln_client: ClnClient) -> Self {
        LspClient {
            lsp_client: LspClientInner::new(client, cln_client),
        }
    }
}
/// Translates an [`LspsError`] into the closest matching Python exception
/// type: `ValueError` for parse/method problems, `ConnectionError` for
/// transport problems, `TimeoutError` for timeouts, and a bare
/// `BaseException` for anything else.
fn lsps_err_to_py_err(err: &LspsError) -> PyErr {
    match err {
        LspsError::ConnectionClosed => PyConnectionError::new_err("Failed to connect"),
        LspsError::GrpcError(status) => {
            PyConnectionError::new_err(status.message().to_string())
        }
        LspsError::Timeout => PyTimeoutError::new_err("Did not receive a response from the LSPS"),
        LspsError::MethodUnknown(method_name) => {
            PyValueError::new_err(format!("Unknown method {:?}", method_name))
        }
        LspsError::JsonParseRequestError(error) => {
            PyValueError::new_err(format!("Failed to parse json-request, {:}", error))
        }
        LspsError::JsonParseResponseError(error) => {
            PyValueError::new_err(format!("Failed to parse json-response, {:}", error))
        }
        LspsError::Other(error_message) => PyBaseException::new_err(error_message.to_string()),
    }
}
#[pymethods]
impl LspClient {
    // When doing ffi with python we'd like to keep the interface as small as possible.
    //
    // We already have JSON-serialization and deserialization working because the underlying protocol uses JSON-rpc
    //
    // When one of the JSON-rpc method is called from python the user can just specify the peer-id and the serialized parameter they want to send
    // The serialized result will be returned
    /// Performs a JSON-RPC call with a freshly generated random request id.
    pub fn rpc_call(
        &mut self,
        py: Python,
        peer_id: &[u8],
        method_name: &str,
        value: &[u8],
    ) -> PyResult<PyObject> {
        let json_rpc_id = generate_random_rpc_id();
        self.rpc_call_with_json_rpc_id(py, peer_id, method_name, value, json_rpc_id)
    }
    /// Performs a JSON-RPC call with a caller-supplied request id.
    ///
    /// `value` is the JSON-serialized parameter payload; the serialized
    /// JSON result is returned as Python `bytes`. JSON-RPC level errors
    /// are surfaced as exceptions.
    pub fn rpc_call_with_json_rpc_id(
        &mut self,
        py: Python,
        peer_id: &[u8],
        method_name: &str,
        value: &[u8],
        json_rpc_id: String,
    ) -> PyResult<PyObject> {
        // Parse the method-name and call the rpc-request
        let rpc_response: JsonRpcResponse<Vec<u8>, Vec<u8>> =
            lsps_message::JsonRpcMethodEnum::from_method_name(method_name)
                .and_then(|method| {
                    exec(self.lsp_client.request_with_json_rpc_id(
                        peer_id,
                        &method,
                        value.to_vec(),
                        json_rpc_id,
                    ))
                })
                .map_err(|err| lsps_err_to_py_err(&err))?;
        match rpc_response {
            JsonRpcResponse::Ok(ok) => {
                let response = ok.result; // response as byte-array
                let py_object: PyObject = PyBytes::new(py, &response).into();
                return Ok(py_object);
            }
            JsonRpcResponse::Error(err) => {
                // We should be able to put the error-data in here
                // Replace this by a custom exception type
                return Err(PyBaseException::new_err(format!(
                    "{:?} - {:?}",
                    err.error.code, err.error.message
                )));
            }
        }
    }
    /// Lists known LSP server node ids as hex-encoded strings.
    pub fn list_lsp_servers(&mut self) -> PyResult<Vec<String>> {
        let result = exec(self.lsp_client.list_lsp_servers());
        match result {
            Ok(result) => Ok(result.iter().map(|x| x.encode_hex()).collect()),
            Err(err) => Err(lsps_err_to_py_err(&err)),
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/runtime.rs | libs/gl-client-py/src/runtime.rs | use ::tokio::runtime::{Builder, Runtime};
use once_cell::sync::OnceCell;
use pyo3::prelude::Python;
use std::future::Future;
// Lazily-initialized, process-wide Tokio runtime shared by all bindings.
static TOKIO_RUNTIME: OnceCell<Runtime> = OnceCell::new();
/// Returns the shared multi-threaded Tokio runtime, creating it on first use.
pub(crate) fn get_runtime<'a>() -> &'a Runtime {
    TOKIO_RUNTIME.get_or_init(|| {
        let mut builder = Builder::new_multi_thread();
        builder.enable_all();
        builder.build().expect("Unable to build Tokio runtime")
    })
}
/// Blocks on the future `f` on the shared runtime.
///
/// The GIL is explicitly released (`allow_threads`) for the duration of
/// the blocking call so Python threads can keep running, and so callbacks
/// into Python from worker threads cannot deadlock on the GIL.
pub(crate) fn exec<F, T>(f: F) -> T
where
    F: Future<Output = T> + Sized + Send,
    T: Send,
{
    Python::with_gil(|py| py.allow_threads(move || get_runtime().block_on(f)))
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/credentials.rs | libs/gl-client-py/src/credentials.rs | use crate::runtime::exec;
use crate::scheduler::Scheduler;
use crate::signer::Signer;
use gl_client::credentials::{self, NodeIdProvider, RuneProvider, TlsConfigProvider};
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::PyBytes;
// Concrete instantiation used by the Python bindings: Nobody for the
// unauthenticated side, Device for the authenticated side.
pub type PyCredentials = UnifiedCredentials<credentials::Nobody, credentials::Device>;
/// Sum type over the two credential flavours so a single Python class can
/// carry either unauthenticated (`Nobody`) or device credentials.
#[derive(Clone)]
pub enum UnifiedCredentials<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    Nobody(T),
    Device(R),
}
impl<T, R> UnifiedCredentials<T, R>
where
T: TlsConfigProvider,
R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
pub fn ensure_nobody(&self) -> Result<()> {
if let Self::Nobody(_) = self {
Ok(())
} else {
Err(credentials::Error::IsIdentityError(
"credentials are not of type nobody".to_string(),
))?
}
}
pub fn ensure_device(&self) -> Result<()> {
if let Self::Device(_) = self {
Ok(())
} else {
Err(credentials::Error::IsIdentityError(
"credentials are not of type device".to_string(),
))?
}
}
}
/// Both variants can always provide a TLS configuration.
impl<T, R> TlsConfigProvider for UnifiedCredentials<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    fn tls_config(&self) -> gl_client::tls::TlsConfig {
        match self {
            UnifiedCredentials::Nobody(n) => n.tls_config(),
            UnifiedCredentials::Device(d) => d.tls_config(),
        }
    }
}
/// Only the `Device` variant carries a rune; calling this on `Nobody`
/// panics (callers are expected to have run `ensure_device` first).
impl<T, R> RuneProvider for UnifiedCredentials<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    fn rune(&self) -> String {
        match self {
            UnifiedCredentials::Nobody(_) => panic!(
                "can not provide rune from nobody credentials! something really bad happened."
            ),
            UnifiedCredentials::Device(d) => d.rune(),
        }
    }
}
/// Only the `Device` variant carries a node id; calling this on `Nobody`
/// panics (callers are expected to have run `ensure_device` first).
impl<T, R> NodeIdProvider for UnifiedCredentials<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    fn node_id(&self) -> credentials::Result<Vec<u8>> {
        match self {
            UnifiedCredentials::Nobody(_) => panic!(
                "can not provide node_id from nobody credentials! something really bad happened."
            ),
            UnifiedCredentials::Device(d) => d.node_id(),
        }
    }
}
#[pyclass]
#[derive(Clone)]
/// Python-facing credentials object wrapping [`PyCredentials`].
pub struct Credentials {
    pub inner: PyCredentials,
}
#[pymethods]
impl Credentials {
    /// Creates unauthenticated (`Nobody`) credentials with defaults.
    #[new]
    pub fn new() -> Self {
        let inner = UnifiedCredentials::Nobody(gl_client::credentials::Nobody::default());
        log::debug!("Created NOBODY credentials");
        Self { inner }
    }
    /// Creates `Nobody` credentials from an explicit cert/key pair.
    #[staticmethod]
    pub fn nobody_with(cert: &[u8], key: &[u8]) -> Self {
        let inner = UnifiedCredentials::Nobody(gl_client::credentials::Nobody::with(cert, key));
        log::debug!("Created NOBODY credentials");
        Self { inner }
    }
    /// Loads device credentials from a file path.
    #[staticmethod]
    pub fn from_path(path: &str) -> Self {
        let inner = UnifiedCredentials::Device(gl_client::credentials::Device::from_path(path));
        log::debug!("Created device credentials");
        Self { inner }
    }
    /// Restores device credentials from their serialized byte form.
    #[staticmethod]
    pub fn from_bytes(data: &[u8]) -> Self {
        let inner = UnifiedCredentials::Device(gl_client::credentials::Device::from_bytes(data));
        log::debug!("Created device credentials");
        Self { inner }
    }
    /// Assembles device credentials from certificate, key and rune.
    #[staticmethod]
    pub fn from_parts(cert: &[u8], key: &[u8], rune: &str) -> Self {
        let inner =
            UnifiedCredentials::Device(gl_client::credentials::Device::with(cert, key, rune));
        Self { inner }
    }
    /// Upgrades device credentials against the scheduler using the signer.
    ///
    /// Raises `ValueError` for `Nobody` credentials; works with both an
    /// authenticated and an unauthenticated scheduler.
    pub fn upgrade(&self, scheduler: &Scheduler, signer: &Signer) -> Result<Credentials> {
        match &self.inner {
            UnifiedCredentials::Nobody(_) => Err(credentials::Error::IsIdentityError(
                "can not upgrade nobody credentials".to_string(),
            ))?,
            UnifiedCredentials::Device(creds) => match &scheduler.inner {
                crate::scheduler::UnifiedScheduler::Unauthenticated(u) => {
                    let d = exec(async move { creds.clone().upgrade(u, &signer.inner).await })?;
                    let inner = UnifiedCredentials::Device(d);
                    Ok(Self { inner })
                }
                crate::scheduler::UnifiedScheduler::Authenticated(a) => {
                    let d = exec(async move { creds.clone().upgrade(a, &signer.inner).await })?;
                    let inner = UnifiedCredentials::Device(d);
                    Ok(Self { inner })
                }
            },
        }
    }
    /// Serializes device credentials; raises for `Nobody` credentials.
    pub fn to_bytes<'a>(&self, py: Python<'a>) -> Result<&'a PyBytes> {
        match &self.inner {
            UnifiedCredentials::Nobody(_) => Err(credentials::Error::IsIdentityError(
                "can not convert nobody into bytes".to_string(),
            ))?,
            UnifiedCredentials::Device(d) => Ok(PyBytes::new(py, &d.to_bytes()[..])),
        }
    }
    /// Raises unless these are device credentials.
    pub fn ensure_device(&self) -> Result<()> {
        self.inner.ensure_device()
    }
    /// Raises unless these are nobody credentials.
    pub fn ensure_nobody(&self) -> Result<()> {
        self.inner.ensure_nobody()
    }
    /// Returns the node id carried by device credentials.
    pub fn node_id(&self) -> Result<Vec<u8>> {
        Ok(self.inner.node_id()?)
    }
    /// Returns a copy of the credentials with a replacement CA certificate.
    pub fn with_ca(&self, ca: &[u8]) -> Self {
        match &self.inner {
            UnifiedCredentials::Nobody(creds) => {
                let n = creds.clone().with_ca(ca);
                let inner = UnifiedCredentials::Nobody(n);
                Self { inner }
            }
            UnifiedCredentials::Device(creds) => {
                let d = creds.clone().with_ca(ca);
                let inner = UnifiedCredentials::Device(d);
                Self { inner }
            },
        }
    }
}
// Local Result alias defaulting to the module's error wrapper.
type Result<T, E = ErrorWrapper> = std::result::Result<T, E>;
/// Error type for the credentials bindings; converts into a Python
/// `ValueError` via the `From` impl below.
#[derive(thiserror::Error, Debug)]
pub enum ErrorWrapper {
    #[error("{}", .0)]
    CredentialsError(#[from] credentials::Error),
}
impl From<ErrorWrapper> for pyo3::PyErr {
    fn from(value: ErrorWrapper) -> Self {
        PyErr::new::<PyValueError, _>(value.to_string())
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client-py/src/scheduler.rs | libs/gl-client-py/src/scheduler.rs | use crate::credentials::{Credentials, PyCredentials};
use crate::runtime::exec;
use crate::Signer;
use anyhow::{anyhow, Result};
use gl_client::bitcoin::Network;
use gl_client::credentials::{NodeIdProvider, RuneProvider};
use gl_client::credentials::TlsConfigProvider;
use gl_client::pb;
use gl_client::scheduler;
use prost::Message;
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
#[derive(Clone)]
/// Sum type over the scheduler's two authentication states so the Python
/// class can hold either one.
pub enum UnifiedScheduler<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    Unauthenticated(scheduler::Scheduler<T>),
    Authenticated(scheduler::Scheduler<R>),
}
impl<T, R> UnifiedScheduler<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    /// Succeeds only in the `Authenticated` state.
    pub fn is_authenticated(&self) -> Result<()> {
        if let Self::Authenticated(_) = self {
            Ok(())
        } else {
            Err(anyhow!("scheduler is unauthenticated",))?
        }
    }
    /// Registers a new node; available in either state.
    async fn register(
        &self,
        signer: &gl_client::signer::Signer,
        invite_code: Option<String>,
    ) -> Result<pb::scheduler::RegistrationResponse> {
        match self {
            UnifiedScheduler::Unauthenticated(u) => u.register(&signer, invite_code).await,
            UnifiedScheduler::Authenticated(a) => a.register(&signer, invite_code).await,
        }
    }
    /// Recovers an existing node; available in either state.
    async fn recover(
        &self,
        signer: &gl_client::signer::Signer,
    ) -> Result<pb::scheduler::RecoveryResponse> {
        match self {
            UnifiedScheduler::Unauthenticated(u) => u.recover(&signer).await,
            UnifiedScheduler::Authenticated(a) => a.recover(&signer).await,
        }
    }
    /// Consumes an unauthenticated scheduler and returns an authenticated
    /// one; errors if already authenticated.
    async fn authenticate(self, creds: R) -> Result<Self> {
        match self {
            UnifiedScheduler::Unauthenticated(u) => {
                let inner = u.authenticate(creds).await?;
                Ok(Self::Authenticated(inner))
            }
            UnifiedScheduler::Authenticated(_) => {
                Err(anyhow!("scheduler is already authenticated"))
            }
        }
    }
}
/// The following implementations need an authenticated scheduler and
/// error out with "scheduler needs to be authenticated" otherwise.
impl<T, R> UnifiedScheduler<T, R>
where
    T: TlsConfigProvider,
    R: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    async fn export_node(&self) -> Result<pb::scheduler::ExportNodeResponse> {
        let s = self.authenticated_scheduler()?;
        s.export_node().await
    }
    async fn schedule(&self) -> Result<pb::scheduler::NodeInfoResponse> {
        let s = self.authenticated_scheduler()?;
        s.schedule().await
    }
    // NOTE(review): `node` delegates to `schedule()` rather than a
    // dedicated node lookup — looks like a copy-paste; confirm intended.
    async fn node(&self) -> Result<pb::scheduler::NodeInfoResponse> {
        let s = self.authenticated_scheduler()?;
        s.schedule().await
    }
    async fn get_node_info(&self, wait: bool) -> Result<pb::scheduler::NodeInfoResponse> {
        let s = self.authenticated_scheduler()?;
        s.get_node_info(wait).await
    }
    async fn get_invite_codes(&self) -> Result<pb::scheduler::ListInviteCodesResponse> {
        let s = self.authenticated_scheduler()?;
        s.get_invite_codes().await
    }
    async fn add_outgoing_webhook(
        &self,
        uri: String,
    ) -> Result<pb::scheduler::AddOutgoingWebhookResponse> {
        let s = self.authenticated_scheduler()?;
        s.add_outgoing_webhook(uri).await
    }
    async fn list_outgoing_webhooks(&self) -> Result<pb::scheduler::ListOutgoingWebhooksResponse> {
        let s = self.authenticated_scheduler()?;
        s.list_outgoing_webhooks().await
    }
    async fn delete_outgoing_webhooks(&self, ids: Vec<i64>) -> Result<pb::Empty> {
        let s = self.authenticated_scheduler()?;
        s.delete_webhooks(ids).await
    }
    async fn rotate_outgoing_webhook_secret(
        &self,
        webhook_id: i64,
    ) -> Result<pb::scheduler::WebhookSecretResponse> {
        let s = self.authenticated_scheduler()?;
        s.rotate_outgoing_webhook_secret(webhook_id).await
    }
    /// Returns the inner authenticated scheduler or errors.
    fn authenticated_scheduler(&self) -> Result<&scheduler::Scheduler<R>> {
        match self {
            UnifiedScheduler::Unauthenticated(_) => {
                Err(anyhow!("scheduler needs to be authenticated"))
            }
            UnifiedScheduler::Authenticated(a) => Ok(a),
        }
    }
}
#[pyclass]
/// Python-facing scheduler; authentication state follows the credential
/// variant it was constructed with.
pub struct Scheduler {
    pub inner: UnifiedScheduler<PyCredentials, PyCredentials>,
}
#[pymethods]
impl Scheduler {
    /// Connects to the scheduler for `network`.
    ///
    /// `Nobody` credentials yield an unauthenticated scheduler, device
    /// credentials an authenticated one. Raises `ValueError` on an
    /// unknown network name or a connection failure.
    #[new]
    fn new(network: &str, creds: Credentials) -> PyResult<Scheduler> {
        let network: Network = network
            .parse()
            .map_err(|_| PyValueError::new_err("Error parsing the network"))?;
        let uri = gl_client::utils::scheduler_uri();
        let inner = match creds.inner {
            crate::credentials::UnifiedCredentials::Nobody(_) => {
                let scheduler = exec(async move {
                    gl_client::scheduler::Scheduler::with(network, creds.inner.clone(), uri)
                        .await
                })
                .map_err(|e| PyValueError::new_err(e.to_string()))?;
                UnifiedScheduler::Unauthenticated(scheduler)
            }
            crate::credentials::UnifiedCredentials::Device(_) => {
                let scheduler = exec(async move {
                    gl_client::scheduler::Scheduler::with(network, creds.inner.clone(), uri)
                        .await
                })
                .map_err(|e| PyValueError::new_err(e.to_string()))?;
                UnifiedScheduler::Authenticated(scheduler)
            }
        };
        Ok(Scheduler { inner })
    }
    /// Registers a new node; returns the serialized response.
    fn register(&self, signer: &Signer, invite_code: Option<String>) -> PyResult<Vec<u8>> {
        convert(exec(async {
            self.inner.register(&signer.inner, invite_code).await
        }))
    }
    /// Recovers an existing node; returns the serialized response.
    fn recover(&self, signer: &Signer) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.recover(&signer.inner).await }))
    }
    /// Returns a new, authenticated scheduler built from device
    /// credentials; raises `ValueError` otherwise.
    fn authenticate(&self, creds: Credentials) -> PyResult<Self> {
        creds.ensure_device().map_err(|_| {
            PyValueError::new_err(
                "can not authenticate scheduler, need device credentials".to_string(),
            )
        })?;
        let s =
            exec(async { self.inner.clone().authenticate(creds.inner).await }).map_err(|e| {
                PyValueError::new_err(format!(
                    "could not authenticate scheduler {}",
                    e.to_string()
                ))
            })?;
        Ok(Scheduler {
            inner: s,
        })
    }
    // The following calls all require an authenticated scheduler and
    // return serialized protobuf responses.
    fn export_node(&self) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.export_node().await }))
    }
    fn schedule(&self) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.schedule().await }))
    }
    fn node(&self) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.node().await }))
    }
    fn get_invite_codes(&self) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.get_invite_codes().await }))
    }
    fn get_node_info(&self, wait: bool) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.get_node_info(wait).await }))
    }
    fn add_outgoing_webhook(&self, uri: String) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.add_outgoing_webhook(uri).await }))
    }
    fn list_outgoing_webhooks(&self) -> PyResult<Vec<u8>> {
        convert(exec(async { self.inner.list_outgoing_webhooks().await }))
    }
    fn delete_outgoing_webhooks(&self, webhook_ids: Vec<i64>) -> PyResult<Vec<u8>> {
        convert(exec(async {
            self.inner.delete_outgoing_webhooks(webhook_ids).await
        }))
    }
    fn rotate_outgoing_webhook_secret(&self, webhook_id: i64) -> PyResult<Vec<u8>> {
        convert(exec(async {
            self.inner.rotate_outgoing_webhook_secret(webhook_id).await
        }))
    }
}
/// Encodes a successful protobuf response into bytes for the Python side,
/// mapping call errors to a Python `ValueError`.
pub fn convert<T: Message>(r: Result<T>) -> PyResult<Vec<u8>> {
    r.map_err(crate::node::error_calling_remote_method)
        .map(|msg| {
            let mut encoded = Vec::with_capacity(msg.encoded_len());
            // Writing into a Vec cannot fail.
            msg.encode(&mut encoded).unwrap();
            encoded
        })
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/build.rs | libs/gl-plugin/build.rs | fn main() {
tonic_build::configure()
.build_client(true)
.build_server(true)
.type_attribute(
"TrampolinePayRequest",
"#[derive(serde::Serialize, serde::Deserialize)]",
)
.compile(
&[".resources/proto/glclient/greenlight.proto"],
&[".resources/proto/glclient"],
)
.unwrap();
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/unix.rs | libs/gl-plugin/src/unix.rs | use std::{
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tonic::transport::server::Connected;
#[derive(Debug)]
/// Newtype around [`tokio::net::UnixStream`] so tonic's `Connected` trait
/// can be implemented for it.
pub struct UnixStream(pub tokio::net::UnixStream);
impl Connected for UnixStream {
    type ConnectInfo = UdsConnectInfo;
    /// Captures peer address and credentials; both are best-effort and
    /// become `None` if the underlying calls fail.
    fn connect_info(&self) -> Self::ConnectInfo {
        UdsConnectInfo {
            peer_addr: self.0.peer_addr().ok().map(Arc::new),
            peer_cred: self.0.peer_cred().ok(),
        }
    }
}
#[derive(Clone, Debug)]
#[allow(dead_code)] // TODO: Check if this is really needed.
/// Connection metadata for a Unix-domain-socket peer.
pub struct UdsConnectInfo {
    pub peer_addr: Option<Arc<tokio::net::unix::SocketAddr>>,
    pub peer_cred: Option<tokio::net::unix::UCred>,
}
/// Straight delegation of reads to the wrapped stream.
impl AsyncRead for UnixStream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.0).poll_read(cx, buf)
    }
}
/// Straight delegation of writes/flush/shutdown to the wrapped stream.
impl AsyncWrite for UnixStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        Pin::new(&mut self.0).poll_write(cx, buf)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.0).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.0).poll_shutdown(cx)
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/config.rs | libs/gl-plugin/src/config.rs | use anyhow::{anyhow, Context, Result};
use log::trace;
use std::net::SocketAddr;
use tonic::transport;
/// Enumeration of supported networks
///
/// The explicit discriminants match the numeric encoding accepted by
/// `TryFrom<i16>` below.
#[derive(Clone, Debug)]
pub enum Network {
    Bitcoin = 0,
    Testnet = 1,
    Regtest = 2,
}
/// Decodes the numeric network encoding (0/1/2) used in storage.
impl TryFrom<i16> for Network {
    type Error = anyhow::Error;
    fn try_from(i: i16) -> Result<Network> {
        let network = match i {
            0 => Network::Bitcoin,
            1 => Network::Testnet,
            2 => Network::Regtest,
            e => return Err(anyhow!("Unknown numeric network {}", e)),
        };
        Ok(network)
    }
}
/// Parses a case-insensitive network name.
impl TryFrom<String> for Network {
    type Error = anyhow::Error;
    fn try_from(s: String) -> Result<Network> {
        let network = match s.to_lowercase().as_ref() {
            "bitcoin" => Network::Bitcoin,
            "testnet" => Network::Testnet,
            "regtest" => Network::Regtest,
            o => return Err(anyhow!("Unknown network {}", o)),
        };
        Ok(network)
    }
}
/// Renders the lowercase network name, the inverse of `TryFrom<String>`.
impl std::fmt::Display for Network {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        let name = match self {
            Network::Bitcoin => "bitcoin",
            Network::Testnet => "testnet",
            Network::Regtest => "regtest",
        };
        write!(f, "{}", name)
    }
}
#[derive(Clone, Debug)]
/// A TLS identity (cert + key) together with the CA it chains to.
pub struct Identity {
    pub id: transport::Identity,
    pub ca: transport::Certificate,
}
impl Identity {
    /// Loads an identity from the file-system
    ///
    /// Starting from the current directory, we go and look for
    /// certificates and keys for the specified identity. If
    /// `GL_CERT_PATH` is set we instead use that path to look for
    /// the certificates.
    ///
    /// If any of the files we expect to find is missing
    /// (`ca.pem`, `<path>.crt` or `<path>-key.pem`), an error is
    /// returned.
    pub fn from_path(path: &str) -> Result<Identity> {
        let mut dir = std::env::current_dir()?;
        dir.push(std::env::var("GL_CERT_PATH").unwrap_or("./certs/".into()));
        let mut dir = dir.canonicalize().with_context(|| {
            format!(
                "could not canonicalize GL_CERT_PATH={}",
                dir.to_string_lossy()
            )
        })?;
        let cacert = dir.join("ca.pem");
        trace!("Loading root CA from {}", cacert.to_string_lossy());
        /* The root CA cert is at the root of the certs directory. */
        let ca = std::fs::read(cacert).with_context(|| {
            format!(
                "Could not load CA certificate from {}",
                dir.join("ca.pem").to_string_lossy()
            )
        })?;
        /* Find the subdirectory. */
        // `path` is slash-separated; skip(1) drops the empty segment
        // before the leading "/".
        for p in path.to_string().split("/").skip(1).collect::<Vec<&str>>() {
            dir = dir.join(p);
        }
        let client_key_path = format!("{}-key.pem", dir.to_string_lossy());
        let client_cert_path = format!("{}.crt", dir.to_string_lossy());
        trace!(
            "Loading identity from {} and certificate from {}",
            client_key_path,
            client_cert_path
        );
        let client_cert = std::fs::read(&client_cert_path).with_context(|| {
            format!(
                "could not read client certificate from {:?}",
                client_cert_path
            )
        })?;
        let client_key = std::fs::read(&client_key_path)
            .with_context(|| format!("could not read client key from {:?}", client_key_path))?;
        Ok(Identity {
            id: tonic::transport::Identity::from_pem(client_cert, client_key),
            ca: tonic::transport::Certificate::from_pem(ca),
        })
    }
}
/// Runtime configuration for the plugin, assembled from environment
/// variables and on-disk files in [`Config::new`].
#[derive(Clone, Debug)]
pub struct Config {
    /// mTLS identity used when talking to other services.
    pub identity: Identity,
    /// Path of the unix socket the hsmd proxy listens on.
    pub hsmd_sock_path: String,
    /// Address the node's grpc interface binds to (from `GL_NODE_BIND`).
    pub node_grpc_binding: String,
    /// Node id and init message, from the environment.
    pub node_info: NodeInfo,
    /// Optional URI of the public watchtower grpc endpoint, if configured.
    pub towerd_public_grpc_uri: Option<String>,
    /// The `clientca` is the CA we're enforcing when connecting to
    /// other services. This means that services must have a valid
    /// certificate under this CA otherwise the connection is
    /// closed. This is _not_ the CA we use to enforce User
    /// identities. See [`Identity`] for this purpose.
    pub clientca: tonic::transport::Certificate,
    /// The `Nodelet` told us that we're running on this network.
    pub network: Network,
    /// Protobuf-encoded node configuration loaded from `node_config.pb`.
    pub node_config: NodeConfig,
}
impl Config {
pub fn new() -> Result<Self> {
let binding: SocketAddr = std::env::var("GL_NODE_BIND")
.context("Missing GL_NODE_BIND environment variable")?
.parse()
.context("Could not parse address from GL_BIND_NODE")?;
let towerd_public_grpc_uri: Option<String> = std::env::var("GL_TOWER_PUBLIC_GRPC_URI").ok();
let clientca_path: String = std::env::var("GL_PLUGIN_CLIENTCA_PATH")
.context("Missing GL_PLUGIN_CLIENTCA_PATH environment variable")?;
let identity = Identity::from_path(&"/users/1/server")?;
let clientca = tonic::transport::Certificate::from_pem(std::fs::read(clientca_path)?);
let network: Network = std::env::var("GL_NODE_NETWORK")
.context("Missing GL_NODE_NETWORK")?
.try_into()
.context("Unknown network in GL_NODE_NETWORK")?;
let mut cfg = std::env::current_dir()?;
cfg.push("node_config.pb");
let node_config = NodeConfig::from_file(cfg.as_path())?;
Ok(Config {
identity,
hsmd_sock_path: "hsmd.sock".to_string(),
node_grpc_binding: binding.to_string(),
node_info: NodeInfo::new()?,
towerd_public_grpc_uri,
clientca,
network,
node_config,
})
}
}
/// The node's static identity as handed to us by the environment.
#[derive(Clone, Debug)]
pub struct NodeInfo {
    /// 33-byte compressed public key identifying the node.
    pub node_id: Vec<u8>,
    /// Raw `init` message bytes (decoded from `GL_NODE_INIT`).
    pub initmsg: Vec<u8>,
}
impl NodeInfo {
    /// Builds the node identity from the `GL_NODE_ID` and
    /// `GL_NODE_INIT` environment variables.
    ///
    /// # Errors
    /// Fails if either variable is missing or not valid hex, or if
    /// the node_id is not a 33-byte compressed public key.
    fn new() -> Result<Self> {
        // Use `.context` consistently with the GL_NODE_INIT handling
        // below, and report hex-decode failures explicitly instead of
        // bubbling a bare hex error.
        let node_id = hex::decode(
            std::env::var("GL_NODE_ID").context("Environment variable GL_NODE_ID is not set")?,
        )
        .context("Malformed GL_NODE_ID")?;

        // Fail fast before doing any further parsing.
        if node_id.len() != 33 {
            return Err(anyhow!(
                "GL_NODE_ID is not a 33 byte hex-encoded public-key",
            ));
        }

        let initmsg = hex::decode(std::env::var("GL_NODE_INIT").context("Missing GL_NODE_INIT")?)
            .context("Malformed GL_NODE_INIT")?;

        Ok(NodeInfo { node_id, initmsg })
    }
}
pub use crate::pb::NodeConfig;
impl NodeConfig {
    /// Reads and decodes a protobuf-encoded [`NodeConfig`] from `f`.
    ///
    /// # Errors
    /// Fails if the file cannot be read or the payload does not
    /// decode as a `NodeConfig` message.
    pub fn from_file(f: &std::path::Path) -> Result<NodeConfig, anyhow::Error> {
        log::debug!("Loading node_config from {}", f.display());
        use prost::Message;
        let contents = std::fs::read(f)?;
        NodeConfig::decode(&contents[..]).context("decoding protobuf payload")
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/responses.rs | libs/gl-plugin/src/responses.rs | //! Various structs representing JSON-RPC responses
pub use clightningrpc::responses::*;
use serde::{de, Deserialize, Deserializer};
use std::str::FromStr;
/// A simple wrapper that generalizes bare amounts and amounts with
/// the `msat` suffix.
///
/// Newtype over a millisatoshi amount; see the custom `Deserialize`
/// impl below for the accepted JSON encodings.
#[derive(Clone, Copy, Debug)]
pub struct MSat(pub u64);
/// serde visitor accepting either a bare integer or a `"<n>msat"`
/// string, both interpreted as millisatoshi.
struct MSatVisitor;

impl<'d> de::Visitor<'d> for MSatVisitor {
    type Value = MSat;

    fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        if !s.ends_with("msat") {
            return Err(E::custom("missing msat suffix"));
        }
        // `get` (rather than indexing) avoids a panic should the
        // suffix boundary ever fall inside a multi-byte character.
        let numpart = s
            .get(0..(s.len() - 4))
            .ok_or_else(|| E::custom("missing msat suffix"))?;
        let res = u64::from_str(numpart).map_err(|_| E::custom("not a number"))?;
        Ok(MSat(res))
    }

    fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        // `v` is already a u64; the previous `v as u64` cast was a no-op.
        Ok(MSat(v))
    }

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "a bare integer or a string ending with \"msat\"")
    }
}
impl<'d> Deserialize<'d> for MSat {
    /// Dispatches on the JSON type: integers and `"…msat"` strings are
    /// both accepted (see [`MSatVisitor`]).
    fn deserialize<D>(deserializer: D) -> Result<MSat, D::Error>
    where
        D: Deserializer<'d>,
    {
        deserializer.deserialize_any(MSatVisitor)
    }
}
impl std::fmt::Display for MSat {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}msat", self.0)
}
}
/// Response to `withdraw`: the broadcast transaction and its id.
#[derive(Debug, Clone, Deserialize)]
pub struct Withdraw {
    pub tx: String,
    pub txid: String,
}

/// Response to `fundchannel`.
#[derive(Debug, Clone, Deserialize)]
pub struct FundChannel {
    pub tx: String,
    pub txid: String,
    // Index of the funding output within the funding transaction.
    #[serde(rename = "outnum")]
    pub outpoint: u32,
    pub channel_id: String,
    pub close_to: Option<String>,
}

/// Response to `getinfo`.
#[derive(Debug, Clone, Deserialize)]
pub struct GetInfo {
    pub id: String,
    pub alias: String,
    pub color: String,
    pub num_peers: u64,
    pub num_pending_channels: u64,
    pub num_active_channels: u64,
    pub num_inactive_channels: u64,
    pub version: String,
    pub blockheight: u32,
    pub network: String,
    // NOTE(review): field name carries a typo ("ligthning"); kept as-is
    // because it is part of the public API. The JSON wire name is
    // correct via the rename.
    #[serde(rename = "lightning-dir")]
    pub ligthning_dir: String,
    pub warning_bitcoind_sync: Option<String>,
    pub warning_lightningd_sync: Option<String>,
}

/// Response to `close`.
#[derive(Debug, Clone, Deserialize)]
pub struct CloseChannel {
    #[serde(rename = "type")]
    pub close_type: String,
    pub tx: String,
    pub txid: String,
}

/// Response to `invoice`.
#[derive(Debug, Clone, Deserialize)]
pub struct Invoice {
    #[serde(rename = "expires_at")]
    pub expiry_time: u32,
    pub bolt11: String,
    pub payment_hash: String,
    pub payment_secret: Option<String>,
}
/// Response to `pay`.
#[derive(Debug, Clone, Deserialize)]
pub struct Pay {
    pub destination: String,
    pub payment_hash: String,
    pub created_at: f64,
    pub completed_at: Option<u64>,
    pub parts: u32,
    pub msatoshi: u64,
    pub msatoshi_sent: u64,
    pub preimage: Option<String>,
    pub status: String,
    pub bolt11: Option<String>,
}

// Sadly the results of pay differ from the listpays elements, so we
// have to replicate this struct here, until we merge them correctly.
/// A single payment as reported by `listpays`.
#[derive(Debug, Clone, Deserialize)]
pub struct ListPaysPay {
    pub bolt11: Option<String>,
    pub destination: String,
    pub payment_hash: String,
    pub created_at: f64,
    pub completed_at: Option<u64>,
    // parts is missing
    // msatoshi is renamed amount_msat
    pub amount_msat: Option<MSat>,
    pub amount_sent_msat: MSat,
    #[serde(rename = "preimage")]
    pub payment_preimage: Option<String>,
    pub status: String,
}

/// Response to `listpays`.
#[derive(Debug, Clone, Deserialize)]
pub struct ListPays {
    pub pays: Vec<ListPaysPay>,
}

// Invoices returned as part of a listinvoices call
#[derive(Debug, Clone, Deserialize)]
pub struct ListInvoiceInvoice {
    pub label: String,
    pub description: String,
    pub payment_preimage: Option<String>,
    #[serde(rename = "amount_msat")]
    pub amount: Option<MSat>,
    #[serde(rename = "amount_received_msat")]
    pub received: Option<MSat>,
    #[serde(rename = "paid_at")]
    pub payment_time: Option<u32>,
    pub status: String,
    #[serde(rename = "expires_at")]
    pub expiry_time: u32,
    pub bolt11: String,
    pub payment_hash: String,
}

/// Response to `listinvoices`.
#[derive(Debug, Clone, Deserialize)]
pub struct ListInvoices {
    pub invoices: Vec<ListInvoiceInvoice>,
}

/// Response to `keysend`.
#[derive(Debug, Clone, Deserialize)]
pub struct Keysend {
    pub destination: String,
    pub status: String,
    pub payment_preimage: Option<String>,
    pub payment_hash: String,
    pub msatoshi: Option<u64>,
    pub msatoshi_sent: Option<u64>,
    pub amount_sent_msat: Option<MSat>,
    pub amount_msat: Option<MSat>,
}

/// Response to `listincoming`: channels through which we can receive.
#[derive(Debug, Clone, Deserialize)]
pub struct ListIncoming {
    pub incoming: Vec<IncomingChannel>,
}

/// A single incoming channel entry in [`ListIncoming`].
#[derive(Debug, Clone, Deserialize)]
pub struct IncomingChannel {
    pub id: String,
    pub short_channel_id: String,
    pub fee_base_msat: String,
    pub fee_proportional_millionths: u32,
    pub cltv_expiry_delta: u32,
    pub incoming_capacity_msat: String,
}
/// Response to the bitcoin backend's `getchaininfo`.
#[derive(Debug, Clone, Deserialize)]
pub struct GetChainInfo {
    pub chain: String,
    pub headercount: u32,
    pub blockcount: u32,
    // Initial-block-download flag reported by the backend.
    pub ibd: bool,
}

/// Sub-structure for 'getlog' and 'listpeers' item
#[derive(Debug, Clone, Deserialize)]
pub struct LogEntry {
    #[serde(rename = "type")]
    pub type_: String,
    pub num_skipped: Option<u64>,
    pub time: Option<String>,
    pub node_id: Option<String>,
    pub source: Option<String>,
    pub log: Option<String>,
    pub data: Option<String>,
}

/// 'getlog' command
#[derive(Debug, Clone, Deserialize)]
pub struct GetLog {
    pub created_at: String,
    pub bytes_used: u64,
    pub bytes_max: u64,
    pub log: Vec<LogEntry>,
}

/// Sub-structure for htlcs in 'listpeers'
#[derive(Debug, Clone, Deserialize)]
pub struct Htlc {
    pub direction: String,
    pub id: u64,
    pub amount_msat: MSat,
    pub expiry: u64,
    pub payment_hash: String,
    pub state: String,
    pub local_trimmed: Option<bool>,
}
/// A channel as reported inside a `listpeers` peer entry.
#[derive(Debug, Clone, Deserialize)]
pub struct Channel {
    pub state: String,
    pub scratch_txid: Option<String>,
    pub owner: Option<String>,
    pub short_channel_id: Option<String>,
    pub alias: Option<Aliases>,
    pub direction: Option<u64>,
    pub channel_id: String,
    pub funding_txid: String,
    pub close_to_addr: Option<String>,
    pub close_to: Option<String>,
    pub private: bool,
    pub to_us_msat: MSat,
    pub min_to_us_msat: MSat,
    pub max_to_us_msat: MSat,
    pub total_msat: MSat,
    pub dust_limit_msat: MSat,
    pub max_total_htlc_in_msat: MSat,
    pub their_reserve_msat: MSat,
    pub our_reserve_msat: MSat,
    pub spendable_msat: MSat,
    pub receivable_msat: MSat,
    pub minimum_htlc_in_msat: MSat,
    pub their_to_self_delay: u64,
    pub our_to_self_delay: u64,
    pub max_accepted_htlcs: u64,
    pub status: Vec<String>,
    pub in_payments_offered: u64,
    pub in_offered_msat: MSat,
    pub in_payments_fulfilled: u64,
    pub in_fulfilled_msat: MSat,
    pub out_payments_offered: u64,
    pub out_offered_msat: MSat,
    pub out_payments_fulfilled: u64,
    pub out_fulfilled_msat: MSat,
    pub htlcs: Vec<Htlc>,
}

/// Local/remote short-channel-id aliases for a channel.
#[derive(Debug, Clone, Deserialize)]
pub struct Aliases {
    pub local: Option<String>,
    pub remote: Option<String>,
}

/// A peer entry as reported by `listpeers`.
#[derive(Debug, Clone, Deserialize)]
pub struct Peer {
    pub id: String,
    pub connected: bool,
    pub netaddr: Option<Vec<String>>,
    pub features: Option<String>,
    pub channels: Vec<Channel>,
    pub log: Option<Vec<LogEntry>>,
}

/// Response to `listpeers`.
#[derive(Debug, Clone, Deserialize)]
pub struct ListPeers {
    pub peers: Vec<Peer>,
}
/// Sub-structure for 'listfunds' output
#[derive(Debug, Clone, Deserialize)]
pub struct ListFundsOutput {
    pub txid: String,
    pub output: u64,
    pub amount_msat: MSat,
    pub address: String,
    pub status: String,
    pub reserved: bool,
    pub reserved_to_block: Option<u32>,
}

/// Per-channel funds entry inside a `listfunds` response.
#[derive(Debug, Clone, Deserialize)]
pub struct ListFundsChannel {
    pub peer_id: String,
    pub connected: bool,
    pub short_channel_id: Option<String>,
    pub our_amount_msat: MSat,
    pub amount_msat: MSat,
    pub funding_txid: String,
    pub funding_output: u64,
}

/// 'listfunds' command
#[derive(Debug, Clone, Deserialize)]
pub struct ListFunds {
    pub outputs: Vec<ListFundsOutput>,
    pub channels: Vec<ListFundsChannel>,
}

/// Invoice fields consumed by the LSP flow; converted to protobuf in
/// the `From` impl below.
#[derive(Debug, Clone, Deserialize)]
pub struct InvoiceResponse {
    pub bolt11: String,
    pub created_index: u32,
    pub expires_at: u32,
    // Hex-encoded on the wire; decoded during protobuf conversion.
    pub payment_hash: String,
    pub payment_secret: String,
}

/// Reply to the LSP's getinfo call: the fee menu the LSP offers.
#[derive(Debug, Clone, Deserialize)]
pub struct LspGetinfoResponse {
    pub opening_fee_params_menu: Vec<OpeningFeeParams>,
}
/// One entry of the LSPS2 opening-fee menu.
///
/// Amount fields are decimal strings (millisatoshi); `promise` is the
/// LSP's opaque commitment over the other fields.
#[derive(Debug, Clone, Deserialize)]
#[serde(deny_unknown_fields)] // LSPS2 requires the client to fail if a field is unrecognized.
pub struct OpeningFeeParams {
    pub min_fee_msat: String,
    pub proportional: u64,
    pub valid_until: String,
    pub min_lifetime: u32,
    pub max_client_to_self_delay: u32,
    // Fix: stray space before the comma (rustfmt violation).
    pub min_payment_size_msat: String,
    pub max_payment_size_msat: String,
    pub promise: String, // Max 512 bytes
}
impl From<InvoiceResponse> for crate::pb::LspInvoiceResponse {
    /// Converts the JSON-RPC invoice response into its protobuf form.
    ///
    /// # Panics
    /// Panics if `payment_hash` or `payment_secret` are not valid hex.
    /// Both presumably originate from our own RPC and are therefore
    /// expected to be well-formed (TODO: confirm against callers).
    fn from(o: InvoiceResponse) -> crate::pb::LspInvoiceResponse {
        crate::pb::LspInvoiceResponse {
            bolt11: o.bolt11,
            created_index: o.created_index,
            expires_at: o.expires_at,
            // `expect` states the invariant rather than a bare unwrap.
            payment_hash: hex::decode(o.payment_hash)
                .expect("payment_hash from RPC must be valid hex"),
            payment_secret: hex::decode(o.payment_secret)
                .expect("payment_secret from RPC must be valid hex"),
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Exercises both accepted MSat encodings: suffixed string and
    // bare integer.
    #[test]
    fn test_msat_parsing() {
        #[derive(Deserialize)]
        struct V {
            value: MSat,
        }
        struct T {
            input: &'static str,
            output: u64,
        }
        let tests: Vec<T> = vec![
            T {
                input: "{\"value\": \"1234msat\"}",
                output: 1234,
            },
            T {
                input: "{\"value\": 100000000000}",
                output: 100000000000,
            },
        ];
        for t in tests {
            let v: V = serde_json::from_str(t.input).unwrap();
            assert_eq!(v.value.0, t.output);
        }
    }

    // Smoke test: the protobuf conversion must not panic on valid hex.
    #[test]
    fn test_invoice_response() {
        let tests: Vec<InvoiceResponse> = vec![InvoiceResponse {
            bolt11: "ln1test".to_owned(),
            created_index: 0,
            expires_at: 123,
            payment_hash: "AABBCCDDEEFF".to_owned(),
            payment_secret: "1122334455".to_owned(),
        }];
        for t in tests {
            let _actual: crate::pb::LspInvoiceResponse = t.into();
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/messages.rs | libs/gl-plugin/src/messages.rs | use anyhow::{anyhow, Error};
use hex::{self, FromHex};
use serde::de::{self, Deserializer};
use serde::ser::{self, Serializer};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
/// Error raised when a hook/notification payload cannot be parsed.
#[derive(Debug)]
pub struct ParserError {
    // Human-readable description of what failed to parse.
    reason: String,
}

impl std::fmt::Display for ParserError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
        // `write!` is the idiomatic form of write_fmt(format_args!(…)).
        write!(f, "ParserError {}", self.reason)
    }
}

impl std::error::Error for ParserError {}
// "Inspired" by https://github.com/serde-rs/serde/issues/1028#issuecomment-325434041
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "method", content = "params")]
#[serde(rename_all = "snake_case")]
pub enum MyRequests {
HtlcAccepted(HtlcAcceptedCall),
Getmanifest(GetManifestCall),
Init(InitCall),
InvoicePayment(InvoicePaymentCall),
CommitmentRevocation(CommitmentRevocationCall),
}
/// Parameters of the `htlc_accepted` hook.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct HtlcAcceptedCall {
    pub onion: HtlcAcceptedCallOnion,
    pub htlc: HtlcAcceptedCallHtlc,
}

/// Parameters of the `invoice_payment` hook.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct InvoicePaymentCall {
    pub payment: InvoicePaymentCallPayment,
}

/// Parameters of the `custommsg` hook; both fields are hex-encoded.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct Custommsg {
    pub peer_id: String,
    pub payload: String,
}

/// Parameters of the `commitment_revocation` hook.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct CommitmentRevocationCall {
    pub commitment_txid: String,
    pub penalty_tx: String,
    pub channel_id: Option<String>,
    pub commitnum: Option<u64>,
}
/// serde helper deserializing an amount into msat, accepting either a
/// bare integer (already msat) or a string with a `msat`, `sat` or
/// `btc` unit suffix.
///
/// # Errors
/// Returns a deserialization error for unknown units, non-numeric,
/// empty or overflowing amounts (previously the latter two panicked).
fn amt_from_str_or_int<'de, D>(deserializer: D) -> Result<u64, D::Error>
where
    D: de::Deserializer<'de>,
{
    struct JsonStringVisitor;

    impl<'de> de::Visitor<'de> for JsonStringVisitor {
        type Value = u64;

        fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
            formatter.write_str("a string containing json data")
        }

        fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            // Bare integers are already denominated in msat.
            Ok(v)
        }

        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
        where
            E: de::Error,
        {
            // Split digits from the unit. NOTE: partition accepts the
            // digits and unit characters in any interleaving; kept for
            // backward compatibility with the original parser.
            let (nums, exts): (Vec<char>, Vec<char>) = v.chars().partition(|c| c.is_digit(10));
            let num: String = nums.into_iter().collect();
            let ext: String = exts.into_iter().collect();

            // Conversion table from the unit to `msat`, since msat is
            // the unit of account internally for off-chain payments.
            let mult: u64 = match ext.as_str() {
                "msat" => 1,
                "sat" => 1000,
                "btc" => 100_000_000_000,
                _ => return Err(E::custom("unable to parse unit")),
            };

            // Report malformed digits as a deserialization error
            // instead of panicking via expect().
            let num: u64 = num
                .parse::<u64>()
                .map_err(|_| E::custom("unable to parse amount"))?;
            // Guard the unit conversion against overflow.
            num.checked_mul(mult)
                .ok_or_else(|| E::custom("amount overflows u64 msat"))
        }
    }

    // use our visitor to deserialize an `ActualValue`
    deserializer.deserialize_any(JsonStringVisitor)
}
/// Payload of the `invoice_payment` hook.
#[derive(Serialize, Deserialize, Debug)]
pub struct InvoicePaymentCallPayment {
    pub label: String,
    /// Hex-encoded payment preimage.
    pub preimage: String,
    // The wire value may be a bare integer or a "<n><unit>" string;
    // normalized to msat by the helper above.
    #[serde(rename = "msat", deserialize_with = "amt_from_str_or_int")]
    pub amount: u64,
    pub extratlvs: Option<Vec<TlvField>>,
}

/// A single TLV entry as reported by the hook.
#[derive(Serialize, Deserialize, Debug)]
pub struct TlvField {
    #[serde(rename = "type")]
    pub typ: u64,
    // NOTE(review): presumably hex-encoded; not validated here.
    pub value: String,
}
/// `getmanifest` takes no parameters.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct GetManifestCall {}

/// Reply to `getmanifest`, announcing this plugin's capabilities.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct GetManifestResult {
    pub subscriptions: Vec<String>,
    pub hooks: Vec<String>,
    pub dynamic: bool,
    pub options: Vec<PluginOption>,
    pub rpcmethods: Vec<PluginRpcMethod>,
}

/// A single plugin option entry in the manifest.
#[derive(Serialize, Deserialize, Debug)]
pub struct PluginOption {
    name: String,
    default: String,
    description: String,
}

/// A single RPC method entry in the manifest.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct PluginRpcMethod {
    name: String,
    usage: String,
    description: String,
}
/// Onion details handed to the `htlc_accepted` hook. Binary fields
/// are hex on the wire and (de)serialized via the helpers below.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct HtlcAcceptedCallOnion {
    #[serde(serialize_with = "buffer_to_hex", deserialize_with = "hex_to_buffer")]
    pub payload: Vec<u8>,
    short_channel_id: Option<String>,
    // Amount string with unit suffix, e.g. "42msat".
    forward_amount: String,
    outgoing_cltv_value: u64,
    #[serde(serialize_with = "buffer_to_hex", deserialize_with = "hex_to_buffer")]
    next_onion: Vec<u8>,
    #[serde(serialize_with = "buffer_to_hex", deserialize_with = "hex_to_buffer")]
    pub shared_secret: Vec<u8>,
}

/// HTLC details handed to the `htlc_accepted` hook.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct HtlcAcceptedCallHtlc {
    pub amount: String,
    cltv_expiry: u64,
    cltv_expiry_relative: u64,
    #[serde(serialize_with = "buffer_to_hex", deserialize_with = "hex_to_buffer")]
    pub payment_hash: Vec<u8>,
}

/// Our reply to the `htlc_accepted` hook.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "snake_case")]
pub struct HtlcAcceptedResponse {
    // e.g. "resolve" / "continue"; forwarded verbatim to lightningd.
    pub result: String,
    #[serde(serialize_with = "buffer_to_hex", deserialize_with = "hex_to_buffer")]
    pub payment_key: Vec<u8>,
}

/// Parameters of the `init` call from lightningd.
#[derive(Serialize, Deserialize, Debug)]
pub struct InitCall {
    pub options: Value,
    pub configuration: HashMap<String, Value>,
}
/// JSON-RPC notifications we handle, dispatched on `method`.
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "method", content = "params")]
#[serde(rename_all = "snake_case")]
pub enum MyNotifications {
    Disconnect(DisconnectNotification),
}

/// Payload of a `disconnect` notification.
#[derive(Serialize, Deserialize, Debug)]
pub struct DisconnectNotification {
    pub id: String,
}

/// A JSON-RPC message: a request carries an `id`, a notification
/// does not (see the Serialize/Deserialize impls below).
#[derive(Debug)]
pub enum JsonRpc<N, R> {
    Request(usize, R),
    Notification(N),
}
impl<N, R> Serialize for JsonRpc<N, R>
where
    N: Serialize,
    R: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            JsonRpc::Request(id, ref r) => {
                // Serialize the request body to a Value first so we can
                // splice the `id` field in before re-serializing.
                let mut v = serde_json::to_value(r).map_err(ser::Error::custom)?;
                v["id"] = json!(id);
                v.serialize(serializer)
            }
            // Notifications have no id and serialize as-is.
            JsonRpc::Notification(ref n) => n.serialize(serializer),
        }
    }
}
impl<'de, N, R> Deserialize<'de> for JsonRpc<N, R>
where
    N: Deserialize<'de>,
    R: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Peek only at the `id` field: per JSON-RPC its presence
        // distinguishes a request from a notification.
        #[derive(Deserialize)]
        struct IdHelper {
            id: Option<usize>,
        }
        let v = Value::deserialize(deserializer)?;
        let helper = IdHelper::deserialize(&v).map_err(de::Error::custom)?;
        match helper.id {
            Some(id) => {
                let r = R::deserialize(v).map_err(de::Error::custom)?;
                Ok(JsonRpc::Request(id, r))
            }
            None => {
                let n = N::deserialize(v).map_err(de::Error::custom)?;
                Ok(JsonRpc::Notification(n))
            }
        }
    }
}
/// Serializes `buffer` to a lowercase hex string.
///
/// Used via `#[serde(serialize_with = …)]` on binary fields above.
pub fn buffer_to_hex<T, S>(buffer: &T, serializer: S) -> Result<S::Ok, S::Error>
where
    T: AsRef<[u8]>,
    S: Serializer,
{
    serializer.serialize_str(&hex::encode(buffer))
}

/// Deserializes a lowercase hex string to a `Vec<u8>`.
pub fn hex_to_buffer<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
where
    D: Deserializer<'de>,
{
    use serde::de::Error;
    // Invalid hex becomes a serde error rather than a panic.
    String::deserialize(deserializer)
        .and_then(|string| Vec::from_hex(&string).map_err(|err| Error::custom(err.to_string())))
}
/// An amount denominated in millisatoshi.
#[derive(Serialize, Deserialize, Debug)]
pub struct Amount {
    pub msatoshi: i64,
}

impl Amount {
    /// Parses a `"<n>msat"` string as produced by core-lightning.
    ///
    /// # Errors
    /// Fails if the `msat` suffix is missing or the numeric part does
    /// not parse as an `i64`.
    pub fn from_string(s: &str) -> Result<Amount, Error> {
        // `strip_suffix` replaces the manual ends_with + slice dance.
        let digits = s
            .strip_suffix("msat")
            .ok_or_else(|| anyhow!("Amount string does not end with msat."))?;
        let msatoshi = digits.parse::<i64>().map_err(|e| anyhow!(e))?;
        Ok(Amount { msatoshi })
    }
}
/// serde helper: deserializes an `"<n>msat"` string into an [`Amount`].
fn _string_to_amount<'de, D>(deserializer: D) -> Result<Amount, D::Error>
where
    D: Deserializer<'de>,
{
    use serde::de::Error;
    String::deserialize(deserializer).and_then(|string| {
        Amount::from_string(&string).map_err(|_| Error::custom("could not parse amount"))
    })
}

/// serde helper: serializes an [`Amount`] as an `"<n>msat"` string.
fn _amount_to_string<S>(amount: &Amount, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let s = format!("{}msat", amount.msatoshi);
    serializer.serialize_str(&s)
}
/// PeerConnectedCall is the message that is returned by the
/// `peer_connected` hook.
#[derive(Serialize, Deserialize, Debug)]
pub struct PeerConnectedCall {
    pub peer: Peer
}

/// Peer details reported by the `peer_connected` hook.
#[derive(Serialize, Deserialize, Debug)]
pub struct Peer {
    pub id: String,
    /// Whether we initiated the connection or the peer did.
    pub direction: Direction,
    pub addr: String,
    pub features: String,
}

/// Connection direction, serialized as `"in"` / `"out"`.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum Direction {
    In,
    Out
}
#[cfg(test)]
mod test {
    use super::*;

    // Round-trips a peer_connected hook payload through serde.
    #[test]
    fn test_peer_connected_call() {
        let msg = json!({
            "peer": {
                "id": "03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f",
                "direction": "in",
                "addr": "34.239.230.56:9735",
                "features": ""
            }
        });
        let call = serde_json::from_str::<PeerConnectedCall>(&msg.to_string()).unwrap();
        assert_eq!(call.peer.id, "03864ef025fde8fb587d989186ce6a4a186895ee44a926bfc370e2c366597a3f8f");
        assert_eq!(call.peer.direction, Direction::In);
        assert_eq!(call.peer.addr, "34.239.230.56:9735");
        assert_eq!(call.peer.features, "");
    }

    // Parses a full htlc_accepted JSON-RPC request and checks the
    // request/notification dispatch in JsonRpc.
    #[test]
    fn test_htlc_accepted_call() {
        let req = json!({"id": 1, "jsonrpc": "2.0", "method": "htlc_accepted", "params": {
            "onion": {
                "payload": "",
                "type": "legacy",
                "short_channel_id": "1x2x3",
                "forward_amount": "42msat",
                "outgoing_cltv_value": 500014,
                "shared_secret": "0000000000000000000000000000000000000000000000000000000000000000",
                "next_onion": "00DEADBEEF00",
            },
            "htlc": {
                "amount": "43msat",
                "cltv_expiry": 500028,
                "cltv_expiry_relative": 10,
                "payment_hash": "0000000000000000000000000000000000000000000000000000000000000000"
            }
        }
        });
        type T = JsonRpc<MyNotifications, MyRequests>;
        let req = serde_json::from_str::<T>(&req.to_string()).unwrap();
        match req {
            T::Request(id, c) => {
                assert_eq!(id, 1);
                match c {
                    MyRequests::HtlcAccepted(c) => {
                        //assert_eq!(c.onion.payload, "");
                        assert_eq!(c.onion.forward_amount, "42msat");
                        assert_eq!(c.onion.outgoing_cltv_value, 500014);
                        //assert_eq!(c.onion.next_onion, "[1365bytes of serialized onion]");
                        //assert_eq!(
                        //    c.onion.shared_secret,
                        //    "0000000000000000000000000000000000000000000000000000000000000000"
                        //);
                        //assert_eq!(
                        //    c.htlc.payment_hash,
                        //    "0000000000000000000000000000000000000000000000000000000000000000"
                        //);
                    }
                    _ => panic!("This was supposed to be an htlc_accepted call"),
                }
            }
            _ => panic!("This was supposed to be a request"),
        }
    }

    /// We have a bit of trouble parsing some invoice payment hook
    /// calls in 2024/06/03.
    // Regression test: label is itself an escaped JSON document and
    // the amount uses the "btc" unit suffix.
    #[test]
    fn test_invoice_payment_payload() {
        let s = "{\"payment\": {\"extratlvs\": [], \"label\": \"{\\\"unix_milli\\\":1717422773673,\\\"payer_amount_msat\\\":null}\", \"msat\": \"42btc\", \"preimage\": \"243adf90767a5c3a8f6118e003c89b3e1ab5a2fd318d49cb41f4d42e92d3de41\"}}";
        let v = serde_json::from_str(&s).expect("parsing generic value");
        let _c: InvoicePaymentCall = serde_json::from_value(v).expect("parsing into struct");
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/tlv.rs | libs/gl-plugin/src/tlv.rs | use anyhow::anyhow;
use bytes::{Buf, BufMut};
use cln_rpc::primitives::TlvEntry;
use serde::{Deserialize, Deserializer};
/// A standalone type representing a binary serialized
/// TlvStream. This is distinct from TlvStream since that expects TLV
/// streams to be encoded as maps in JSON.
#[derive(Clone, Debug)]
pub struct SerializedTlvStream {
    // Kept sorted by `typ`; see `insert`.
    entries: Vec<TlvEntry>,
}
impl SerializedTlvStream {
    /// Creates an empty TLV stream.
    pub fn new() -> Self {
        Self { entries: vec![] }
    }

    /// Returns the first entry with type `typ`, if present.
    pub fn get(&self, typ: u64) -> Option<TlvEntry> {
        // `find` replaces the filter().next() combination.
        self.entries.iter().find(|e| e.typ == typ).cloned()
    }

    /// Appends `e`, keeping the stream sorted by type as the TLV
    /// encoding requires.
    ///
    /// # Errors
    /// Fails if an entry with the same type already exists.
    pub fn insert(&mut self, e: TlvEntry) -> Result<(), anyhow::Error> {
        if let Some(old) = self.get(e.typ) {
            return Err(anyhow!(
                "TlvStream contains entry of type={}, old={:?}, new={:?}",
                e.typ,
                old,
                e
            ));
        }
        self.entries.push(e);
        // u64's Ord is total, so sort_by_key replaces the
        // partial_cmp().unwrap() dance.
        self.entries.sort_by_key(|e| e.typ);
        Ok(())
    }

    /// Sets the raw value for `typ`, overwriting an existing entry or
    /// inserting a new one.
    pub fn set_bytes<T>(&mut self, typ: u64, val: T)
    where
        T: AsRef<[u8]>,
    {
        match self.entries.iter().position(|e| e.typ == typ) {
            Some(i) => self.entries[i].value = val.as_ref().to_vec(),
            None => self
                .insert(TlvEntry {
                    typ,
                    value: val.as_ref().to_vec(),
                })
                // Safe: we just checked no entry with this type exists.
                .expect("no entry with this type exists"),
        }
    }

    /// Sets a truncated-u64 (`tu64`) value for `typ`.
    pub fn set_tu64(&mut self, typ: u64, val: TU64) {
        let mut b = bytes::BytesMut::new();
        b.put_tu64(val);
        self.set_bytes(typ, b)
    }
}
/// Fallible construction from a raw byte buffer; counterpart to
/// [`ToBytes`].
pub trait FromBytes: Sized {
    type Error;
    fn from_bytes<T>(s: T) -> Result<Self, Self::Error>
    where
        T: AsRef<[u8]> + 'static;
}
impl FromBytes for SerializedTlvStream {
type Error = anyhow::Error;
fn from_bytes<T>(s: T) -> Result<Self, Self::Error>
where
T: AsRef<[u8]> + 'static,
{
let mut b = s.as_ref();
//let mut b: bytes::Bytes = r.into();
let mut entries: Vec<TlvEntry> = vec![];
while b.remaining() >= 2 {
let typ = b.get_compact_size() as u64;
let len = b.get_compact_size() as usize;
let value = b.copy_to_bytes(len).to_vec();
entries.push(TlvEntry { typ, value });
}
Ok(SerializedTlvStream { entries })
}
}
/// A bitcoin-style variable-length integer (CompactSize / varint).
pub type CompactSize = u64;

/// A variant of CompactSize that works on length-delimited
/// buffers and therefore does not require a length prefix
pub type TU64 = u64;
/// Extensions on top of `Buf` to include LN proto primitives
pub trait ProtoBuf: Buf {
    /// Reads a bitcoin-style CompactSize: the first byte selects the
    /// width of the value that follows.
    fn get_compact_size(&mut self) -> CompactSize {
        // All arms already produce u64; the previous trailing `.into()`
        // on the match result was a no-op.
        match self.get_u8() {
            253 => self.get_u16().into(),
            254 => self.get_u32().into(),
            255 => self.get_u64(),
            v => v.into(),
        }
    }

    /// Reads a truncated u64: the value occupies the entire remaining
    /// buffer, which must be exactly 1, 2, 4 or 8 bytes.
    ///
    /// # Panics
    /// Panics for any other remaining length.
    fn get_tu64(&mut self) -> TU64 {
        match self.remaining() {
            1 => self.get_u8() as u64,
            2 => self.get_u16() as u64,
            4 => self.get_u32() as u64,
            // Already u64; the `as u64` cast was redundant.
            8 => self.get_u64(),
            l => panic!("Unexpected TU64 length: {}", l),
        }
    }
}
impl ProtoBuf for bytes::Bytes {}
impl ProtoBuf for &[u8] {}
impl ProtoBuf for bytes::buf::Take<bytes::Bytes> {}
/// Write-side counterpart of [`ProtoBuf`].
pub trait ProtoBufMut: bytes::BufMut {
    /// Writes a bitcoin-style CompactSize with the minimal width.
    fn put_compact_size(&mut self, cs: CompactSize) {
        // `cs` is already u64; the previous `cs as u64` was a no-op.
        match cs {
            0..=0xFC => self.put_u8(cs as u8),
            0xFD..=0xFFFF => {
                self.put_u8(253);
                self.put_u16(cs as u16);
            }
            0x10000..=0xFFFFFFFF => {
                self.put_u8(254);
                self.put_u32(cs as u32);
            }
            v => {
                self.put_u8(255);
                self.put_u64(v);
            }
        }
    }

    /// Writes a truncated u64: big-endian with leading zero bytes
    /// stripped. Note that the value 0 writes no bytes at all.
    fn put_tu64(&mut self, u: TU64) {
        // Fixme: (nepet) We trim leading zero bytes here as they
        // cause some problems for the cln decoder - for now. Think
        // about an appropriate solution.
        let b: Vec<u8> = u
            .to_be_bytes()
            .iter()
            // `.copied()` replaces the manual `.map(|x| x.clone())`.
            .copied()
            .skip_while(|&x| x == 0)
            .collect();
        self.put_slice(&b);
    }
}
impl ProtoBufMut for bytes::BytesMut {}
/// Serialization counterpart to [`FromBytes`].
pub trait ToBytes: Sized {
    fn to_bytes(s: Self) -> Vec<u8>;
}

impl ToBytes for SerializedTlvStream {
    /// Encodes the stream as concatenated (type, length, value)
    /// records with CompactSize type/length, no overall length prefix.
    fn to_bytes(s: Self) -> Vec<u8> {
        let mut b = bytes::BytesMut::new();
        for e in s.entries.iter() {
            b.put_compact_size(e.typ);
            b.put_compact_size(e.value.len() as u64);
            b.put(&e.value[..]);
        }
        b.to_vec()
    }
}
impl<'de> Deserialize<'de> for SerializedTlvStream {
    /// Deserializes from a hex string whose binary form is a
    /// CompactSize length prefix followed by the TLV records.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Start by reading the hex-encoded string
        let s: String = Deserialize::deserialize(deserializer)?;
        let mut b: bytes::Bytes = hex::decode(s)
            .map_err(|e| serde::de::Error::custom(e.to_string()))?
            .into();
        // Skip the length prefix
        let l = b.get_compact_size();
        let b = b.take(l as usize); // Protect against overruns
        // NOTE(review): `into_inner()` discards the `take` limit, so
        // the declared length is not actually enforced here — any
        // trailing bytes after `l` are still parsed. Verify intent.
        Self::from_bytes(b.into_inner()).map_err(|e| serde::de::Error::custom(e.to_string()))
    }
}
impl std::fmt::Display for SerializedTlvStream {
    /// Hex-encodes the binary serialization (no length prefix).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let s = hex::encode(SerializedTlvStream::to_bytes(self.clone()));
        write!(f, "{s}")
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Decodes a captured raw stream and checks that the entries for
    // types 33001 (invoice) and 33003 (amount) are present.
    #[test]
    fn test_tlv_stream() {
        let raw_hex = "fd80e9fd01046c6e62633130306e31706e6670757a677070356a73376e653571727465326d707874666d7a703638667838676a7376776b3034366c76357a76707a7832766d687478683763777364717163717a7a737871797a3576717370357a6d6b78726539686d6864617378336b75357070336a38366472337778656b78336437383363706c6161363068783870357564733971787071797367717132646c68656177796c677534346567393363766e78666e64747a646a6e647465666b726861727763746b3368783766656e67346179746e6a3277686d74716665636a7930776777396c6665727072386b686d64667771736e386d6d7a3776643565776a34756370656439787076fd80eb022742";
        let raw = hex::decode(&raw_hex).unwrap();
        let tlv_stream = SerializedTlvStream::from_bytes(raw).unwrap();
        let invoice = tlv_stream.get(33001);
        let amount_msat = tlv_stream.get(33003);
        assert!(invoice.is_some());
        // 0x2742 big-endian == 10050 msat.
        assert!(amount_msat
            .is_some_and(|v| u16::from_be_bytes(v.value[..].try_into().unwrap()) == 10050));
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/lib.rs | libs/gl-plugin/src/lib.rs | use anyhow::Result;
use cln_rpc;
use log::{debug, warn};
use serde_json::json;
use std::future::Future;
use std::sync::Arc;
use tokio::sync::broadcast;
#[macro_use(error)]
extern crate gl_util;
mod awaitables;
pub mod config;
pub mod hsm;
mod lsp;
pub mod messages;
pub mod node;
pub mod pb;
pub mod requests;
pub mod responses;
pub mod stager;
pub mod storage;
pub mod tlv;
pub mod tramp;
#[cfg(unix)]
mod unix;
mod context;
/// Shared plugin state: the signature stager plus the broadcast
/// channel used to fan events out to subscribers.
#[derive(Clone)]
pub struct GlPlugin {
    stage: Arc<stager::Stage>,
    events: broadcast::Sender<Event>,
}
/// A small wrapper around [`cln_plugin::Builder`] that allows us to
/// subscribe to events outside the plugin state itself, before
/// getting configured.
// TODO: Switch this out once the [`cln_plugin::Builder`] no longer
// pre-binds state
pub struct Builder {
    inner: cln_plugin::Builder<GlPlugin, tokio::io::Stdin, tokio::io::Stdout>,
    // Kept here as well so callers can subscribe before start().
    events: broadcast::Sender<Event>,
    state: GlPlugin,
}
impl Builder {
    /// Returns a fresh receiver of plugin events; usable before the
    /// plugin is started.
    pub fn subscribe_events(&self) -> broadcast::Receiver<Event> {
        self.events.subscribe()
    }

    /// Starts the wrapped plugin with the pre-bound state.
    pub async fn start(self) -> Result<Option<Plugin>> {
        self.inner.start(self.state).await
    }

    /// Registers an additional hook handler, mirroring
    /// `cln_plugin::Builder::hook`.
    pub fn hook<C, F>(self, hookname: &str, callback: C) -> Self
    where
        C: Send + Sync + 'static,
        C: Fn(cln_plugin::Plugin<GlPlugin>, serde_json::Value) -> F + 'static,
        F: Future<Output = Result<serde_json::Value, anyhow::Error>> + Send + Sync + 'static,
    {
        Builder {
            inner: self.inner.hook(hookname, callback),
            ..self
        }
    }

    /// Registers an additional notification subscription, mirroring
    /// `cln_plugin::Builder::subscribe`.
    pub fn subscribe<C, F>(self, hookname: &str, callback: C) -> Self
    where
        C: Send + Sync + 'static,
        C: Fn(cln_plugin::Plugin<GlPlugin>, serde_json::Value) -> F + 'static,
        F: Future<Output = Result<(), anyhow::Error>> + Send + Sync + 'static,
    {
        Builder {
            inner: self.inner.subscribe(hookname, callback),
            ..self
        }
    }

    /// Handle to the shared signature stager.
    pub fn stage(&self) -> Arc<stager::Stage> {
        self.state.stage.clone()
    }
}
/// Convenience alias for our concrete plugin type.
pub type Plugin = cln_plugin::Plugin<GlPlugin>;

impl GlPlugin {
    /// Returns a handle to the shared signature stager.
    pub fn get_stage(&self) -> Arc<stager::Stage> {
        self.stage.clone()
    }
}
/// Initialize the plugin, but don't start it yet. Allows attaching
/// additional methods, hooks, and subscriptions.
pub async fn init(
    stage: Arc<stager::Stage>,
    events: tokio::sync::broadcast::Sender<Event>,
) -> Result<Builder> {
    let state = GlPlugin {
        events: events.clone(),
        stage,
    };

    // Register the hooks this plugin always handles; callers may add
    // more via Builder::hook / Builder::subscribe before start().
    let inner = cln_plugin::Builder::new(tokio::io::stdin(), tokio::io::stdout())
        .hook("htlc_accepted", lsp::on_htlc_accepted)
        .hook("invoice_payment", on_invoice_payment)
        .hook("peer_connected", on_peer_connected)
        .hook("openchannel", on_openchannel)
        .hook("custommsg", on_custommsg);

    Ok(Builder {
        state,
        inner,
        events,
    })
}
/// `custommsg` hook handler: decodes the peer id and payload and
/// forwards the message to any subscribed event listeners.
///
/// # Errors
/// Fails if the hook payload cannot be parsed or the hex fields do
/// not decode (previously these cases panicked via `unwrap`).
async fn on_custommsg(plugin: Plugin, v: serde_json::Value) -> Result<serde_json::Value> {
    let call: messages::Custommsg = serde_json::from_value(v)?;
    debug!("Received a custommsg {:?}", &call);
    let msg = pb::Custommsg {
        peer_id: hex::decode(call.peer_id)?,
        payload: hex::decode(call.payload)?,
    };

    // Custommsg delivery is best effort, so don't use the Result<>.
    if let Err(e) = plugin.state().events.clone().send(Event::CustomMsg(msg)) {
        log::debug!("Error sending custommsg to listeners: {}", e);
    }
    Ok(json!({"result": "continue"}))
}
/// Notification handler that receives notifications on successful
/// peer connections, then stores them into the `datastore` for future
/// reference.
async fn on_peer_connected(plugin: Plugin, v: serde_json::Value) -> Result<serde_json::Value> {
debug!("Got a successful peer connection: {:?}", v);
let call = serde_json::from_value::<messages::PeerConnectedCall>(v.clone()).unwrap();
let mut rpc = cln_rpc::ClnRpc::new(plugin.configuration().rpc_file).await?;
let req = cln_rpc::model::requests::DatastoreRequest {
key: vec![
"greenlight".to_string(),
"peerlist".to_string(),
call.peer.id.clone(),
],
string: Some(serde_json::to_string(&call.peer).unwrap()),
hex: None,
mode: Some(cln_rpc::model::requests::DatastoreMode::CREATE_OR_REPLACE),
generation: None,
};
// We ignore the response and continue anyways.
let res = rpc.call_typed(&req).await;
debug!("Got datastore response: {:?}", res);
Ok(json!({"result": "continue"}))
}
/// Hook handler for `openchannel`: looks up a stored `glconf.request`
/// entry in the datastore and, if it contains a parseable `GlConfig`,
/// tells lightningd to use the configured `close_to` address for the
/// new channel. In every other case the channel open just continues.
async fn on_openchannel(plugin: Plugin, v: serde_json::Value) -> Result<serde_json::Value> {
    debug!("Received an openchannel request: {:?}", v);
    let mut rpc = cln_rpc::ClnRpc::new(plugin.configuration().rpc_file).await?;
    let req = cln_rpc::model::requests::ListdatastoreRequest {
        key: Some(vec!["glconf".to_string(), "request".to_string()]),
    };
    let res = rpc.call_typed(&req).await;
    debug!("ListdatastoreRequest response: {:?}", res);
    match res {
        Ok(res) => {
            if !res.datastore.is_empty() {
                // Only the first entry is consulted; any further
                // entries under this key are ignored.
                match &res.datastore[0].string {
                    Some(serialized_request) => {
                        match _parse_gl_config_from_serialized_request(
                            serialized_request.to_string(),
                        ) {
                            Some(gl_config) => {
                                return Ok(
                                    json!({"result": "continue", "close_to": gl_config.close_to_addr}),
                                );
                            }
                            None => {
                                debug!("Failed to parse the GlConfig from the serialized request's payload");
                            }
                        }
                    }
                    None => {
                        debug!("Got empty response from datastore for key glconf.request");
                    }
                }
            }
            // No usable config found: continue without a close_to.
            return Ok(json!({"result": "continue"}));
        }
        Err(e) => {
            // Datastore lookup failures are non-fatal; the channel
            // open proceeds without a custom close_to address.
            log::debug!(
                "An error occurred while searching for a custom close_to address: {}",
                e
            );
            Ok(json!({"result": "continue"}))
        }
    }
}
/// Deserialize a stored signed request and decode the `GlConfig`
/// protobuf from its payload. Returns `None` when the stored entry
/// cannot be parsed, instead of panicking on corrupt datastore data.
fn _parse_gl_config_from_serialized_request(request: String) -> Option<pb::GlConfig> {
    use prost::Message;
    // The datastore may contain arbitrary data; never unwrap it.
    let gl_conf_req: crate::context::Request = match serde_json::from_str(&request) {
        Ok(req) => req,
        Err(e) => {
            debug!("Failed to deserialize the stored glconf request: {:?}", e);
            return None;
        }
    };
    let gl_conf_req: crate::pb::PendingRequest = gl_conf_req.into();
    // Skip the 5-byte prefix before the protobuf payload.
    // NOTE(review): assumes a fixed 5-byte header (grpc-style frame) —
    // confirm against how `context::Request` serializes the request.
    let payload = match gl_conf_req.request.get(5..) {
        Some(payload) => payload,
        None => {
            debug!("Stored glconf request is too short to contain a payload");
            return None;
        }
    };
    match crate::pb::GlConfig::decode(payload) {
        Ok(glconfig) => Some(glconfig),
        Err(e) => {
            debug!("Failed to parse glconfig from string: {:?}", e);
            None
        }
    }
}
/// Notification handler that receives notifications on incoming
/// payments, then looks up the invoice in the DB, and forwards the
/// full information to the GRPC interface.
async fn on_invoice_payment(plugin: Plugin, v: serde_json::Value) -> Result<serde_json::Value> {
    log::info!("Got an incoming payment via invoice_payment: {:?}", v);
    let state = plugin.state();
    let call: messages::InvoicePaymentCall = match serde_json::from_value(v) {
        Ok(v) => v,
        Err(e) => {
            log::error!("Could not decode the invoice_payment_call: {e}");
            return Ok(json!({"result": "continue"}));
        }
    };

    // Use the rpc socket announced by lightningd instead of assuming
    // the working directory contains `lightning-rpc`; this matches the
    // other handlers in this module.
    let mut rpc = cln_rpc::ClnRpc::new(plugin.configuration().rpc_file).await?;
    let req = cln_rpc::model::requests::ListinvoicesRequest {
        label: Some(call.payment.label.clone()),
        invstring: None,
        payment_hash: None,
        offer_id: None,
        index: None,
        start: None,
        limit: None,
    };
    // An RPC failure must not panic the notification handler; log and
    // continue instead.
    let mut invoices = match rpc.call_typed(&req).await {
        Ok(res) => res.invoices,
        Err(e) => {
            warn!(
                "Could not look up invoice for label {}: {:?}",
                call.payment.label, e
            );
            return Ok(json!({"result": "continue"}));
        }
    };
    let invoice = match invoices.pop() {
        Some(i) => i,
        None => {
            warn!(
                "No invoice matching the notification label {} found",
                call.payment.label
            );
            return Ok(json!({"result": "continue"}));
        }
    };
    debug!(
        "Retrieved matching invoice for invoice_payment: {:?}",
        invoice
    );
    let amount: pb::Amount = call.payment.amount.try_into().unwrap();
    let mut tlvs = vec![];
    for t in call.payment.extratlvs.unwrap_or(vec![]) {
        tlvs.push(t.into());
    }
    use crate::pb::incoming_payment::Details;
    let details = pb::OffChainPayment {
        label: invoice.label,
        preimage: hex::decode(call.payment.preimage).unwrap(),
        amount: Some(amount.try_into().unwrap()),
        extratlvs: tlvs,
        // NOTE(review): panics if the invoice has no bolt11 string —
        // confirm lightningd always sets it for paid invoices.
        bolt11: invoice.bolt11.unwrap(),
        // We unfortunately have to go the way of serializing and deserializing as the version of
        // bitcoin_hashes we use (0.11.0) does not support the `Hash` trait that implements
        // `to_bytes_array`. TODO: fix once we updated to a newer version of `bitcoin_hashes`.
        payment_hash: hex::decode(invoice.payment_hash.to_string()).unwrap(),
    };
    let p = pb::IncomingPayment {
        details: Some(Details::Offchain(details)),
    };
    // Delivery is best effort: nobody listening is not an error.
    match state.events.clone().send(Event::IncomingPayment(p)) {
        Ok(_) => {}
        Err(_) => warn!("No active listener for the incoming payment"),
    }
    Ok(json!({"result": "continue"}))
}
/// An event that we can observe during the operation of the plugin.
#[derive(Clone, Debug)]
pub enum Event {
    /// The plugin is stopping; carries the shared request stage.
    Stop(Arc<stager::Stage>),
    /// A grpc call. The first element is the URI of the request.
    RpcCall(String),
    /// An incoming payment, forwarded from the `invoice_payment` hook.
    IncomingPayment(pb::IncomingPayment),
    /// A peer custom message, forwarded from the `custommsg` hook.
    CustomMsg(pb::Custommsg),
}
pub use cln_grpc as grpc;
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/pb.rs | libs/gl-plugin/src/pb.rs | tonic::include_proto!("greenlight");
use crate::{messages, requests, responses};
use cln_rpc::primitives;
impl HsmRequest {
    /// Extract the message type from the first two bytes of the raw
    /// wire message (big-endian u16).
    pub fn get_type(&self) -> u16 {
        u16::from_be_bytes([self.raw[0], self.raw[1]])
    }
}
impl From<u64> for Amount {
    /// Interpret a bare integer as an amount in millisatoshi.
    fn from(msat: u64) -> Self {
        let unit = amount::Unit::Millisatoshi(msat);
        Self { unit: Some(unit) }
    }
}
impl From<messages::TlvField> for TlvField {
    /// Convert the JSON representation (hex-encoded value) into the
    /// protobuf representation (raw bytes).
    fn from(field: messages::TlvField) -> TlvField {
        let value = hex::decode(field.value).unwrap();
        TlvField {
            r#type: field.typ,
            value,
        }
    }
}
impl From<cln_grpc::pb::RouteHop> for requests::RoutehintHopDev {
    // Convert a grpc routehint hop into the JSON-RPC request
    // representation: hex-encode the node id and unwrap the msat
    // amounts.
    fn from(r: cln_grpc::pb::RouteHop) -> requests::RoutehintHopDev {
        requests::RoutehintHopDev {
            id: hex::encode(r.id),
            short_channel_id: r.scid,
            fee_base_msat: r.feebase.map(|f| f.msat),
            fee_proportional_millionths: r.feeprop,
            // NOTE(review): `as u16` silently truncates values above
            // u16::MAX — confirm expirydelta is always in range.
            cltv_expiry_delta: r.expirydelta as u16,
        }
    }
}
impl From<cln_grpc::pb::InvoiceRequest> for requests::Invoice {
    /// Map a grpc `InvoiceRequest` onto the JSON-RPC `invoice` request.
    fn from(ir: cln_grpc::pb::InvoiceRequest) -> Self {
        // An empty fallback list means "no fallbacks requested".
        let fallbacks = if ir.fallbacks.is_empty() {
            None
        } else {
            Some(ir.fallbacks)
        };
        // A missing amount means "any amount".
        let amount_msat = match ir.amount_msat {
            Some(a) => a.into(),
            None => primitives::AmountOrAny::Any,
        };
        Self {
            amount_msat,
            description: ir.description,
            dev_routes: None,
            label: ir.label,
            exposeprivatechannels: None,
            preimage: ir.preimage.map(hex::encode),
            expiry: ir.expiry,
            fallbacks,
            cltv: ir.cltv,
            deschashonly: ir.deschashonly,
        }
    }
}
impl From<responses::Invoice> for cln_grpc::pb::InvoiceResponse {
    /// Map the JSON-RPC invoice response onto the grpc representation,
    /// decoding the hex-encoded hash and secret into raw bytes.
    fn from(invoice: responses::Invoice) -> Self {
        // A missing payment_secret becomes an empty byte vector.
        let payment_secret = match invoice.payment_secret {
            Some(secret) => hex::decode(secret).unwrap(),
            None => Vec::new(),
        };
        Self {
            bolt11: invoice.bolt11,
            expires_at: invoice.expiry_time as u64,
            payment_hash: hex::decode(invoice.payment_hash).unwrap(),
            payment_secret,
            warning_capacity: None,
            warning_mpp: None,
            warning_deadends: None,
            warning_offline: None,
            warning_private_unused: None,
            created_index: None,
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/awaitables.rs | libs/gl-plugin/src/awaitables.rs | use cln_rpc::{
model::{
requests::{ConnectRequest, GetinfoRequest, GetrouteRequest, ListpeerchannelsRequest},
responses::GetrouteResponse,
},
primitives::{Amount, PublicKey, ShortChannelId},
ClnRpc,
};
use std::{
future::Future,
path::{Path, PathBuf},
pin::Pin,
time::Duration,
};
use thiserror;
use tokio::time::Instant;
// The delay between consecutive rpc calls of the same type.
const RPC_CALL_DELAY_MSEC: u64 = 250;
/// Errors surfaced while waiting on peers and channels.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The node has no record of this peer.
    #[error("Unknown peer {0}")]
    PeerUnknown(String),
    /// The `connect` RPC to the peer failed.
    #[error("Can't connect to peer {0}")]
    PeerConnectionFailure(String),
    /// The channel was not found or is missing an expected field.
    #[error("Channel error: {0}")]
    Channel(&'static str),
    /// Error returned by the JSON-RPC interface.
    #[error("RPC error: {0}")]
    Rpc(#[from] cln_rpc::RpcError),
    /// Error talking to a Greenlight service component.
    #[error("Error talking to a GL service: {0}")]
    Service(String),
}
/// A struct to track the status of a peer connection.
pub struct AwaitablePeer {
    peer_id: PublicKey,
    // Path to the lightningd JSON-RPC socket.
    rpc_path: PathBuf,
    // Lazily created connection-check future used by the `Future` impl.
    ensure_peer_connection: Option<Pin<Box<dyn Future<Output = Result<(), Error>> + Send>>>,
}
impl AwaitablePeer {
    /// Create a tracker for the connection to `peer_id`, talking to
    /// lightningd through the socket at `rpc_path`.
    pub fn new(peer_id: PublicKey, rpc_path: PathBuf) -> Self {
        Self {
            peer_id,
            rpc_path,
            ensure_peer_connection: None,
        }
    }

    /// Ensure the peer is connected, connecting it if necessary.
    pub async fn wait(&mut self) -> Result<(), Error> {
        ensure_peer_connection(&self.rpc_path, self.peer_id).await
    }
}
impl Future for AwaitablePeer {
    type Output = Result<(), Error>;

    /// Lazily create the connection-check future on the first poll,
    /// then forward all polls to it.
    fn poll(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        if self.ensure_peer_connection.is_none() {
            let fut = ensure_peer_connection(self.rpc_path.clone(), self.peer_id.clone());
            self.ensure_peer_connection = Some(Box::pin(fut));
        }
        // Delegate directly to the inner future.
        self.ensure_peer_connection
            .as_mut()
            .expect("initialized above")
            .as_mut()
            .poll(cx)
    }
}
/// A struct to track the status of a channel. It implements `Future` to
/// await an operable channel state before returning the spendable amount
/// on this channel.
pub struct AwaitableChannel {
    scid: ShortChannelId,
    peer_id: PublicKey,
    // Path to the lightningd JSON-RPC socket.
    rpc_path: PathBuf,
    // lightningd version string, fetched lazily on the first poll.
    version: Option<String>,
    // Progress flags for the sequential readiness checks.
    peer_connected: bool,
    channel_ready: bool,
    route_found: bool,
    // Time of the last failed check; used to back off between retries.
    last_check: Option<Instant>,
    rpc_call_delay: Duration,
    // In-flight futures for the individual checks, created lazily
    // while polling and reset when a check needs to be retried.
    get_version: Option<Pin<Box<dyn Future<Output = Result<String, Error>> + Send>>>,
    ensure_peer_connection: Option<Pin<Box<dyn Future<Output = Result<(), Error>> + Send>>>,
    billboard: Option<Pin<Box<dyn Future<Output = Result<Vec<String>, Error>> + Send>>>,
    get_route: Option<Pin<Box<dyn Future<Output = Result<GetrouteResponse, Error>> + Send>>>,
    spendable_msat: Option<Pin<Box<dyn Future<Output = Result<Amount, Error>> + Send>>>,
}
impl Future for AwaitableChannel {
    type Output = Result<Amount, Error>;
    /// Drive the channel through its readiness checks in order:
    /// version lookup, peer connection, channel reestablishment,
    /// routability, and finally the spendable amount, which resolves
    /// the future.
    ///
    /// NOTE(review): the back-off paths return `Pending` after setting
    /// `last_check` without registering a waker with `cx`, so progress
    /// relies on the future being polled again externally — confirm
    /// callers poll in a loop (e.g. under a timeout/select).
    fn poll(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        let now = Instant::now();
        if let Some(last_check) = self.last_check {
            // We already checked and still need to wait before we retry
            let elapsed = now.duration_since(last_check);
            if elapsed < self.rpc_call_delay {
                return std::task::Poll::Pending;
            }
        }
        // Get version if not set already.
        if self.version.is_none() {
            if self.get_version.is_none() {
                let fut = Box::pin(get_version(self.rpc_path.clone()));
                self.get_version = Some(fut);
            }
            let get_version = self.get_version.as_mut().unwrap();
            match get_version.as_mut().poll(cx) {
                std::task::Poll::Ready(v) => {
                    self.version = Some(v?);
                }
                std::task::Poll::Pending => return std::task::Poll::Pending,
            }
        }
        // Ensure that the peer is connected.
        if !self.peer_connected {
            if self.ensure_peer_connection.is_none() {
                let fut = Box::pin(ensure_peer_connection(
                    self.rpc_path.clone(),
                    self.peer_id.clone(),
                ));
                self.ensure_peer_connection = Some(fut);
            }
            let ensure_peer_connection = self.ensure_peer_connection.as_mut().unwrap();
            match ensure_peer_connection.as_mut().poll(cx) {
                std::task::Poll::Ready(result) => {
                    result?;
                    log::debug!("Peer {} is connected", self.peer_id.to_string());
                    self.peer_connected = true;
                }
                std::task::Poll::Pending => return std::task::Poll::Pending,
            }
        }
        // Ensure that the channel is reestablished.
        if !self.channel_ready {
            if self.billboard.is_none() {
                let fut = Box::pin(billboard(
                    self.rpc_path.clone(),
                    self.version.as_ref().unwrap().clone(),
                    self.peer_id.clone(),
                    self.scid,
                ));
                self.billboard = Some(fut);
            }
            let billboard = self.billboard.as_mut().unwrap();
            match billboard.as_mut().poll(cx) {
                std::task::Poll::Ready(result) => {
                    let result = result?;
                    // The channel is considered established when the
                    // billboard mentions either of these strings.
                    if !result.into_iter().any(|s| {
                        s.find("Channel ready").is_some()
                            || s.find("Reconnected, and reestablished").is_some()
                    }) {
                        // Reset billboard and last_check to back-off for a bit.
                        self.last_check = Some(Instant::now());
                        self.billboard = None;
                        return std::task::Poll::Pending;
                    }
                    log::debug!("Channel {} is established", self.scid);
                    self.channel_ready = true;
                }
                std::task::Poll::Pending => return std::task::Poll::Pending,
            }
        }
        // Ensure that the channel can be used to route an htlc to the peer.
        if !self.route_found {
            if self.get_route.is_none() {
                let fut = Box::pin(get_route(self.rpc_path.clone(), self.peer_id.clone()));
                self.get_route = Some(fut);
            }
            let get_route = self.get_route.as_mut().unwrap();
            match get_route.as_mut().poll(cx) {
                std::task::Poll::Ready(route) => {
                    if route.is_ok() {
                        log::debug!("Peer {:?} is routable", self.peer_id.to_string());
                        self.route_found = true;
                    } else {
                        // Reset get_route and last_check to back-off for a bit.
                        self.last_check = Some(Instant::now());
                        self.get_route = None;
                        return std::task::Poll::Pending;
                    };
                }
                std::task::Poll::Pending => return std::task::Poll::Pending,
            }
        }
        // Return the amount that can be send via this channel.
        if self.spendable_msat.is_none() {
            let fut = Box::pin(spendable_msat(
                self.rpc_path.clone(),
                self.version.as_ref().unwrap().clone(),
                self.peer_id.clone(),
                self.scid,
            ));
            self.spendable_msat = Some(fut);
        }
        let spendable_msat = self.spendable_msat.as_mut().unwrap();
        match spendable_msat.as_mut().poll(cx) {
            std::task::Poll::Ready(amount) => std::task::Poll::Ready(amount),
            std::task::Poll::Pending => std::task::Poll::Pending,
        }
    }
}
impl AwaitableChannel {
    /// Create a tracker for channel `scid` with peer `peer_id`,
    /// talking to lightningd through the socket at `rpc_path`.
    ///
    /// Note: the body performs no awaiting; the function is `async`
    /// only to keep the existing call sites (`.await`) working.
    pub async fn new(peer_id: PublicKey, scid: ShortChannelId, rpc_path: PathBuf) -> Self {
        AwaitableChannel {
            peer_id,
            scid,
            rpc_path,
            version: None,
            peer_connected: false,
            channel_ready: false,
            route_found: false,
            last_check: None,
            rpc_call_delay: Duration::from_millis(RPC_CALL_DELAY_MSEC),
            get_version: None,
            ensure_peer_connection: None,
            billboard: None,
            get_route: None,
            spendable_msat: None,
        }
    }
}
async fn connect(rpc_path: impl AsRef<Path>) -> Result<ClnRpc, Error> {
ClnRpc::new(rpc_path)
.await
.map_err(|e| Error::Service(format!("cant connect to rpc {}", e.to_string())))
}
/// Try to connect to the peer if we are not already connected.
async fn ensure_peer_connection(
    rpc_path: impl AsRef<Path>,
    peer_id: PublicKey,
) -> Result<(), Error> {
    log::debug!("Checking if peer {} is connected", peer_id);
    let mut rpc = connect(rpc_path).await?;

    // Look the peer up; a peer the node has never heard of is an error.
    let listpeers = rpc
        .call_typed(&cln_rpc::model::requests::ListpeersRequest {
            id: Some(peer_id),
            level: None,
        })
        .await?;
    let peer = listpeers
        .peers
        .first()
        .ok_or(Error::PeerUnknown(peer_id.to_string()))?;

    // Nothing to do if the connection is already up.
    if peer.connected {
        return Ok(());
    }

    log::debug!("Peer {} is not connected, connecting", peer_id);
    let req = ConnectRequest {
        id: peer_id.to_string(),
        host: None,
        port: None,
    };
    let res = rpc
        .call_typed(&req)
        .await
        .map_err(|_| Error::PeerConnectionFailure(peer_id.to_string()))?;
    log::debug!("Connect call to {} resulted in {:?}", peer_id, res);
    Ok(())
}
/// Ask lightningd for its version string via `getinfo`.
async fn get_version(rpc_path: impl AsRef<Path>) -> Result<String, Error> {
    let mut rpc = connect(rpc_path).await?;
    let response = rpc.call_typed(&GetinfoRequest {}).await?;
    Ok(response.version)
}
async fn billboard(
rpc_path: impl AsRef<Path>,
version: String,
peer_id: PublicKey,
scid: ShortChannelId,
) -> Result<Vec<String>, Error> {
let mut rpc = connect(rpc_path).await?;
if *version >= *"v23.05gl1" {
Ok(rpc
.call_typed(&ListpeerchannelsRequest { id: Some(peer_id) })
.await
.map_err(|e| Error::Rpc(e))?
.channels
.into_iter()
.filter(|c| {
c.short_channel_id == Some(scid)
|| c.alias.clone().and_then(|a| a.local) == Some(scid)
})
.nth(0)
.ok_or(Error::Channel(
"Could not find the channel in listpeerchannels",
))?
.status
.ok_or(Error::Channel("Status not found"))?)
} else {
return Err(Error::Service(format!(
"Not supported in this version of core-lightning: {}, need at least v23.05gl1",
version,
)));
}
}
/// Probe for a direct (single-hop) route to `peer_id` carrying a
/// minimal amount; success means the channel is usable for routing.
async fn get_route(
    rpc_path: impl AsRef<Path>,
    peer_id: PublicKey,
) -> Result<GetrouteResponse, Error> {
    let request = GetrouteRequest {
        id: peer_id,
        amount_msat: cln_rpc::primitives::Amount::from_msat(1),
        riskfactor: 1,
        cltv: None,
        fromid: None,
        fuzzpercent: Some(0),
        exclude: None,
        // Only a direct route to the peer counts.
        maxhops: Some(1),
    };
    let mut rpc = connect(rpc_path).await?;
    let response = rpc.call_typed(&request).await?;
    Ok(response)
}
async fn spendable_msat(
rpc_path: impl AsRef<Path>,
version: String,
peer_id: PublicKey,
scid: ShortChannelId,
) -> Result<Amount, Error> {
let mut rpc = connect(rpc_path).await?;
if *version >= *"v23.05gl1" {
Ok(rpc
.call_typed(&ListpeerchannelsRequest { id: Some(peer_id) })
.await
.map_err(|e| Error::Rpc(e))?
.channels
.into_iter()
.filter(|c| {
c.short_channel_id == Some(scid)
|| c.alias.clone().and_then(|a| a.local) == Some(scid)
})
.nth(0)
.ok_or(Error::Channel(
"Could not find the channel in listpeerchannels",
))?
.spendable_msat
.ok_or(Error::Channel("No amount found"))?)
} else {
return Err(Error::Service(format!(
"Not supported in this version of core-lightning: {}, need at least v23.05gl1",
version,
)));
}
}
pub fn assert_send<T: Send>(_: T) {}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/storage.rs | libs/gl-plugin/src/storage.rs | //! A backend to store the signer state in.
pub use gl_client::persist::State;
use log::debug;
use thiserror::Error;
use tonic::async_trait;
/// Errors that can occur while reading or writing the signer state.
#[derive(Debug, Error)]
pub enum Error {
    /// underlying database error
    #[error("database error: {0}")]
    Sled(#[from] ::sled::Error),
    /// The stored state could not be (de)serialized.
    #[error("state corruption: {0}")]
    CorruptState(#[from] serde_json::Error),
    /// Catch-all for errors from other sources.
    #[error("unhandled error: {0}")]
    Other(Box<dyn std::error::Error + Send + Sync>),
}
/// Abstraction over the backend used to persist the signer state.
#[async_trait]
pub trait StateStore: Send + Sync {
    /// Persist `state`, replacing any previously stored version.
    async fn write(&self, state: State) -> Result<(), Error>;
    /// Load the stored state, or a fresh one if none exists yet.
    async fn read(&self) -> Result<State, Error>;
}
/// A StateStore that uses `sled` as its storage backend
pub struct SledStateStore {
    // Handle to the on-disk sled database.
    db: sled::Db,
}
impl SledStateStore {
    /// Open (or create) the sled database at `path`.
    pub fn new(path: std::path::PathBuf) -> Result<SledStateStore, sled::Error> {
        sled::open(path).map(|db| Self { db })
    }
}
use sled::transaction::TransactionError;
impl From<TransactionError<Error>> for Error {
    /// Flatten sled transaction errors: aborts already carry our own
    /// error, storage failures map onto the `Sled` variant.
    fn from(e: TransactionError<Error>) -> Self {
        match e {
            TransactionError::Abort(inner) => inner,
            TransactionError::Storage(storage) => Self::Sled(storage),
        }
    }
}
const SLED_KEY: &str = "signer_state";
#[async_trait]
impl StateStore for SledStateStore {
    /// Load the JSON-serialized state from sled, initializing a fresh
    /// state when the key has never been written.
    async fn read(&self) -> Result<State, Error> {
        let raw = self.db.get(SLED_KEY)?;
        match raw {
            Some(bytes) => Ok(serde_json::from_slice(&bytes)?),
            None => {
                debug!("Initializing a new signer state");
                Ok(State::new())
            }
        }
    }

    /// Serialize `state` to JSON and store it under the fixed key.
    async fn write(&self, state: State) -> Result<(), Error> {
        let raw = serde_json::to_vec(&state)?;
        self.db.insert(SLED_KEY, raw)?;
        Ok(())
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/lsp.rs | libs/gl-plugin/src/lsp.rs | //! LSP integrations and related code.
use crate::{
tlv::{self, ProtoBufMut},
Plugin,
};
use anyhow::Context;
use bytes::BufMut;
use cln_rpc::primitives::{Amount, ShortChannelId};
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// Decoded onion payload of an HTLC as delivered by the
/// `htlc_accepted` hook.
#[derive(Debug, Deserialize)]
#[allow(unused)]
struct Onion {
    // Raw TLV payload of this hop.
    payload: tlv::SerializedTlvStream,
    short_channel_id: Option<ShortChannelId>,
    // Amount the onion says should be forwarded/delivered.
    forward_msat: Option<Amount>,
    outgoing_cltv_value: Option<u32>,
    #[serde(deserialize_with = "from_hex")]
    shared_secret: Vec<u8>,
    #[serde(deserialize_with = "from_hex")]
    next_onion: Vec<u8>,
    total_msat: Option<Amount>,
}
/// The HTLC itself as delivered by the `htlc_accepted` hook.
#[derive(Debug, Deserialize)]
#[allow(unused)]
struct Htlc {
    short_channel_id: ShortChannelId,
    //id: u64,
    // Amount actually carried by the HTLC (may be less than the onion
    // amount when an LSP deducts a fee).
    amount_msat: Amount,
    //cltv_expiry: u32,
    //cltv_expiry_relative: u16,
    #[serde(deserialize_with = "from_hex")]
    payment_hash: Vec<u8>,
}
/// Full request payload of the `htlc_accepted` hook.
#[derive(Debug, Deserialize)]
#[allow(unused)]
struct HtlcAcceptedRequest {
    onion: Onion,
    htlc: Htlc,
    //forward_to: Vec<u8>,
}
/// Response to the `htlc_accepted` hook; `payload`, if set, replaces
/// the onion payload lightningd continues with.
#[derive(Debug, Serialize, Default)]
struct HtlcAcceptedResponse {
    result: String,
    #[serde(skip_serializing_if = "Option::is_none", serialize_with = "to_hex")]
    payload: Option<Vec<u8>>,
}
// TLV type of the forward amount (`amt_to_forward`) in the payload.
const TLV_FORWARD_AMT: u64 = 2;
// TLV type of the payment secret / payment_data record.
const TLV_PAYMENT_SECRET: u64 = 8;
/// A macro to break out of the current hook flow and return a `continue`
/// signal to core-lightning. This is to be used when we don't know how to
/// handle a given payload or as a shortcut in case we could identify that the
/// incoming htlc is not part of a LSP jit channel opening.
macro_rules! unwrap_or_continue {
    ($res:expr) => {
        match $res {
            Ok(x) => x,
            Err(e) => {
                // Log why we bailed out, then short-circuit the
                // enclosing handler with a `continue` result.
                log::debug!("Lsp-plugin continue, reason: {}", e.to_string());
                return Ok(serde_json::to_value(HtlcAcceptedResponse {
                    result: "continue".to_string(),
                    ..Default::default()
                })
                .expect("Could not serialize json value"));
            }
        }
    };
}
/// Hook handler for `htlc_accepted`. Detects a potential LSP just-in-time
/// channel payment (the HTLC carries less than the onion says, because
/// the LSP deducted its fee) and, if found, rewrites the onion payload's
/// forward amount and total_msat so lightningd accepts the reduced HTLC.
/// In every other case the HTLC continues unmodified.
pub async fn on_htlc_accepted(plugin: Plugin, v: Value) -> Result<Value, anyhow::Error> {
    let req: HtlcAcceptedRequest = unwrap_or_continue!(serde_json::from_value(v));
    log::debug!("Decoded {:?}", &req);
    let htlc_amt = req.htlc.amount_msat;
    let onion_amt = unwrap_or_continue!(req.onion.forward_msat.ok_or(format!(
        "payload={} is missing forward_msat",
        &req.onion.payload
    )));
    // An HTLC smaller than the onion amount is the LSP fee-deduction
    // signature; equal amounts are the normal case.
    let res = if htlc_amt.msat() < onion_amt.msat() {
        log::info!(
            "Potential JIT LSP payment detected: htlc_amount={}msat < onion_amount={}msat",
            htlc_amt.msat(),
            onion_amt.msat()
        );
        // Rewrite the forward amount to what actually arrived.
        let mut payload = req.onion.payload.clone();
        payload.set_tu64(TLV_FORWARD_AMT, htlc_amt.msat());
        let payment_secret = unwrap_or_continue!(payload.get(TLV_PAYMENT_SECRET).ok_or(format!(
            "payload={} is missing payment_secret",
            &payload.to_string()
        )));
        // Look up the invoice to learn the expected total amount.
        let mut rpc = cln_rpc::ClnRpc::new(plugin.configuration().rpc_file).await?;
        let res: cln_rpc::model::responses::ListinvoicesResponse = rpc
            .call_typed(&cln_rpc::model::requests::ListinvoicesRequest {
                payment_hash: Some(hex::encode(&req.htlc.payment_hash)),
                label: None,
                offer_id: None,
                invstring: None,
                start: None,
                index: None,
                limit: None,
            })
            .await?;
        let invoice = unwrap_or_continue!(res.invoices.first().ok_or(format!(
            "no invoice matching incoming HTLC payment_hash={} found",
            hex::encode(&req.htlc.payment_hash),
        )));
        let total_msat = unwrap_or_continue!(invoice
            .amount_msat
            .ok_or("invoice has no total amount msat"));
        // Rebuild the payment_data record: 32-byte secret followed by
        // the truncated total_msat from the invoice.
        let mut ps = bytes::BytesMut::new();
        ps.put(&payment_secret.value[0..32]);
        ps.put_tu64(total_msat.msat());
        payload.set_bytes(TLV_PAYMENT_SECRET, ps);
        log::info!(
            "Amended onion payload with forward_amt={}msat and total_msat={}msat (from invoice)",
            htlc_amt.msat(),
            total_msat.msat(),
        );
        let payload = tlv::SerializedTlvStream::to_bytes(payload);
        log::debug!("Serialized payload: {}", hex::encode(&payload));
        // Item `use`s are block-scoped, so this import is in effect
        // for the `to_bytes` call above despite its position.
        use tlv::ToBytes;
        HtlcAcceptedResponse {
            result: "continue".to_string(),
            payload: Some(payload),
        }
    } else {
        log::info!("HTLC amount matches onion payload amount, deferring to lightningd");
        HtlcAcceptedResponse {
            result: "continue".to_string(),
            ..Default::default()
        }
    };
    serde_json::to_value(res).context("serialize result")
}
use hex::FromHex;
use serde::{Deserializer, Serializer};
/// Serializes `buffer` to a lowercase hex string.
pub fn to_hex<T, S>(buffer: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
where
    T: AsRef<[u8]>,
    S: Serializer,
{
    if let Some(bytes) = buffer {
        serializer.serialize_str(&hex::encode(bytes))
    } else {
        serializer.serialize_none()
    }
}
/// Deserializes a lowercase hex string to a `Vec<u8>`.
pub fn from_hex<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
where
    D: Deserializer<'de>,
{
    use serde::de::Error;
    let s = String::deserialize(deserializer)?;
    Vec::from_hex(s).map_err(|err| Error::custom(err.to_string()))
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/hsm.rs | libs/gl-plugin/src/hsm.rs | //! Service used to talk to the `hsmd` that is passing us the signer
//! requests.
use crate::config::NodeInfo;
use crate::pb::{hsm_server::Hsm, Empty, HsmRequest, HsmResponse, NodeConfig};
use crate::stager;
use anyhow::{Context, Result};
use futures::TryFutureExt;
use log::{debug, info, trace, warn};
use std::path::PathBuf;
use std::sync::Arc;
use tonic::{Request, Response, Status};
/// The StagingHsmServer is used by the plugin to receive incoming requests
/// from the hsmproxy and stages the requests for clients of the Node
/// interface to stream and reply to.
#[derive(Clone)]
pub struct StagingHsmServer {
    // Shared stage holding the pending signer requests.
    stage: Arc<stager::Stage>,
    // Unix socket path that `run` listens on.
    hsmd_sock_path: PathBuf,
    // Stashed node data used to answer the init message locally.
    node_info: NodeInfo,
    // Node configuration, including canned startup responses.
    node_config: NodeConfig,
}
impl StagingHsmServer {
    /// Create a server that listens on `hsmd_sock_path` and stages
    /// incoming signer requests onto `stage`.
    pub fn new(
        hsmd_sock_path: PathBuf,
        stage: Arc<stager::Stage>,
        node_info: NodeInfo,
        node_config: NodeConfig,
    ) -> StagingHsmServer {
        StagingHsmServer {
            stage,
            hsmd_sock_path,
            node_info,
            node_config,
        }
    }

    /// We have some canned responses from the signer, this gives us access.
    ///
    /// Takes the raw request bytes (`&[u8]` so callers may pass any
    /// byte slice, not just a `&Vec<u8>`) and returns the matching
    /// canned response, if one was configured at startup.
    fn find_canned_response(&self, msg: &[u8]) -> Option<Vec<u8>> {
        self.node_config
            .startupmsgs
            .iter()
            .find(|m| m.request == msg)
            .map(|m| m.response.clone())
    }
}
#[tonic::async_trait]
impl Hsm for StagingHsmServer {
    /// Handle one signer request from the hsmproxy: answer locally if
    /// possible (canned responses, init, dev-memleak), otherwise stage
    /// the request and wait for a connected signer's reply.
    async fn request(&self, request: Request<HsmRequest>) -> Result<Response<HsmResponse>, Status> {
        let req = request.into_inner();
        debug!("Received request from hsmproxy: {:?}", req);
        // Start by looking in the canned responses and return it if it is known
        if let Some(response) = self.find_canned_response(&req.raw) {
            debug!(
                "Returning canned response={:?} for request={:?}",
                response, req.raw
            );
            return Ok(Response::new(HsmResponse {
                request_id: req.request_id,
                raw: response,
                signer_state: Vec::new(),
                error: "".into(),
            }));
        } else if req.get_type() == 11 {
            // NOTE(review): message type 11 is answered with the
            // stashed init message — confirm against the hsmd wire
            // protocol type numbering.
            debug!("Returning stashed init msg: {:?}", self.node_info.initmsg);
            return Ok(Response::new(HsmResponse {
                request_id: req.request_id,
                raw: self.node_info.initmsg.clone(),
                signer_state: Vec::new(), // the signerproxy doesn't care about state
                error: "".into(),
            }));
        } else if req.get_type() == 33 {
            debug!("Returning stashed dev-memleak response");
            return Ok(Response::new(HsmResponse {
                request_id: req.request_id,
                raw: vec![0, 133, 0],
                signer_state: Vec::new(), // the signerproxy doesn't care about state
                error: "".into(),
            }));
        }
        // Everything else must be answered by a real signer: stage the
        // request and block until the response arrives.
        let mut chan = match self.stage.send(req).await {
            Err(e) => {
                return Err(Status::unknown(format!(
                    "Error while queuing request from node: {:?}",
                    e
                )))
            }
            Ok(c) => c,
        };
        let res = match chan.recv().await {
            None => {
                return Err(Status::unknown(format!(
                    "Channel closed while waiting for response",
                )))
            }
            Some(r) => r,
        };
        Ok(Response::new(res))
    }

    /// Liveness check used by the hsmproxy.
    async fn ping(&self, _request: Request<Empty>) -> Result<Response<Empty>, Status> {
        trace!("Got a ping");
        Ok(Response::new(Empty::default()))
    }
}
impl StagingHsmServer {
    /// Bind the configured unix socket (removing a stale socket file if
    /// present) and serve the grpc `Hsm` interface on it until the
    /// server shuts down or fails.
    pub async fn run(self) -> Result<()> {
        // Resolve the socket path relative to the current directory.
        let mut path = std::path::PathBuf::new();
        path.push(std::env::current_dir().unwrap());
        path.push(&self.hsmd_sock_path);
        info!(
            "Configuring hsmd interface to listen on {}",
            path.to_str().unwrap()
        );
        std::fs::create_dir_all(std::path::Path::new(&path).parent().unwrap())?;
        // A leftover socket from a previous run would make bind fail.
        if path.exists() {
            warn!(
                "Socket path {} already exists, deleting",
                path.to_string_lossy()
            );
            std::fs::remove_file(&path).context("removing stale hsmd socket")?;
        }
        // Adapt the unix listener into the connection stream tonic
        // expects.
        let incoming = {
            let uds = tokio::net::UnixListener::bind(path)?;
            async_stream::stream! {
                loop {
                    yield uds.accept().map_ok(|(st, _)| crate::unix::UnixStream(st)).await;
                }
            }
        };
        info!("HSM server interface starting.");
        tonic::transport::Server::builder()
            .add_service(crate::pb::hsm_server::HsmServer::new(self))
            .serve_with_incoming(incoming)
            .await
            .context("serving HsmServer interface")
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/stager.rs | libs/gl-plugin/src/stager.rs | /// A simple staging mechanism for incoming requests so we can invert from
/// pull to push. Used by `hsmproxy` to stage requests that can then
/// asynchronously be retrieved and processed by one or more client
/// devices.
use crate::pb;
use anyhow::{anyhow, Error};
use log::{debug, trace, warn};
use std::collections;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use tokio::sync::{broadcast, mpsc, Mutex};
/// Staging area for pending signer requests: requests are stored until
/// a response arrives and broadcast to all attached signer streams.
#[derive(Debug)]
pub struct Stage {
    // Pending requests keyed by request_id.
    requests: Mutex<collections::HashMap<u32, Request>>,
    // Broadcast channel notifying attached streams about new requests.
    notify: broadcast::Sender<Request>,
    // Number of currently attached signer streams.
    hsm_connections: Arc<AtomicUsize>,
}
/// A staged request together with the channel its response should be
/// delivered on.
#[derive(Clone, Debug)]
pub struct Request {
    pub request: pb::HsmRequest,
    pub response: mpsc::Sender<pb::HsmResponse>,
    // When the request was staged; used to log the signer round-trip.
    pub start_time: tokio::time::Instant,
}
impl Stage {
pub fn new() -> Self {
let (notify, _) = broadcast::channel(1000);
Stage {
requests: Mutex::new(collections::HashMap::new()),
notify: notify,
hsm_connections: Arc::new(AtomicUsize::new(0)),
}
}
pub async fn send(
&self,
request: pb::HsmRequest,
) -> Result<mpsc::Receiver<pb::HsmResponse>, Error> {
let mut requests = self.requests.lock().await;
let (response, receiver): (
mpsc::Sender<pb::HsmResponse>,
mpsc::Receiver<pb::HsmResponse>,
) = mpsc::channel(1);
let r = Request {
request,
response,
start_time: tokio::time::Instant::now(),
};
requests.insert(r.request.request_id, r.clone());
if let Err(_) = self.notify.send(r) {
warn!("Error notifying hsmd request stream, likely lost connection.");
}
Ok(receiver)
}
pub async fn mystream(&self) -> StageStream {
let requests = self.requests.lock().await;
self.hsm_connections.fetch_add(1, Ordering::Relaxed);
StageStream {
backlog: requests.values().map(|e| e.clone()).collect(),
bcast: self.notify.subscribe(),
hsm_connections: self.hsm_connections.clone(),
}
}
pub async fn respond(&self, response: pb::HsmResponse) -> Result<(), Error> {
let mut requests = self.requests.lock().await;
match requests.remove(&response.request_id) {
Some(req) => {
debug!(
"Response for request_id={}, signer_rtt={}s, outstanding requests count={}",
response.request_id,
req.start_time.elapsed().as_secs_f64(),
requests.len()
);
if let Err(e) = req.response.send(response).await {
Err(anyhow!("Error sending request to requester: {:?}", e))
} else {
Ok(())
}
}
None => {
trace!(
"Request {} not found, is this a duplicate result?",
response.request_id
);
Ok(())
}
}
}
pub async fn is_stuck(&self) -> bool {
let sticky_types: Vec<u16> = vec![5, 28];
let sticky: Vec<Request> = self
.requests
.lock()
.await
.values()
.filter(|r| {
let head: [u16; 2] = [r.request.raw[0].into(), r.request.raw[1].into()];
let typ = head[0] << 8 | head[1];
sticky_types.contains(&typ)
})
.map(|r| r.clone())
.collect();
trace!("Found {:?} sticky requests.", sticky);
sticky.len() != 0
}
}
/// A consumer-side stream over the stage: replays the backlog captured
/// at creation time, then follows the live broadcast channel.
pub struct StageStream {
    // Requests that were already staged when this stream was created.
    backlog: Vec<Request>,
    // Live feed of newly staged requests.
    bcast: broadcast::Receiver<Request>,
    // Shared counter of attached streams, decremented on drop.
    hsm_connections: Arc<AtomicUsize>,
}
impl StageStream {
    /// Return the next request: first drain the backlog captured when
    /// the stream was created, then wait on the broadcast channel.
    pub async fn next(&mut self) -> Result<Request, Error> {
        if !self.backlog.is_empty() {
            // `Vec::remove` already returns the element; no need to
            // clone it out first.
            return Ok(self.backlog.remove(0));
        }
        match self.bcast.recv().await {
            Ok(r) => Ok(r),
            Err(e) => Err(anyhow!("error waiting for more requests: {:?}", e)),
        }
    }
}
impl Drop for StageStream {
    // Keep the shared connection count accurate when a signer stream
    // goes away.
    fn drop(&mut self) {
        self.hsm_connections.fetch_sub(1, Ordering::Relaxed);
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use std::time::Duration;
    use tokio::time::sleep as delay_for;
    /// End-to-end check: stage requests both before and after two
    /// consumer streams attach, and verify that every staged request
    /// eventually receives a response from one of the consumers.
    #[tokio::test]
    async fn test_live_stream() {
        let stage = Stage::new();
        let mut responses = vec![];
        // Stage 10 requests before any consumer exists; these form the
        // backlog the streams must replay.
        for i in 0..10 {
            responses.push(
                stage
                    .send(pb::HsmRequest {
                        request_id: i,
                        context: None,
                        raw: vec![],
                        signer_state: vec![],
                        requests: vec![],
                    })
                    .await
                    .unwrap(),
            );
        }
        let mut s1 = stage.mystream().await;
        let mut s2 = stage.mystream().await;
        // Two competing consumers answering every request they see.
        let f1 = tokio::spawn(async move {
            while let Ok(r) = s1.next().await {
                eprintln!("hsmd {} is handling request {}", 1, r.request.request_id);
                match r
                    .response
                    .send(pb::HsmResponse {
                        request_id: r.request.request_id,
                        raw: vec![],
                        signer_state: vec![],
                        error: "".into(),
                    })
                    .await
                {
                    Ok(_) => {}
                    Err(e) => eprintln!("errror {:?}", e),
                }
                delay_for(Duration::from_millis(10)).await;
            }
        });
        let f2 = tokio::spawn(async move {
            while let Ok(r) = s2.next().await {
                eprintln!("hsmd {} is handling request {}", 2, r.request.request_id);
                match r
                    .response
                    .send(pb::HsmResponse {
                        request_id: r.request.request_id,
                        raw: vec![],
                        signer_state: vec![],
                        error: "".into(),
                    })
                    .await
                {
                    Ok(_) => {}
                    Err(e) => eprintln!("errror {:?}", e),
                }
                delay_for(Duration::from_millis(10)).await;
            }
        });
        // Stage more requests while the consumers are live.
        for i in 10..100 {
            responses.push(
                stage
                    .send(pb::HsmRequest {
                        request_id: i,
                        context: None,
                        raw: vec![],
                        signer_state: vec![],
                        requests: vec![],
                    })
                    .await
                    .unwrap(),
            );
        }
        // Every staged request must get a response.
        for mut r in responses {
            let resp = r.recv().await.unwrap();
            eprintln!("Got response {:?}", resp);
        }
        // Dropping the stage closes the broadcast channel, terminating
        // both consumer loops.
        drop(stage);
        f1.await.unwrap();
        f2.await.unwrap();
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/context.rs | libs/gl-plugin/src/context.rs | //! Manage a signature request context.
//!
//! The signature request context is composed of any currently pending grpc request (serialized as byte string), along with a public key (corresponding to the caller's mTLS certificate), an attestation (signature) by the signer about the authenticity of this public key, as well as a signature from the caller's public key over the serialized payload.
//!
//! It is used by the signer to verify that:
//!
//! a) The caller is authenticated and authorized to initiate the
//! action with the grpc call.
//! b) Verify that the changes that the signer is being asked to
//! sign off actually match the authentic commands by a valid
//! caller.
use std::sync::Arc;
use tokio::sync::Mutex;
use serde::{Serialize, Deserialize};
/// A single pending, signed grpc request, as presented to the signer for
/// verification.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Request {
    // The caller's mTLS public key, identifying who issued the request.
    pubkey: Vec<u8>,
    // A signature by the caller's public key, authenticating the
    // payload.
    signature: Vec<u8>,
    // The caller's rune that is used to authorize the request.
    rune: Vec<u8>,
    // The serialized grpc call, transferred as serialized String to
    // avoid breaking the signature.
    payload: bytes::Bytes,
    // The URI that the request asked for.
    uri: String,
    // Timestamp in millis; `None` if the caller did not provide one.
    timestamp: Option<u64>,
}
impl Request {
    /// Assembles a pending request from its raw components; no validation
    /// is performed here.
    pub fn new(
        uri: String,
        payload: bytes::Bytes,
        pubkey: Vec<u8>,
        signature: Vec<u8>,
        timestamp: Option<u64>,
        rune: Vec<u8>,
    ) -> Self {
        Self {
            pubkey,
            signature,
            rune,
            payload,
            uri,
            timestamp,
        }
    }
}
impl From<Request> for crate::pb::PendingRequest {
    /// Converts the internal request into its protobuf representation; a
    /// missing timestamp becomes `0`.
    fn from(r: crate::context::Request) -> Self {
        let crate::context::Request {
            pubkey,
            signature,
            rune,
            payload,
            uri,
            timestamp,
        } = r;
        Self {
            pubkey,
            signature,
            request: payload.to_vec(),
            uri,
            timestamp: timestamp.unwrap_or_default(),
            rune,
        }
    }
}
/// Tracks the set of grpc requests that are currently in flight, so the
/// signer can match signature requests against authentic caller commands.
#[derive(Clone, Debug)]
pub struct Context {
    // List of currently pending requests. Shared behind an async mutex so
    // clones of the context observe the same set.
    requests: Arc<Mutex<Vec<Request>>>,
}
impl Context {
    /// Creates an empty context with no pending requests.
    pub fn new() -> Self {
        Self {
            requests: Arc::new(Mutex::new(Vec::new())),
        }
    }

    /// Returns a point-in-time copy of all currently pending requests.
    pub async fn snapshot(&self) -> Vec<Request> {
        self.requests.lock().await.clone()
    }

    /// Registers a new pending request.
    pub async fn add_request(&self, r: Request) {
        self.requests.lock().await.push(r);
    }

    /// Removes every pending request carrying the same signature as `r`.
    pub async fn remove_request(&self, r: Request) {
        self.requests
            .lock()
            .await
            .retain(|pending| pending.signature != r.signature)
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/requests.rs | libs/gl-plugin/src/requests.rs | pub use clightningrpc::requests::*;
use serde::{Serialize, Serializer};
/// An on-chain transaction output, addressed by transaction id and output
/// index. Serialized as "txid:outnum" (see the `Serialize` impl below...
/// the hex-encoded form).
#[derive(Debug, Clone)]
pub struct Outpoint {
    pub txid: Vec<u8>,
    pub outnum: u16,
}
/// An amount with an explicit unit, or one of the special keywords
/// ("all"/"any") accepted by c-lightning RPC calls.
#[derive(Debug, Clone)]
pub enum Amount {
    Millisatoshi(u64),
    Satoshi(u64),
    Bitcoin(u64),
    // Spend everything available.
    All,
    // Let the callee pick the amount (e.g. any-amount invoices).
    Any,
}
impl Serialize for Amount {
    /// Serializes as the textual form the RPC expects: an integer with its
    /// unit suffix ("msat"/"sat"/"btc"), or the keywords "all"/"any".
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            Amount::Millisatoshi(a) => serializer.serialize_str(&format!("{}msat", a)),
            Amount::Satoshi(a) => serializer.serialize_str(&format!("{}sat", a)),
            Amount::Bitcoin(a) => serializer.serialize_str(&format!("{}btc", a)),
            // `format!` over a static string was a needless allocation
            // (clippy::useless_format); serialize the literals directly.
            Amount::All => serializer.serialize_str("all"),
            Amount::Any => serializer.serialize_str("any"),
        }
    }
}
/// A feerate, either as one of c-lightning's named presets or as an
/// explicit per-kilobyte / per-kiloweight rate.
#[derive(Clone, Debug)]
pub enum Feerate {
    Normal,
    Slow,
    Urgent,
    PerKw(u64),
    PerKb(u64),
}
impl Serialize for Feerate {
    /// Serializes as the textual form the RPC expects: a named preset
    /// ("normal"/"slow"/"urgent") or an explicit "<n>perkb"/"<n>perkw".
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let repr = match self {
            Feerate::Normal => "normal".to_string(),
            Feerate::Slow => "slow".to_string(),
            Feerate::Urgent => "urgent".to_string(),
            Feerate::PerKb(n) => format!("{}perkb", n),
            Feerate::PerKw(n) => format!("{}perkw", n),
        };
        serializer.serialize_str(&repr)
    }
}
impl Serialize for Outpoint {
    /// Serializes as the canonical "txid:outnum" string with the txid
    /// hex-encoded.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(&format!("{}:{}", hex::encode(&self.txid), self.outnum))
    }
}
/// Parameters for c-lightning's `withdraw` call: send on-chain funds to
/// `destination`.
#[derive(Debug, Clone, Serialize)]
pub struct Withdraw {
    pub destination: String,
    // Serialized under the legacy `satoshi` key the RPC expects.
    #[serde(rename = "satoshi")]
    pub amount: Amount,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub feerate: Option<Feerate>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub minconf: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub utxos: Option<Vec<Outpoint>>,
}
/// Parameters for c-lightning's `fundchannel` call: open a channel of
/// `amount` to node `id`.
#[derive(Debug, Clone, Serialize)]
pub struct FundChannel {
    pub id: String,
    pub amount: Amount,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub feerate: Option<Feerate>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub announce: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub minconf: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub close_to: Option<String>,
}
/// Parameters for c-lightning's `close` call: close the channel shared
/// with peer `node_id`.
#[derive(Debug, Clone, Serialize)]
pub struct CloseChannel {
    #[serde(rename = "id")]
    pub node_id: String,
    // Seconds to wait for a mutual close before closing unilaterally.
    #[serde(rename = "unilateraltimeout", skip_serializing_if = "Option::is_none")]
    pub timeout: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub destination: Option<String>,
}
/// Parameters for c-lightning's `invoice` call.
#[derive(Debug, Clone, Serialize)]
pub struct Invoice {
    pub amount_msat: cln_rpc::primitives::AmountOrAny,
    pub description: String,
    pub label: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expiry: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fallbacks: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub preimage: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cltv: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub deschashonly: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exposeprivatechannels: Option<Vec<String>>,
    // Developer-only routehint override; uses the `dev-routes` key and the
    // `RoutehintHopDev` field naming (see below... the Dev variant struct).
    #[serde(rename = "dev-routes", skip_serializing_if = "Option::is_none")]
    pub dev_routes: Option<Vec<Vec<RoutehintHopDev>>>,
}
/// Parameters for c-lightning's `listfunds` call (takes no arguments).
#[derive(Debug, Clone, Serialize)]
pub struct ListFunds {}
/// Parameters for c-lightning's `pay` call.
#[derive(Debug, Clone, Serialize)]
pub struct Pay {
    pub bolt11: String,
    // Serialized under the legacy `msatoshi` key the RPC expects.
    #[serde(rename = "msatoshi", skip_serializing_if = "Option::is_none")]
    pub amount: Option<Amount>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retry_for: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maxfeepercent: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maxfee: Option<Amount>,
}
/// Parameters for c-lightning's `listpays` call; filters are optional.
#[derive(Debug, Clone, Serialize)]
pub struct ListPays {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bolt11: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub payment_hash: Option<String>,
}
/// Parameters for c-lightning's `listinvoices` call; filters are optional.
#[derive(Debug, Clone, Serialize)]
pub struct ListInvoices {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub label: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub invstring: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub payment_hash: Option<String>,
}
/// A single hop in a route hint, using the compact key names.
#[derive(Debug, Clone, Serialize)]
pub struct RoutehintHop {
    pub id: String,
    pub scid: String,
    pub feebase: u64,
    pub feeprop: u32,
    pub expirydelta: u16,
}
// This variant is used by dev-routes, using slightly different key names.
// TODO Remove once we have consolidated the routehint format.
#[derive(Debug, Clone, Serialize)]
pub struct RoutehintHopDev {
    pub id: String,
    pub short_channel_id: String,
    pub fee_base_msat: Option<u64>,
    pub fee_proportional_millionths: u32,
    pub cltv_expiry_delta: u16,
}
use std::collections::HashMap;
/// Parameters for c-lightning's `keysend` call: a spontaneous payment to
/// `destination` without an invoice.
#[derive(Debug, Clone, Serialize)]
pub struct Keysend {
    pub destination: String,
    pub msatoshi: Amount,
    pub label: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maxfeepercent: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retry_for: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub maxdelay: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exemptfee: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub routehints: Option<Vec<Vec<RoutehintHop>>>,
    // Extra TLV entries to embed in the onion, keyed by TLV type.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extratlvs: Option<HashMap<u64, String>>,
}
/// Parameters for the `listincoming` call (takes no arguments).
#[derive(Debug, Clone, Serialize)]
pub struct ListIncoming {}
/// Parameters for the LSP invoice (JIT-channel) plugin call; see the
/// `TypedRequest` impl for the method name.
#[derive(Debug, Clone, Serialize)]
pub struct LspInvoiceRequest {
    pub lsp_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub token: Option<String>,
    pub amount_msat: cln_rpc::primitives::AmountOrAny,
    pub description: String,
    pub label: String,
}
/// Parameters for the LSP `lsps2.getinfo` plugin call.
#[derive(Debug, Clone, Serialize)]
pub struct LspGetinfoRequest {
    pub lsp_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub token: Option<String>,
}
use cln_rpc::model::TypedRequest;
impl From<crate::pb::LspInvoiceRequest> for LspInvoiceRequest {
    /// Converts the protobuf request into the JSON-RPC request type,
    /// mapping the protobuf defaults: an empty `token` becomes `None`, a
    /// zero `amount_msat` becomes "any amount".
    fn from(o: crate::pb::LspInvoiceRequest) -> LspInvoiceRequest {
        let token = if o.token.is_empty() {
            None
        } else {
            Some(o.token.clone())
        };
        let amount_msat = if o.amount_msat == 0 {
            cln_rpc::primitives::AmountOrAny::Any
        } else {
            cln_rpc::primitives::AmountOrAny::Amount(
                cln_grpc::pb::Amount { msat: o.amount_msat }.into(),
            )
        };
        LspInvoiceRequest {
            lsp_id: o.lsp_id,
            token,
            amount_msat,
            description: o.description,
            label: o.label,
        }
    }
}
/// Binds the LSP invoice request to its plugin RPC method and response
/// type.
impl TypedRequest for LspInvoiceRequest {
    type Response = super::responses::InvoiceResponse;
    fn method(&self) -> &str {
        // TODO Rename after the CLN rename has been deployed.
        // "lsps-lsps2-invoice"
        "lsps-jitchannel"
    }
}
/// Binds the LSP getinfo request to its plugin RPC method and response
/// type.
impl TypedRequest for LspGetinfoRequest {
    type Response = super::responses::LspGetinfoResponse;
    fn method(&self) -> &str {
        "lsps-lsps2-getinfo"
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// Exercises the protobuf -> JSON-RPC conversion for both the
    /// "empty defaults" and the "fully populated" request shapes.
    #[test]
    fn test_invoice_response() {
        let cases = [
            crate::pb::LspInvoiceRequest {
                lsp_id: "lsp_id".to_owned(),
                token: "".to_owned(),
                amount_msat: 0,
                description: "description".to_owned(),
                label: "label".to_owned(),
            },
            crate::pb::LspInvoiceRequest {
                lsp_id: "lsp_id".to_owned(),
                token: "token".to_owned(),
                amount_msat: 1337,
                description: "description".to_owned(),
                label: "label".to_owned(),
            },
        ];
        for case in cases {
            let _converted: super::LspInvoiceRequest = case.into();
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/tramp.rs | libs/gl-plugin/src/tramp.rs | use crate::awaitables::{AwaitableChannel, AwaitablePeer, Error as AwaitablePeerError};
use crate::pb;
use cln_rpc::{
primitives::{Amount, PublicKey, ShortChannelId},
ClnRpc, RpcError,
};
use futures::{future::join_all, FutureExt};
use gl_util::error::{ClnRpcError, Error, ErrorCode, ErrorStatusConversionExt, RpcErrConversion};
use log::{debug, warn};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::path::PathBuf;
use std::str::FromStr;
use std::{path::Path, time::Duration};
use tokio::time::{timeout_at, Instant};
// Feature bit used to signal trampoline support.
const TRAMPOLINE_FEATURE_BIT: usize = 427;
// BOLT#4 default value:
// https://github.com/lightning/bolts/blob/master/04-onion-routing.md#max-htlc-cltv-selection
const MAX_DELAY_DEFAULT: u32 = 2016;
// The amount we overpay (in percent of the payment amount) to allow the
// trampoline node to spend some fees on routing.
const DEFAULT_OVERPAY_PERCENT: f32 = 0.5;
// TLV type used to address the bolt11 invoice in the onion payload.
const TLV_BOLT11: u64 = 33001;
// TLV type used to address the amount in msat in the onion payload, in
// case the bolt11 does not have an amount set.
const TLV_AMT_MSAT: u64 = 33003;
// Error message that CLN returns on an unknown onion error. This is the
// case when the trampoline server rejected with a custom error type.
const PAY_UNPARSEABLE_ONION_MSG: &str = "Malformed error reply";
// waitsendpay's "unparseable onion reply" error code.
const PAY_UNPARSEABLE_ONION_CODE: i32 = 202;
// How long do we wait for channels to re-establish?
const AWAIT_CHANNELS_TIMEOUT_SEC: u64 = 20;
/// Converts Core Lightning RPC errors into trampoline errors.
impl RpcErrConversion for TrampolineErrorCode {
    fn from_rpc_error(value: &RpcError) -> Self {
        // -1 is CLN's catch-all "nonspecific error" code; used here when
        // the RPC error carries no code of its own.
        TrampolineErrorCode::RpcError(value.code.unwrap_or(-1))
    }
}
/// Error codes specific to trampoline routing operations.
///
/// These error codes cover various failure scenarios in Lightning Network
/// trampoline routing, from configuration issues to payment failures.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum TrampolineErrorCode {
    /// The peer does not advertise the trampoline feature bit (427).
    FeatureNotSupported,
    /// The provided trampoline node id could not be parsed.
    InvalidNodeId,
    /// Connecting to the local core-lightning node failed.
    NetworkError,
    /// The trampoline node is not a known peer.
    UnknownPeer,
    /// Neither the invoice nor the request specified an amount.
    MissingAmount,
    /// Invoice and request specify conflicting amounts.
    AmbigousAmount,
    /// No active, usable channel to the trampoline node.
    MissingChannel,
    /// Channels to the peer cannot cover the amount plus overpay margin.
    InsufficientFunds,
    /// The bolt11 invoice is malformed (e.g. missing payment secret).
    InvalidInvoice,
    /// The trampoline node rejected the payment (unparseable onion reply).
    PeerNodeFailure,
    /// The payment failed or completed without yielding a preimage.
    PaymentFailure,
    /// Could not (re)connect to the trampoline peer.
    PeerConnectionFailure,
    /// A completed payment part carried no preimage.
    MissingPreimage,
    /// Unexpected internal failure.
    Internal,
    /// A raw core-lightning RPC error code, passed through.
    RpcError(ClnRpcError),
}
impl core::fmt::Display for TrampolineErrorCode {
    // Displays the numeric code (see `ErrorCode::code`), not the variant
    // name.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.code())
    }
}
/// Maps trampoline error codes to numeric values and back.
///
/// Error code allocation:
/// - 42701-42714: Reserved for trampoline-specific errors
/// - Other ranges: CLN RPC error codes (see comments in from_code)
impl ErrorCode for TrampolineErrorCode {
    fn code(&self) -> i32 {
        match self {
            Self::FeatureNotSupported => 42701,
            Self::InvalidNodeId => 42702,
            Self::NetworkError => 42703,
            Self::UnknownPeer => 42704,
            Self::MissingAmount => 42705,
            Self::AmbigousAmount => 42706,
            Self::MissingChannel => 42707,
            Self::InsufficientFunds => 42708,
            Self::InvalidInvoice => 42709,
            Self::PeerNodeFailure => 42710,
            Self::PaymentFailure => 42711,
            Self::PeerConnectionFailure => 42712,
            Self::MissingPreimage => 42713,
            Self::Internal => 42714,
            Self::RpcError(cln_err) => cln_err.code(),
        }
    }
    fn from_code(code: i32) -> Option<Self> {
        match code {
            42701 => Some(Self::FeatureNotSupported),
            42702 => Some(Self::InvalidNodeId),
            42703 => Some(Self::NetworkError),
            42704 => Some(Self::UnknownPeer),
            42705 => Some(Self::MissingAmount),
            42706 => Some(Self::AmbigousAmount),
            42707 => Some(Self::MissingChannel),
            42708 => Some(Self::InsufficientFunds),
            42709 => Some(Self::InvalidInvoice),
            42710 => Some(Self::PeerNodeFailure),
            // Fixed: 42711 previously mapped back to `PeerNodeFailure`,
            // breaking the `code()`/`from_code` round-trip for
            // `PaymentFailure`.
            42711 => Some(Self::PaymentFailure),
            42712 => Some(Self::PeerConnectionFailure),
            42713 => Some(Self::MissingPreimage),
            42714 => Some(Self::Internal),
            // Possible sendpay failure codes:
            // -1: Catchall nonspecific error.
            // 201: Already paid with this hash using different amount or destination.
            // 202: Unparseable onion reply.
            // 203: Permanent failure at destination.
            // 204: Failure along route; retry a different route.
            // 212: localinvreqid refers to an invalid, or used, local invoice_request.
            // Possible waitsendpay error codes:
            // -1: Catchall nonspecific error.
            // 200: Timed out before the payment could complete.
            // 202: Unparseable onion reply.
            // 203: Permanent failure at destination.
            // 204: Failure along route; retry a different route.
            // 208: A payment for payment_hash was never made and there is nothing to wait for.
            // 209: The payment already failed, but the reason for failure was not stored. This should only occur when querying failed payments on very old databases.
            // Fixed: the exclusive range `200..204` silently excluded 204
            // ("failure along route"), which both lists above document as a
            // valid failure code; use an inclusive range instead.
            -1 | 200..=204 | 208 | 209 | 212 => Some(Self::RpcError(code)),
            _ => None,
        }
    }
}
/// Maps trampoline error codes to appropriate gRPC status codes.
///
/// The mapping follows gRPC best practices:
/// - Client errors (invalid input) → InvalidArgument
/// - Precondition failures → FailedPrecondition
/// - Server errors → Internal
/// - Unknown/unclassified errors → Unknown
impl ErrorStatusConversionExt for TrampolineErrorCode {
    fn status_code(&self) -> tonic::Code {
        match self {
            // Precondition failures: The operation was rejected because the
            // system is not in a state required for the operation's execution
            TrampolineErrorCode::FeatureNotSupported
            | TrampolineErrorCode::UnknownPeer
            | TrampolineErrorCode::MissingAmount
            | TrampolineErrorCode::MissingChannel
            | TrampolineErrorCode::InsufficientFunds
            | TrampolineErrorCode::PeerConnectionFailure
            | TrampolineErrorCode::MissingPreimage => tonic::Code::FailedPrecondition,
            // Invalid arguments: Client specified an invalid argument
            TrampolineErrorCode::InvalidNodeId
            | TrampolineErrorCode::AmbigousAmount
            | TrampolineErrorCode::InvalidInvoice => tonic::Code::InvalidArgument,
            // Internal errors: Server-side errors
            TrampolineErrorCode::NetworkError
            | TrampolineErrorCode::PeerNodeFailure
            | TrampolineErrorCode::Internal => tonic::Code::Internal,
            // Unknown: Error type cannot be classified — a payment can fail
            // for client- or server-side reasons alike.
            TrampolineErrorCode::PaymentFailure => tonic::Code::Unknown,
            // RPC errors are mapped to Unknown as they can represent various
            // failure types
            TrampolineErrorCode::RpcError(_) => tonic::Code::Unknown,
        }
    }
}
/// Local result type: defaults the error parameter to [`TrampolineError`]
/// for convenience.
type Result<T, E = TrampolineError> = core::result::Result<T, E>;
/// Error type for trampoline operations, carrying a
/// [`TrampolineErrorCode`].
pub type TrampolineError = Error<TrampolineErrorCode>;
/// Verifies that `feature_bit` is set in the peer's feature bitmap,
/// returning a `FeatureNotSupported` error otherwise.
fn feature_guard(features: impl Into<Vec<u8>>, feature_bit: usize) -> Result<()> {
    let mut features = features.into();
    // Feature bitmaps arrive most-significant byte first (BOLT #9 style —
    // TODO(review) confirm); reversing lets us index byte `feature_bit / 8`
    // directly from the least-significant end.
    features.reverse();
    let byte_pos = feature_bit / 8;
    let bit_pos = feature_bit % 8;
    // Out-of-range bit positions count as "feature not set".
    if byte_pos >= features.len() || (features[byte_pos] & (1 << bit_pos)) == 0 {
        return Err(error!(
            TrampolineErrorCode::FeatureNotSupported,
            "Peer doesn't support trampoline payments"
        )
        .with_hint("Use a peer node that supports trampoline pay (feat 427)")
        .with_context(json!({"features": hex::encode(features)}).to_string()));
    }
    Ok(())
}
/// Maps a value equal to its type's default (e.g. `0`, `""`) to `None`,
/// passing any other value through as `Some`. Used to treat protobuf
/// default values as "unset".
fn as_option<T>(v: T) -> Option<T>
where
    T: Default + PartialEq,
{
    (v != T::default()).then_some(v)
}
/// Executes a trampoline payment of `req.bolt11` through the trampoline
/// node identified by `req.trampoline_node_id`.
///
/// The flow is: resolve and validate the amount, ensure the peer is
/// connected and advertises trampoline support (feature bit 427), allocate
/// the amount plus a fee-overpay margin across the usable channels to the
/// peer, preapprove the invoice, then send one `sendpay` part per channel
/// and wait for all parts to settle.
///
/// Calling this again for an invoice that already has non-failed parts
/// resumes waiting on the existing payment instead of starting a new one.
///
/// # Errors
/// Returns a [`TrampolineError`] for invalid arguments, missing or
/// ambiguous amounts, unusable channels, insufficient funds, or payment
/// failures.
pub async fn trampolinepay(
    req: pb::TrampolinePayRequest,
    rpc_path: impl AsRef<Path>,
) -> Result<cln_rpc::model::responses::PayResponse> {
    let node_id = cln_rpc::primitives::PublicKey::from_slice(&req.trampoline_node_id[..]).map_err(
        |source| {
            error!(
                TrampolineErrorCode::InvalidNodeId,
                "Got an invalid node id: {}",
                source.to_string()
            )
            .with_hint("A node id must be exactly 33 bytes (66 hex characters)")
            .with_source(source)
        },
    )?;
    let hex_node_id = hex::encode(node_id.serialize());
    let mut rpc = ClnRpc::new(&rpc_path).await.map_err(|source| {
        error!(
            TrampolineErrorCode::NetworkError,
            "Couldn't connect to core-lightning node: {}",
            source.to_string()
        )
    })?;
    // Extract the amount from the bolt11 or use the set amount field
    // Return an error if there is a mismatch.
    let decoded = rpc
        .call_typed(&cln_rpc::model::requests::DecodepayRequest {
            bolt11: req.bolt11.clone(),
            description: None,
        })
        .await?;
    let send_pays = rpc
        .call_typed(&cln_rpc::model::requests::ListsendpaysRequest {
            payment_hash: Some(decoded.payment_hash.clone()),
            bolt11: None,
            index: None,
            limit: None,
            start: None,
            status: None,
        })
        .await?;
    // If the payment already has a non-failed part we are resuming: wait
    // for the existing payment instead of sending again.
    if send_pays
        .payments
        .iter()
        .any(|p| p.status != cln_rpc::model::responses::ListsendpaysPaymentsStatus::FAILED)
    {
        let resp = rpc
            .call_typed(&cln_rpc::model::requests::WaitsendpayRequest {
                payment_hash: decoded.payment_hash.clone(),
                groupid: None,
                partid: None,
                timeout: None,
            })
            .await?;
        let preimage = match resp.payment_preimage {
            Some(preimage) => preimage,
            None => {
                return Err(error!(
                    TrampolineErrorCode::MissingPreimage,
                    "Got completed payment part without preimage"
                ))
            }
        };
        return Ok(cln_rpc::model::responses::PayResponse {
            amount_msat: resp.amount_msat.unwrap_or(Amount::from_msat(0)),
            amount_sent_msat: resp.amount_sent_msat,
            created_at: 0.,
            destination: resp.destination,
            parts: match resp.partid {
                Some(0) => 1,
                Some(partid) => partid as u32,
                None => 1,
            },
            payment_hash: resp.payment_hash,
            payment_preimage: preimage,
            status: match resp.status {
                cln_rpc::model::responses::WaitsendpayStatus::COMPLETE => {
                    cln_rpc::model::responses::PayStatus::COMPLETE
                }
            },
            warning_partial_completion: None,
        });
    }
    let max_group_id = send_pays
        .payments
        .iter()
        .map(|p| p.groupid)
        .max()
        .unwrap_or(0);
    log::debug!(
        "New trampoline payment via {}: {} ",
        hex_node_id,
        req.bolt11.clone()
    );
    // Wait for the peer connection to re-establish.
    log::debug!("Await peer connection to {}", hex_node_id);
    AwaitablePeer::new(node_id, rpc_path.as_ref().to_path_buf())
        .wait()
        .await
        .map_err(|e| match e {
            AwaitablePeerError::PeerUnknown(s) => {
                error!(TrampolineErrorCode::UnknownPeer, "Unknown peer {}", s).with_hint(
                    "You need to be connected and share a channel with the trampoline node",
                )
            }
            AwaitablePeerError::PeerConnectionFailure(s) => error!(
                TrampolineErrorCode::PeerConnectionFailure,
                "Can't connect to peer {}", s
            ),
            AwaitablePeerError::Rpc(rpc_error) => rpc_error.into(),
            _ => error!(
                TrampolineErrorCode::Internal,
                "Got an unexpected error while awaiting peer connection: {}",
                e.to_string()
            )
            .with_source(e),
        })?;
    // Check if peer has signaled that they support forward trampoline pays:
    let features = rpc
        .call_typed(&cln_rpc::model::requests::ListpeersRequest {
            id: Some(node_id),
            level: None,
        })
        .await?
        .peers
        .first()
        .ok_or_else(|| {
            error!(
                TrampolineErrorCode::UnknownPeer,
                "Unknown peer node {}", hex_node_id
            ).with_hint("You need to have an active channel with the trampoline node before you can execute a trampoline payment")
        })?
        .features
        .as_ref()
        .map(|feat| hex::decode(feat))
        .ok_or_else(|| {
            error!(TrampolineErrorCode::Internal, "missing feature bits on listpeers response")
        })?
        .map_err(|e| {
            error!(TrampolineErrorCode::Internal, "could not parse feature bits from hex").with_source(e)
        })?;
    feature_guard(features, TRAMPOLINE_FEATURE_BIT)?;
    let amount_msat = match (as_option(req.amount_msat), decoded.amount_msat) {
        (None, None) => {
            return Err(error!(
                TrampolineErrorCode::MissingAmount,
                "Missing amount")
            .with_hint(
                "If the invoice does not have a fixed amount you need to set the amount parameter"
            ));
        }
        (None, Some(amt)) => amt.msat(),
        (Some(amt), None) => amt,
        (Some(set_amt), Some(bolt11_amt)) => {
            if set_amt != bolt11_amt.msat() {
                return Err(error!(
                    TrampolineErrorCode::AmbigousAmount,
                    "Invoice amount and the given amount don't match"
                )
                .with_hint(
                    "If the invoice has the amount set you don't need to set it as a parameter",
                ));
            }
            bolt11_amt.msat()
        }
    };
    // We need to add some sats to the htlcs to allow the trampoline node
    // to pay fees on routing.
    let tlv_amount_msat = amount_msat;
    let overpay = amount_msat as f64
        * (as_option(req.maxfeepercent).unwrap_or(DEFAULT_OVERPAY_PERCENT) as f64 / 100.0);
    let amount_msat = amount_msat + overpay as u64;
    debug!("overpay={}, total_amt={}", overpay as u64, amount_msat);
    let channels: Vec<Channel> = rpc
        .call_typed(&cln_rpc::model::requests::ListpeerchannelsRequest { id: Some(node_id) })
        .await?
        .channels
        .into_iter()
        .filter_map(|ch| {
            let short_channel_id = ch.short_channel_id.or(ch.alias.and_then(|a| a.local));
            let short_channel_id = match short_channel_id {
                Some(scid) => scid,
                None => {
                    warn!("Missing short channel id on a channel to {}", &node_id);
                    return None;
                }
            };
            let spendable_msat = match ch.spendable_msat {
                Some(s) => s.msat(),
                None => {
                    warn!(
                        "Missing spendable_msat on channel with scid={}",
                        short_channel_id.to_string()
                    );
                    return None;
                }
            };
            let min_htlc_out_msat = match ch.minimum_htlc_out_msat {
                Some(m) => m.msat(),
                None => {
                    warn!(
                        "Missing minimum_htlc_out_msat on channel with scid={}",
                        short_channel_id.to_string()
                    );
                    return None;
                }
            };
            return Some(Channel {
                short_channel_id,
                spendable_msat,
                min_htlc_out_msat,
            });
        })
        .filter(|ch| ch.spendable_msat > 0)
        .filter(|ch| ch.spendable_msat > ch.min_htlc_out_msat)
        .collect();
    // Check if we actually got a channel to the trampoline node.
    if channels.is_empty() {
        return Err(error!(
            TrampolineErrorCode::MissingChannel,
            "No active and usable channel to trampoline node found"
        ).with_hint("In order to execute a trampoline payment, you need to share a channel with the trampoline node that has a usable outgoing balance"));
    }
    // Await and filter out re-established channels.
    let deadline = Instant::now() + Duration::from_secs(AWAIT_CHANNELS_TIMEOUT_SEC);
    let mut channels =
        reestablished_channels(channels, node_id, rpc_path.as_ref().to_path_buf(), deadline)
            .await?;
    // Try different allocation strategies in sequence. First try in ascending
    // order of spendable_msat, giving us most drained channels first. Then
    // try in descending order of spendable_msat giving us the channels with the
    // biggest local balance first.
    debug!(
        "Trying to allocate {}msat across {} channels in ascending order",
        amount_msat,
        channels.len()
    );
    let alloc = match find_allocation_ascending_order(&mut channels, amount_msat)
        .filter(|alloc| !alloc.is_empty())
    {
        Some(alloc) => alloc,
        None => {
            debug!("Failed to allocate {}msat in ascending channel order {:?}, trying in descending order",amount_msat, &channels);
            match find_allocation_descending_order(&mut channels, amount_msat)
                .filter(|alloc| !alloc.is_empty())
            {
                Some(alloc) => alloc,
                None => {
                    return Err(error!(
                        TrampolineErrorCode::InsufficientFunds,
                        "Insufficient funds, {}msat are required, current maximal available {}msat",
                        amount_msat,
                        channels.iter().map(|ch| ch.spendable_msat).sum::<u64>()
                    ));
                }
            }
        }
    };
    // All set we can preapprove the invoice
    let _ = rpc
        .call_typed(&cln_rpc::model::requests::PreapproveinvoiceRequest {
            bolt11: req.bolt11.clone(),
        })
        .await?;
    // Create TLV payload.
    use crate::tlv::{SerializedTlvStream, ToBytes};
    let mut payload: SerializedTlvStream = SerializedTlvStream::new();
    payload.set_bytes(TLV_BOLT11, req.bolt11.as_bytes());
    payload.set_tu64(TLV_AMT_MSAT, tlv_amount_msat);
    let payload_hex = hex::encode(SerializedTlvStream::to_bytes(payload));
    // Single-part payments use part_id 0; multi-part payments count from 1.
    let mut part_id = if alloc.len() == 1 { 0 } else { 1 };
    let group_id = max_group_id + 1;
    let mut handles: Vec<
        tokio::task::JoinHandle<Result<cln_rpc::model::responses::WaitsendpayResponse>>,
    > = vec![];
    for ch in &alloc {
        let bolt11 = req.bolt11.clone();
        let label = req.label.clone();
        let part_amt = ch.contrib_msat.clone();
        let scid = ch.channel.short_channel_id.clone();
        let description = decoded.description.clone();
        let payload_hex = payload_hex.clone();
        let mut rpc = ClnRpc::new(&rpc_path).await.map_err(|e| {
            error!(
                TrampolineErrorCode::NetworkError,
                "Couldn't connect to core-lightning node: {}",
                e.to_string()
            )
        })?;
        let handle = tokio::spawn(async move {
            let payment_secret = decoded
                .payment_secret
                .map(|e| e[..].to_vec())
                .ok_or(error!(
                    TrampolineErrorCode::InvalidInvoice,
                    "The invoice is invalid, missing payment secret"
                ))?
                .try_into()
                .map_err(|e: anyhow::Error| {
                    error!(
                        TrampolineErrorCode::InvalidInvoice,
                        "The invoice is invalid, {}",
                        e.to_string(),
                    )
                })?;
            do_pay(
                &mut rpc,
                node_id,
                bolt11,
                label,
                description,
                part_amt,
                scid,
                part_id,
                group_id,
                decoded.payment_hash,
                cln_rpc::primitives::Amount::from_msat(amount_msat),
                payment_secret,
                payload_hex,
                as_option(req.maxdelay),
            )
            .await
        });
        part_id += 1;
        handles.push(handle);
    }
    let results = join_all(handles).await;
    let mut payment_preimage = None;
    for result in results {
        let response = result.map_err(|e| {
            error!(
                TrampolineErrorCode::Internal,
                "Failed to wait for all tasks to complete"
            )
            .with_source(e)
        })??;
        if let Some(preimage) = response.payment_preimage {
            payment_preimage = Some(preimage);
        }
    }
    if let Some(payment_preimage) = payment_preimage {
        Ok(cln_rpc::model::responses::PayResponse {
            destination: Some(decoded.payee),
            warning_partial_completion: None,
            status: cln_rpc::model::responses::PayStatus::COMPLETE,
            amount_msat: cln_rpc::primitives::Amount::from_msat(amount_msat),
            amount_sent_msat: cln_rpc::primitives::Amount::from_msat(amount_msat),
            created_at: 0.,
            parts: alloc.len() as u32,
            payment_hash: decoded.payment_hash,
            payment_preimage,
        })
    } else {
        Err(error!(
            TrampolineErrorCode::PaymentFailure,
            "Payment failed, missing payment preimage"
        ))
    }
}
async fn do_pay(
rpc: &mut ClnRpc,
node_id: PublicKey,
bolt11: String,
label: String,
description: Option<String>,
part_amt: u64,
scid: ShortChannelId,
part_id: u64,
group_id: u64,
payment_hash: cln_rpc::primitives::Sha256,
total_amount: Amount,
payment_secret: cln_rpc::primitives::Secret,
payment_metadata: String,
max_delay: Option<u32>,
) -> Result<cln_rpc::model::responses::WaitsendpayResponse> {
let route = cln_rpc::model::requests::SendpayRoute {
amount_msat: cln_rpc::primitives::Amount::from_msat(part_amt),
id: node_id.clone(),
delay: max_delay.unwrap_or(MAX_DELAY_DEFAULT),
channel: scid,
};
debug!(
"Trampoline payment part_id={} with amount={}, using route={:?}",
part_id, part_amt, route
);
let _r: serde_json::Value = rpc
.call_raw(
"sendpay",
&SendpayRequest {
route: vec![route],
payment_hash,
label: as_option(label),
amount_msat: Some(total_amount),
bolt11: Some(bolt11),
payment_secret: Some(payment_secret),
partid: Some(part_id),
localinvreqid: None,
groupid: Some(group_id),
description,
payment_metadata: Some(payment_metadata),
},
)
.await?;
match rpc
.call_typed(&cln_rpc::model::requests::WaitsendpayRequest {
payment_hash: payment_hash,
timeout: None,
partid: Some(part_id),
groupid: Some(group_id),
})
.await
{
Ok(v) => Ok(v),
Err(e) => {
if let Some(code) = e.code {
if code == PAY_UNPARSEABLE_ONION_CODE {
return Err(error!(
TrampolineErrorCode::PeerNodeFailure,
"Got unparsable onion code from peer"
));
}
} else if e.message == PAY_UNPARSEABLE_ONION_MSG {
return Err(error!(
TrampolineErrorCode::PeerNodeFailure,
"Got unparsable onion code from peer"
));
}
Err(e.into())
}
}
}
// FIXME: Once the `assert_send` is removed we can return a `Vec<Channel>` or an
// `Option<Vec<Channel>>` instead of a result.
/// Waits (up to `deadline`) for each channel to re-establish with the peer
/// and returns only the channels that became ready in time; channels that
/// timed out or errored are silently dropped from the result.
async fn reestablished_channels(
    channels: Vec<Channel>,
    node_id: PublicKey,
    rpc_path: PathBuf,
    deadline: Instant,
) -> Result<Vec<Channel>> {
    // Wait for channels to re-establish.
    // FIXME: Seems that this is a left-over of the development process that
    // ensures that the channels are "sendable", they are `Send`.
    crate::awaitables::assert_send(AwaitableChannel::new(
        node_id,
        ShortChannelId::from_str("1x1x1")
            .map_err(|e| error!(TrampolineErrorCode::Internal, "{}", e.to_string()))?,
        rpc_path.clone(),
    ));
    // One future per channel, each bounded by the shared deadline.
    let mut futures = Vec::new();
    for c in &channels {
        let rp = rpc_path.clone();
        futures.push(
            async move {
                timeout_at(
                    deadline,
                    AwaitableChannel::new(node_id, c.short_channel_id, rp),
                )
                .await
            }
            .boxed(),
        );
    }
    log::info!(
        "Starting {} tasks to wait for channels to be ready",
        futures.len()
    );
    let results = join_all(futures).await;
    // `join_all` preserves order, so zipping results back onto the input
    // channels pairs each outcome with its channel.
    Ok(results
        .into_iter()
        .zip(channels)
        .filter_map(|(result, channel_data)| match result {
            Ok(_amount) => Some(channel_data),
            _ => None,
        })
        .collect::<Vec<Channel>>())
}
/// A usable channel to the trampoline node, reduced to the fields relevant
/// for splitting a payment across channels.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Channel {
    short_channel_id: cln_rpc::primitives::ShortChannelId,
    // Outgoing capacity currently spendable over this channel.
    spendable_msat: u64,
    // Smallest HTLC this channel allows us to send out.
    min_htlc_out_msat: u64,
}
/// A channel paired with the amount it contributes to a payment split.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ChannelContribution<'a> {
    channel: &'a Channel,
    contrib_msat: u64,
}
/// Finds a payment allocation by sorting channels in descending order of
/// spendable amount.
///
/// This strategy prioritizes the channels holding the most funds, which
/// tends to minimize the number of channels used for large payments; ties
/// are broken in favor of channels with the smaller minimum-HTLC
/// requirement.
fn find_allocation_descending_order<'a>(
    channels: &'a mut [Channel],
    target_msat: u64,
) -> Option<Vec<ChannelContribution<'a>>> {
    // Comparing swapped tuples yields: spendable_msat descending,
    // min_htlc_out_msat ascending.
    channels.sort_by(|x, y| {
        (y.spendable_msat, x.min_htlc_out_msat).cmp(&(x.spendable_msat, y.min_htlc_out_msat))
    });
    find_allocation(channels, target_msat)
}
/// Finds a payment allocation by sorting channels in ascending order of
/// spendable amount.
///
/// This strategy prioritizes draining smaller channels first, which can help
/// consolidate funds into fewer channels. For each spendable amount,
/// it further prioritizes channels with smaller minimum HTLC requirements.
fn find_allocation_ascending_order<'a>(
    channels: &'a mut [Channel],
    target_msat: u64,
) -> Option<Vec<ChannelContribution<'a>>> {
    // Ascending on both spendable_msat and min_htlc_out_msat: the smallest
    // channels are processed first. The tuple key yields the same (stable)
    // ordering as the equivalent two-step comparator.
    channels.sort_by_key(|c| (c.spendable_msat, c.min_htlc_out_msat));
    find_allocation(channels, target_msat)
}
/// Finds an allocation that covers the target amount while respecting channel
/// constraints.
///
/// This function implements a recursive backtracking algorithm that attempts to
/// allocate funds from channels in the order they are provided. It handles
/// complex scenarios where channel minimum requirements may need cascading
/// adjustments to find a valid solution.
///
/// # Algorithm Details
///
/// The algorithm works by:
/// 1. Trying to allocate the maximum possible from each channel
/// 2. If a channel's minimum exceeds the remaining target, it tries to skip
/// that channel
/// 3. When a channel minimum can't be met, it backtracks and adjusts previous
/// allocations
/// 4. It uses a cascading approach to free up just enough space from previous
/// channels
fn find_allocation<'a>(
channels: &'a [Channel],
target_msat: u64,
) -> Option<Vec<ChannelContribution<'a>>> {
// We can not allocate channels for a zero amount.
if target_msat == 0 {
return None;
}
/// Result type for the recursive allocation function
enum AllocResult {
/// Allocation succeeded
Success,
/// Allocation is impossible with current channels
Impossible,
/// Need more space (in msat) to satisfy minimum requirements
NeedSpace(u64),
}
/// Recursive helper function that tries to find a valid allocation
///
/// # Arguments
/// * `channels` - Remaining channels to consider
/// * `target_msat` - Remaining amount to allocate
/// * `allocations` - Current allocation state (modified in-place)
fn try_allocate<'a>(
channels: &'a [Channel],
target_msat: u64,
allocations: &mut Vec<ChannelContribution<'a>>,
) -> AllocResult {
// Base case: If we've exactly allocated the target, we found a solution.
if target_msat == 0 {
return AllocResult::Success;
}
// Check that we have channels left to allocate from.
if channels.is_empty() {
return AllocResult::Impossible;
}
// Try to use the current channel (smallest amount) first.
let ch = &channels[0];
// Channel is drained or unusable, skip it.
if ch.spendable_msat < ch.min_htlc_out_msat || ch.spendable_msat == 0 {
return try_allocate(&channels[1..], target_msat, allocations);
}
// Each channel has an upper and a lower bound defined by the minimum
// HTLC amount and the spendable amount.
let lower = ch.min_htlc_out_msat;
let upper = ch.spendable_msat.min(target_msat);
// We need a higher target amount.
if target_msat < lower {
// First we try skipping this channel to see if later channels can
// handle it.
match try_allocate(&channels[1..], target_msat, allocations) {
AllocResult::Success => return AllocResult::Success,
// If that doesn't work, we need space from earlier allocations
_ => return AllocResult::NeedSpace(lower - target_msat),
}
}
// We can allocate from this channel - try max amount first.
allocations.push(ChannelContribution {
channel: ch,
contrib_msat: upper,
});
// Try to allocate the remaining amount from subsequent channels.
match try_allocate(&channels[1..], target_msat - upper, allocations) {
// Success! We're done.
AllocResult::Success => return AllocResult::Success,
// No solution possible with current allocations
AllocResult::Impossible => return AllocResult::Impossible,
// Need to free up space
AllocResult::NeedSpace(shortfall) => {
// Calculate how much we can free from this allocation.
let free = upper - lower;
if shortfall <= free {
// We can cover the shortfall with free space in this channel
allocations.pop();
let adjusted_amount = upper - shortfall;
allocations.push(ChannelContribution {
channel: ch,
contrib_msat: adjusted_amount,
});
// Try allocation with the adjusted amount.
match try_allocate(&channels[1..], target_msat - adjusted_amount, allocations) {
AllocResult::Success => return AllocResult::Success,
_ => {
// If that still don't work skip this channel completely.
// NOTE: We could also try to skip the next channel.
allocations.pop();
return try_allocate(&channels[1..], target_msat, allocations);
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | true |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/node/rpcwait.rs | libs/gl-plugin/src/node/rpcwait.rs | use log::warn;
use tonic::server::NamedService;
use tower::Service;
/// The RPC socket will not be available right away, so we wrap the
/// cln-grpc service with this `Service` which essentially checks for
/// the file's existence, and if it doesn't exist we wait for up to 5
/// seconds for it to appear.
#[derive(Debug, Clone)]
pub struct RpcWaitService<S> {
    // Filesystem path of the JSON-RPC socket whose existence we wait for.
    rpc_path: std::path::PathBuf,
    // The wrapped service that handles the request once the socket exists.
    inner: S,
}
impl<S> RpcWaitService<S> {
pub fn new(inner: S, rpc_path: std::path::PathBuf) -> Self {
RpcWaitService { rpc_path, inner }
}
}
impl<S> Service<hyper::Request<hyper::Body>> for RpcWaitService<S>
where
    S: Service<hyper::Request<hyper::Body>, Response = hyper::Response<tonic::body::BoxBody>>
        + Clone
        + Send
        + 'static,
    S::Future: Send + 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    // Boxed because the async wait-loop below makes the future type unnameable.
    type Future = futures::future::BoxFuture<'static, Result<Self::Response, Self::Error>>;
    // Readiness is delegated to the wrapped service unchanged.
    fn poll_ready(
        &mut self,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }
    // Waits (up to 5 seconds, polling every 500ms) for the RPC socket file
    // to exist, then forwards the request to the inner service either way.
    fn call(&mut self, request: hyper::Request<hyper::Body>) -> Self::Future {
        // This is necessary because tonic internally uses `tower::buffer::Buffer`.
        // See https://github.com/tower-rs/tower/issues/547#issuecomment-767629149
        // for details on why this is necessary
        let clone = self.inner.clone();
        let mut inner = std::mem::replace(&mut self.inner, clone);
        let path = self.rpc_path.clone();
        Box::pin(async move {
            let deadline = tokio::time::Instant::now() + tokio::time::Duration::from_secs(5);
            loop {
                if deadline < tokio::time::Instant::now() {
                    // Break and let it fail in the `inner.call`
                    warn!("Deadline reached, letting the call fail");
                    break;
                }
                // A successful `metadata()` is taken as "the socket file exists".
                match path.metadata() {
                    Ok(_) => break,
                    Err(_) => tokio::time::sleep(tokio::time::Duration::from_millis(500)).await,
                }
            }
            inner.call(request).await
        })
    }
}
impl<S> NamedService for RpcWaitService<S> {
    // Well, this is cheating a bit, since we'll only ever wrap the
    // cln.Node we can have this fixed.
    // NOTE(review): tonic uses this name as the gRPC routing prefix
    // (`/cln.Node/<method>`); confirm it matches the wrapped service.
    const NAME: &'static str = "cln.Node";
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/node/wrapper.rs | libs/gl-plugin/src/node/wrapper.rs | use std::collections::HashMap;
use std::str::FromStr;
use anyhow::Error;
use cln_grpc;
use cln_grpc::pb::{self, node_server::Node};
use cln_rpc::primitives::ChannelState;
use cln_rpc::{self};
use log::debug;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response, Status};
use super::PluginNodeServer;
/// `WrappedNodeServer` enables us to quickly add customizations to
/// the pure passthru of the `cln_grpc::Server`. In particular it
/// implements the guarding against RPC commands that'd require a
/// signature if no HSM is attached (that'd lock up our node) and
/// providing RouteHints for disconnected and zeroconf channels too.
#[derive(Clone)]
pub struct WrappedNodeServer {
    // The plain passthrough gRPC server most methods delegate to.
    inner: cln_grpc::Server,
    // Greenlight node state; provides the JSON-RPC socket path and
    // notification stream used to construct `inner`.
    node_server: PluginNodeServer,
}
// TODO Make node into a module and add the WrappedNodeServer as a submodule.
impl WrappedNodeServer {
    /// Build the wrapper around a freshly constructed `cln_grpc::Server`
    /// that talks to the node's JSON-RPC socket.
    pub async fn new(node_server: PluginNodeServer) -> anyhow::Result<Self> {
        let server =
            cln_grpc::Server::new(&node_server.rpc_path, node_server.notifications.clone())
                .await?;
        Ok(Self {
            inner: server,
            node_server,
        })
    }
}
// This would be so much easier if we have some form of delegation
// already...
#[tonic::async_trait]
impl Node for WrappedNodeServer {
async fn invoice(
&self,
req: Request<pb::InvoiceRequest>,
) -> Result<Response<pb::InvoiceResponse>, Status> {
let req = req.into_inner();
let mut rpc = cln_rpc::ClnRpc::new(self.node_server.rpc_path.clone())
.await
.unwrap();
// First we get the incoming channels so we can force them to
// be added to the invoice. This is best effort and will be
// left out if the call fails, reverting to the default
// behavior.
let hints: Option<Vec<Vec<pb::RouteHop>>> = self
.get_routehints(&mut rpc)
.await
.map(
// Map Result to Result
|v| {
v.into_iter()
.map(
// map each vector element
|rh| rh.hops,
)
.collect()
},
)
.ok();
let mut pbreq: crate::requests::Invoice = match req.clone().try_into() {
Ok(v) => v,
Err(e) => {
return Err(Status::new(
tonic::Code::Internal,
format!(
"could not convert protobuf request into JSON-RPC request: {:?}",
e.to_string()
),
));
}
};
pbreq.dev_routes = hints.map(|v| {
v.into_iter()
.map(|e| e.into_iter().map(|ee| ee.into()).collect())
.collect()
});
pbreq.cltv = match pbreq.cltv {
Some(c) => Some(c), // Keep any set value
None => Some(144), // Use a day if not set
};
let res: Result<crate::responses::Invoice, cln_rpc::RpcError> =
rpc.call_raw("invoice", &pbreq).await;
let res: Result<cln_grpc::pb::InvoiceResponse, tonic::Status> = res
.map(|r| cln_grpc::pb::InvoiceResponse::from(r))
.map_err(|e| {
tonic::Status::new(
tonic::Code::Internal,
format!("converting invoice response to grpc: {}", e),
)
});
res.map(|r| Response::new(r))
}
async fn getinfo(
&self,
r: Request<pb::GetinfoRequest>,
) -> Result<Response<pb::GetinfoResponse>, Status> {
self.inner.getinfo(r).await
}
async fn list_offers(
&self,
r: Request<pb::ListoffersRequest>,
) -> Result<Response<pb::ListoffersResponse>, Status> {
self.inner.list_offers(r).await
}
async fn offer(
&self,
r: Request<pb::OfferRequest>,
) -> Result<Response<pb::OfferResponse>, Status> {
self.inner.offer(r).await
}
async fn bkpr_list_income(
&self,
r: Request<pb::BkprlistincomeRequest>,
) -> Result<Response<pb::BkprlistincomeResponse>, Status> {
self.inner.bkpr_list_income(r).await
}
async fn list_peers(
&self,
r: Request<pb::ListpeersRequest>,
) -> Result<Response<pb::ListpeersResponse>, Status> {
self.inner.list_peers(r).await
}
async fn list_peer_channels(
&self,
r: Request<pb::ListpeerchannelsRequest>,
) -> Result<Response<pb::ListpeerchannelsResponse>, Status> {
self.inner.list_peer_channels(r).await
}
async fn list_closed_channels(
&self,
r: Request<pb::ListclosedchannelsRequest>,
) -> Result<Response<pb::ListclosedchannelsResponse>, Status> {
self.inner.list_closed_channels(r).await
}
async fn list_funds(
&self,
r: Request<pb::ListfundsRequest>,
) -> Result<Response<pb::ListfundsResponse>, Status> {
self.inner.list_funds(r).await
}
async fn decode_pay(
&self,
r: Request<pb::DecodepayRequest>,
) -> Result<Response<pb::DecodepayResponse>, Status> {
self.inner.decode_pay(r).await
}
async fn decode(
&self,
r: Request<pb::DecodeRequest>,
) -> Result<Response<pb::DecodeResponse>, Status> {
self.inner.decode(r).await
}
async fn sign_invoice(
&self,
r: Request<pb::SigninvoiceRequest>,
) -> Result<Response<pb::SigninvoiceResponse>, Status> {
self.inner.sign_invoice(r).await
}
async fn pre_approve_keysend(
&self,
r: Request<pb::PreapprovekeysendRequest>,
) -> Result<Response<pb::PreapprovekeysendResponse>, Status> {
self.inner.pre_approve_keysend(r).await
}
async fn pre_approve_invoice(
&self,
r: Request<pb::PreapproveinvoiceRequest>,
) -> Result<Response<pb::PreapproveinvoiceResponse>, Status> {
self.inner.pre_approve_invoice(r).await
}
async fn send_custom_msg(
&self,
r: Request<pb::SendcustommsgRequest>,
) -> Result<Response<pb::SendcustommsgResponse>, Status> {
self.inner.send_custom_msg(r).await
}
async fn send_pay(
&self,
r: Request<pb::SendpayRequest>,
) -> Result<Response<pb::SendpayResponse>, Status> {
self.inner.send_pay(r).await
}
async fn list_channels(
&self,
r: Request<pb::ListchannelsRequest>,
) -> Result<Response<pb::ListchannelsResponse>, Status> {
self.inner.list_channels(r).await
}
async fn add_gossip(
&self,
r: Request<pb::AddgossipRequest>,
) -> Result<Response<pb::AddgossipResponse>, Status> {
self.inner.add_gossip(r).await
}
async fn check_message(
&self,
r: Request<pb::CheckmessageRequest>,
) -> Result<Response<pb::CheckmessageResponse>, Status> {
self.inner.check_message(r).await
}
async fn close(
&self,
r: Request<pb::CloseRequest>,
) -> Result<Response<pb::CloseResponse>, Status> {
self.inner.close(r).await
}
async fn connect_peer(
&self,
r: Request<pb::ConnectRequest>,
) -> Result<Response<pb::ConnectResponse>, Status> {
self.inner.connect_peer(r).await
}
async fn create_invoice(
&self,
r: Request<pb::CreateinvoiceRequest>,
) -> Result<Response<pb::CreateinvoiceResponse>, Status> {
self.inner.create_invoice(r).await
}
async fn datastore(
&self,
r: Request<pb::DatastoreRequest>,
) -> Result<Response<pb::DatastoreResponse>, Status> {
self.inner.datastore(r).await
}
async fn create_onion(
&self,
r: Request<pb::CreateonionRequest>,
) -> Result<Response<pb::CreateonionResponse>, Status> {
self.inner.create_onion(r).await
}
async fn del_datastore(
&self,
r: Request<pb::DeldatastoreRequest>,
) -> Result<Response<pb::DeldatastoreResponse>, Status> {
self.inner.del_datastore(r).await
}
async fn del_invoice(
&self,
r: Request<pb::DelinvoiceRequest>,
) -> Result<Response<pb::DelinvoiceResponse>, Status> {
self.inner.del_invoice(r).await
}
async fn list_datastore(
&self,
r: Request<pb::ListdatastoreRequest>,
) -> Result<Response<pb::ListdatastoreResponse>, Status> {
self.inner.list_datastore(r).await
}
async fn list_invoices(
&self,
r: Request<pb::ListinvoicesRequest>,
) -> Result<Response<pb::ListinvoicesResponse>, Status> {
self.inner.list_invoices(r).await
}
async fn send_onion(
&self,
r: Request<pb::SendonionRequest>,
) -> Result<Response<pb::SendonionResponse>, Status> {
self.inner.send_onion(r).await
}
async fn list_send_pays(
&self,
r: Request<pb::ListsendpaysRequest>,
) -> Result<Response<pb::ListsendpaysResponse>, Status> {
self.inner.list_send_pays(r).await
}
async fn list_transactions(
&self,
r: Request<pb::ListtransactionsRequest>,
) -> Result<Response<pb::ListtransactionsResponse>, Status> {
self.inner.list_transactions(r).await
}
async fn pay(&self, r: Request<pb::PayRequest>) -> Result<Response<pb::PayResponse>, Status> {
self.inner.pay(r).await
}
async fn list_nodes(
&self,
r: Request<pb::ListnodesRequest>,
) -> Result<Response<pb::ListnodesResponse>, Status> {
self.inner.list_nodes(r).await
}
async fn wait_any_invoice(
&self,
r: Request<pb::WaitanyinvoiceRequest>,
) -> Result<Response<pb::WaitanyinvoiceResponse>, Status> {
self.inner.wait_any_invoice(r).await
}
async fn wait_invoice(
&self,
r: Request<pb::WaitinvoiceRequest>,
) -> Result<Response<pb::WaitinvoiceResponse>, Status> {
self.inner.wait_invoice(r).await
}
async fn wait_send_pay(
&self,
r: Request<pb::WaitsendpayRequest>,
) -> Result<Response<pb::WaitsendpayResponse>, Status> {
self.inner.wait_send_pay(r).await
}
async fn wait_block_height(
&self,
r: Request<pb::WaitblockheightRequest>,
) -> Result<Response<pb::WaitblockheightResponse>, Status> {
self.inner.wait_block_height(r).await
}
async fn new_addr(
&self,
r: Request<pb::NewaddrRequest>,
) -> Result<Response<pb::NewaddrResponse>, Status> {
self.inner.new_addr(r).await
}
async fn withdraw(
&self,
r: Request<pb::WithdrawRequest>,
) -> Result<Response<pb::WithdrawResponse>, Status> {
self.inner.withdraw(r).await
}
async fn key_send(
&self,
r: Request<pb::KeysendRequest>,
) -> Result<Response<pb::KeysendResponse>, Status> {
self.inner.key_send(r).await
}
async fn fund_psbt(
&self,
r: Request<pb::FundpsbtRequest>,
) -> Result<Response<pb::FundpsbtResponse>, Status> {
self.inner.fund_psbt(r).await
}
async fn send_psbt(
&self,
r: Request<pb::SendpsbtRequest>,
) -> Result<Response<pb::SendpsbtResponse>, Status> {
self.inner.send_psbt(r).await
}
async fn sign_psbt(
&self,
r: Request<pb::SignpsbtRequest>,
) -> Result<Response<pb::SignpsbtResponse>, Status> {
self.inner.sign_psbt(r).await
}
async fn utxo_psbt(
&self,
r: Request<pb::UtxopsbtRequest>,
) -> Result<Response<pb::UtxopsbtResponse>, Status> {
self.inner.utxo_psbt(r).await
}
async fn tx_discard(
&self,
r: Request<pb::TxdiscardRequest>,
) -> Result<Response<pb::TxdiscardResponse>, Status> {
self.inner.tx_discard(r).await
}
async fn tx_prepare(
&self,
r: Request<pb::TxprepareRequest>,
) -> Result<Response<pb::TxprepareResponse>, Status> {
self.inner.tx_prepare(r).await
}
async fn tx_send(
&self,
r: Request<pb::TxsendRequest>,
) -> Result<Response<pb::TxsendResponse>, Status> {
self.inner.tx_send(r).await
}
async fn disconnect(
&self,
r: Request<pb::DisconnectRequest>,
) -> Result<Response<pb::DisconnectResponse>, Status> {
let inner = r.into_inner();
let id = hex::encode(inner.id.clone());
debug!(
"Got a disconnect request for {}, try to delete it from the datastore peerlist.",
id
);
// We try to delete the peer that we disconnect from from the datastore.
// We don't want to be overly strict on this so we don't throw an error
// if this does not work.
let data_res = self
.del_datastore(Request::new(pb::DeldatastoreRequest {
key: vec!["greenlight".to_string(), "peerlist".to_string(), id.clone()],
generation: None,
}))
.await;
if let Err(e) = data_res {
log::debug!("Could not delete peer {} from datastore: {}", id, e);
}
self.inner.disconnect(Request::new(inner.clone())).await
}
async fn feerates(
&self,
r: Request<pb::FeeratesRequest>,
) -> Result<Response<pb::FeeratesResponse>, Status> {
self.inner.feerates(r).await
}
async fn fund_channel(
&self,
r: Request<pb::FundchannelRequest>,
) -> Result<Response<pb::FundchannelResponse>, Status> {
self.inner.fund_channel(r).await
}
async fn get_route(
&self,
r: Request<pb::GetrouteRequest>,
) -> Result<Response<pb::GetrouteResponse>, Status> {
self.inner.get_route(r).await
}
async fn list_forwards(
&self,
r: Request<pb::ListforwardsRequest>,
) -> Result<Response<pb::ListforwardsResponse>, Status> {
self.inner.list_forwards(r).await
}
async fn list_pays(
&self,
r: Request<pb::ListpaysRequest>,
) -> Result<Response<pb::ListpaysResponse>, Status> {
self.inner.list_pays(r).await
}
async fn ping(
&self,
r: Request<pb::PingRequest>,
) -> Result<Response<pb::PingResponse>, Status> {
self.inner.ping(r).await
}
async fn set_channel(
&self,
r: Request<pb::SetchannelRequest>,
) -> Result<Response<pb::SetchannelResponse>, Status> {
self.inner.set_channel(r).await
}
async fn sign_message(
&self,
r: Request<pb::SignmessageRequest>,
) -> Result<Response<pb::SignmessageResponse>, Status> {
self.inner.sign_message(r).await
}
async fn stop(
&self,
r: Request<pb::StopRequest>,
) -> Result<Response<pb::StopResponse>, Status> {
self.inner.stop(r).await
}
async fn static_backup(
&self,
r: Request<pb::StaticbackupRequest>,
) -> Result<Response<pb::StaticbackupResponse>, Status> {
self.inner.static_backup(r).await
}
async fn list_htlcs(
&self,
r: Request<pb::ListhtlcsRequest>,
) -> Result<Response<pb::ListhtlcsResponse>, Status> {
self.inner.list_htlcs(r).await
}
async fn datastore_usage(
&self,
r: Request<pb::DatastoreusageRequest>,
) -> Result<Response<pb::DatastoreusageResponse>, Status> {
self.inner.datastore_usage(r).await
}
async fn fetch_invoice(
&self,
request: tonic::Request<pb::FetchinvoiceRequest>,
) -> Result<tonic::Response<pb::FetchinvoiceResponse>, tonic::Status> {
self.inner.fetch_invoice(request).await
}
async fn wait(
&self,
request: tonic::Request<pb::WaitRequest>,
) -> Result<tonic::Response<pb::WaitResponse>, tonic::Status> {
self.inner.wait(request).await
}
async fn add_psbt_output(
&self,
request: tonic::Request<pb::AddpsbtoutputRequest>,
) -> Result<tonic::Response<pb::AddpsbtoutputResponse>, tonic::Status> {
self.inner.add_psbt_output(request).await
}
async fn auto_clean_once(
&self,
request: tonic::Request<pb::AutocleanonceRequest>,
) -> Result<tonic::Response<pb::AutocleanonceResponse>, tonic::Status> {
self.inner.auto_clean_once(request).await
}
async fn auto_clean_status(
&self,
request: tonic::Request<pb::AutocleanstatusRequest>,
) -> Result<tonic::Response<pb::AutocleanstatusResponse>, tonic::Status> {
self.inner.auto_clean_status(request).await
}
async fn dev_forget_channel(
&self,
request: tonic::Request<pb::DevforgetchannelRequest>,
) -> Result<tonic::Response<pb::DevforgetchannelResponse>, tonic::Status> {
self.inner.dev_forget_channel(request).await
}
async fn emergency_recover(
&self,
request: tonic::Request<pb::EmergencyrecoverRequest>,
) -> Result<tonic::Response<pb::EmergencyrecoverResponse>, tonic::Status> {
self.inner.emergency_recover(request).await
}
async fn get_emergency_recover_data(
&self,
request: tonic::Request<pb::GetemergencyrecoverdataRequest>,
) -> Result<tonic::Response<pb::GetemergencyrecoverdataResponse>, tonic::Status> {
self.inner.get_emergency_recover_data(request).await
}
async fn expose_secret(
&self,
request: tonic::Request<pb::ExposesecretRequest>,
) -> Result<tonic::Response<pb::ExposesecretResponse>, tonic::Status> {
self.inner.expose_secret(request).await
}
async fn recover(
&self,
request: tonic::Request<pb::RecoverRequest>,
) -> Result<tonic::Response<pb::RecoverResponse>, tonic::Status> {
self.inner.recover(request).await
}
async fn recover_channel(
&self,
request: tonic::Request<pb::RecoverchannelRequest>,
) -> Result<tonic::Response<pb::RecoverchannelResponse>, tonic::Status> {
self.inner.recover_channel(request).await
}
async fn create_invoice_request(
&self,
request: tonic::Request<pb::InvoicerequestRequest>,
) -> Result<tonic::Response<pb::InvoicerequestResponse>, tonic::Status> {
self.inner.create_invoice_request(request).await
}
async fn disable_invoice_request(
&self,
request: tonic::Request<pb::DisableinvoicerequestRequest>,
) -> Result<tonic::Response<pb::DisableinvoicerequestResponse>, tonic::Status> {
self.inner.disable_invoice_request(request).await
}
async fn list_invoice_requests(
&self,
request: tonic::Request<pb::ListinvoicerequestsRequest>,
) -> Result<tonic::Response<pb::ListinvoicerequestsResponse>, tonic::Status> {
self.inner.list_invoice_requests(request).await
}
async fn make_secret(
&self,
request: tonic::Request<pb::MakesecretRequest>,
) -> Result<tonic::Response<pb::MakesecretResponse>, tonic::Status> {
self.inner.make_secret(request).await
}
async fn del_pay(
&self,
request: tonic::Request<pb::DelpayRequest>,
) -> Result<tonic::Response<pb::DelpayResponse>, tonic::Status> {
self.inner.del_pay(request).await
}
async fn del_forward(
&self,
request: tonic::Request<pb::DelforwardRequest>,
) -> Result<tonic::Response<pb::DelforwardResponse>, tonic::Status> {
self.inner.del_forward(request).await
}
async fn disable_offer(
&self,
request: tonic::Request<pb::DisableofferRequest>,
) -> Result<tonic::Response<pb::DisableofferResponse>, tonic::Status> {
self.inner.disable_offer(request).await
}
async fn enable_offer(
&self,
request: tonic::Request<pb::EnableofferRequest>,
) -> Result<tonic::Response<pb::EnableofferResponse>, tonic::Status> {
self.inner.enable_offer(request).await
}
async fn fund_channel_cancel(
&self,
request: tonic::Request<pb::FundchannelCancelRequest>,
) -> Result<tonic::Response<pb::FundchannelCancelResponse>, tonic::Status> {
self.inner.fund_channel_cancel(request).await
}
async fn fund_channel_complete(
&self,
request: tonic::Request<pb::FundchannelCompleteRequest>,
) -> Result<tonic::Response<pb::FundchannelCompleteResponse>, tonic::Status> {
self.inner.fund_channel_complete(request).await
}
async fn fund_channel_start(
&self,
request: tonic::Request<pb::FundchannelStartRequest>,
) -> Result<tonic::Response<pb::FundchannelStartResponse>, tonic::Status> {
self.inner.fund_channel_start(request).await
}
async fn get_log(
&self,
request: tonic::Request<pb::GetlogRequest>,
) -> Result<tonic::Response<pb::GetlogResponse>, tonic::Status> {
self.inner.get_log(request).await
}
async fn funder_update(
&self,
request: tonic::Request<pb::FunderupdateRequest>,
) -> Result<tonic::Response<pb::FunderupdateResponse>, tonic::Status> {
self.inner.funder_update(request).await
}
async fn list_addresses(
&self,
request: tonic::Request<pb::ListaddressesRequest>,
) -> Result<tonic::Response<pb::ListaddressesResponse>, tonic::Status> {
self.inner.list_addresses(request).await
}
async fn multi_fund_channel(
&self,
request: tonic::Request<pb::MultifundchannelRequest>,
) -> Result<tonic::Response<pb::MultifundchannelResponse>, tonic::Status> {
self.inner.multi_fund_channel(request).await
}
async fn multi_withdraw(
&self,
request: tonic::Request<pb::MultiwithdrawRequest>,
) -> Result<tonic::Response<pb::MultiwithdrawResponse>, tonic::Status> {
self.inner.multi_withdraw(request).await
}
async fn open_channel_abort(
&self,
request: tonic::Request<pb::OpenchannelAbortRequest>,
) -> Result<tonic::Response<pb::OpenchannelAbortResponse>, tonic::Status> {
self.inner.open_channel_abort(request).await
}
async fn open_channel_bump(
&self,
request: tonic::Request<pb::OpenchannelBumpRequest>,
) -> Result<tonic::Response<pb::OpenchannelBumpResponse>, tonic::Status> {
self.inner.open_channel_bump(request).await
}
async fn open_channel_init(
&self,
request: tonic::Request<pb::OpenchannelInitRequest>,
) -> Result<tonic::Response<pb::OpenchannelInitResponse>, tonic::Status> {
self.inner.open_channel_init(request).await
}
async fn open_channel_signed(
&self,
request: tonic::Request<pb::OpenchannelSignedRequest>,
) -> Result<tonic::Response<pb::OpenchannelSignedResponse>, tonic::Status> {
self.inner.open_channel_signed(request).await
}
async fn open_channel_update(
&self,
request: tonic::Request<pb::OpenchannelUpdateRequest>,
) -> Result<tonic::Response<pb::OpenchannelUpdateResponse>, tonic::Status> {
self.inner.open_channel_update(request).await
}
async fn plugin(
&self,
request: tonic::Request<pb::PluginRequest>,
) -> Result<tonic::Response<pb::PluginResponse>, tonic::Status> {
self.inner.plugin(request).await
}
async fn rene_pay_status(
&self,
request: tonic::Request<pb::RenepaystatusRequest>,
) -> Result<tonic::Response<pb::RenepaystatusResponse>, tonic::Status> {
self.inner.rene_pay_status(request).await
}
async fn rene_pay(
&self,
request: tonic::Request<pb::RenepayRequest>,
) -> Result<tonic::Response<pb::RenepayResponse>, tonic::Status> {
self.inner.rene_pay(request).await
}
async fn reserve_inputs(
&self,
request: tonic::Request<pb::ReserveinputsRequest>,
) -> Result<tonic::Response<pb::ReserveinputsResponse>, tonic::Status> {
self.inner.reserve_inputs(request).await
}
async fn send_invoice(
&self,
request: tonic::Request<pb::SendinvoiceRequest>,
) -> Result<tonic::Response<pb::SendinvoiceResponse>, tonic::Status> {
self.inner.send_invoice(request).await
}
async fn set_config(
&self,
request: tonic::Request<pb::SetconfigRequest>,
) -> Result<tonic::Response<pb::SetconfigResponse>, tonic::Status> {
self.inner.set_config(request).await
}
async fn set_psbt_version(
&self,
request: tonic::Request<pb::SetpsbtversionRequest>,
) -> Result<tonic::Response<pb::SetpsbtversionResponse>, tonic::Status> {
self.inner.set_psbt_version(request).await
}
async fn splice_init(
&self,
request: tonic::Request<pb::SpliceInitRequest>,
) -> Result<tonic::Response<pb::SpliceInitResponse>, tonic::Status> {
self.inner.splice_init(request).await
}
async fn splice_signed(
&self,
request: tonic::Request<pb::SpliceSignedRequest>,
) -> Result<tonic::Response<pb::SpliceSignedResponse>, tonic::Status> {
self.inner.splice_signed(request).await
}
async fn splice_update(
&self,
request: tonic::Request<pb::SpliceUpdateRequest>,
) -> Result<tonic::Response<pb::SpliceUpdateResponse>, tonic::Status> {
self.inner.splice_update(request).await
}
async fn dev_splice(
&self,
request: tonic::Request<pb::DevspliceRequest>,
) -> Result<tonic::Response<pb::DevspliceResponse>, tonic::Status> {
self.inner.dev_splice(request).await
}
async fn unreserve_inputs(
&self,
request: tonic::Request<pb::UnreserveinputsRequest>,
) -> Result<tonic::Response<pb::UnreserveinputsResponse>, tonic::Status> {
self.inner.unreserve_inputs(request).await
}
async fn upgrade_wallet(
&self,
request: tonic::Request<pb::UpgradewalletRequest>,
) -> Result<tonic::Response<pb::UpgradewalletResponse>, tonic::Status> {
self.inner.upgrade_wallet(request).await
}
async fn list_configs(
&self,
request: tonic::Request<pb::ListconfigsRequest>,
) -> Result<tonic::Response<pb::ListconfigsResponse>, tonic::Status> {
self.inner.list_configs(request).await
}
async fn help(
&self,
request: tonic::Request<pb::HelpRequest>,
) -> Result<tonic::Response<pb::HelpResponse>, tonic::Status> {
self.inner.help(request).await
}
async fn bkpr_channels_apy(
&self,
request: tonic::Request<pb::BkprchannelsapyRequest>,
) -> Result<tonic::Response<pb::BkprchannelsapyResponse>, tonic::Status> {
self.inner.bkpr_channels_apy(request).await
}
async fn bkpr_dump_income_csv(
&self,
request: tonic::Request<pb::BkprdumpincomecsvRequest>,
) -> Result<tonic::Response<pb::BkprdumpincomecsvResponse>, tonic::Status> {
self.inner.bkpr_dump_income_csv(request).await
}
async fn bkpr_inspect(
&self,
request: tonic::Request<pb::BkprinspectRequest>,
) -> Result<tonic::Response<pb::BkprinspectResponse>, tonic::Status> {
self.inner.bkpr_inspect(request).await
}
async fn bkpr_list_account_events(
&self,
request: tonic::Request<pb::BkprlistaccounteventsRequest>,
) -> Result<tonic::Response<pb::BkprlistaccounteventsResponse>, tonic::Status> {
self.inner.bkpr_list_account_events(request).await
}
async fn bkpr_list_balances(
&self,
request: tonic::Request<pb::BkprlistbalancesRequest>,
) -> Result<tonic::Response<pb::BkprlistbalancesResponse>, tonic::Status> {
self.inner.bkpr_list_balances(request).await
}
async fn bkpr_edit_description_by_outpoint(
&self,
request: tonic::Request<pb::BkpreditdescriptionbyoutpointRequest>,
) -> Result<tonic::Response<pb::BkpreditdescriptionbyoutpointResponse>, tonic::Status> {
self.inner.bkpr_edit_description_by_outpoint(request).await
}
async fn bkpr_edit_description_by_payment_id(
&self,
request: tonic::Request<pb::BkpreditdescriptionbypaymentidRequest>,
) -> Result<tonic::Response<pb::BkpreditdescriptionbypaymentidResponse>, tonic::Status> {
self.inner
.bkpr_edit_description_by_payment_id(request)
.await
}
async fn blacklist_rune(
&self,
request: tonic::Request<pb::BlacklistruneRequest>,
) -> Result<tonic::Response<pb::BlacklistruneResponse>, tonic::Status> {
self.inner.blacklist_rune(request).await
}
async fn check_rune(
&self,
request: tonic::Request<pb::CheckruneRequest>,
) -> Result<tonic::Response<pb::CheckruneResponse>, tonic::Status> {
self.inner.check_rune(request).await
}
async fn create_rune(
&self,
request: tonic::Request<pb::CreateruneRequest>,
) -> Result<tonic::Response<pb::CreateruneResponse>, tonic::Status> {
self.inner.create_rune(request).await
}
async fn show_runes(
&self,
request: tonic::Request<pb::ShowrunesRequest>,
) -> Result<tonic::Response<pb::ShowrunesResponse>, tonic::Status> {
self.inner.show_runes(request).await
}
async fn ask_rene_unreserve(
&self,
request: tonic::Request<pb::AskreneunreserveRequest>,
) -> Result<tonic::Response<pb::AskreneunreserveResponse>, tonic::Status> {
self.inner.ask_rene_unreserve(request).await
}
async fn ask_rene_list_layers(
&self,
request: tonic::Request<pb::AskrenelistlayersRequest>,
) -> Result<tonic::Response<pb::AskrenelistlayersResponse>, tonic::Status> {
self.inner.ask_rene_list_layers(request).await
}
async fn ask_rene_create_layer(
&self,
request: tonic::Request<pb::AskrenecreatelayerRequest>,
) -> Result<tonic::Response<pb::AskrenecreatelayerResponse>, tonic::Status> {
self.inner.ask_rene_create_layer(request).await
}
async fn ask_rene_remove_layer(
&self,
request: tonic::Request<pb::AskreneremovelayerRequest>,
) -> Result<tonic::Response<pb::AskreneremovelayerResponse>, tonic::Status> {
self.inner.ask_rene_remove_layer(request).await
}
async fn ask_rene_reserve(
&self,
request: tonic::Request<pb::AskrenereserveRequest>,
) -> Result<tonic::Response<pb::AskrenereserveResponse>, tonic::Status> {
self.inner.ask_rene_reserve(request).await
}
async fn ask_rene_age(
&self,
request: tonic::Request<pb::AskreneageRequest>,
) -> Result<tonic::Response<pb::AskreneageResponse>, tonic::Status> {
self.inner.ask_rene_age(request).await
}
async fn get_routes(
&self,
request: tonic::Request<pb::GetroutesRequest>,
) -> Result<tonic::Response<pb::GetroutesResponse>, tonic::Status> {
self.inner.get_routes(request).await
}
async fn ask_rene_disable_node(
&self,
request: tonic::Request<pb::AskrenedisablenodeRequest>,
) -> Result<tonic::Response<pb::AskrenedisablenodeResponse>, tonic::Status> {
self.inner.ask_rene_disable_node(request).await
}
async fn ask_rene_inform_channel(
&self,
request: tonic::Request<pb::AskreneinformchannelRequest>,
) -> Result<tonic::Response<pb::AskreneinformchannelResponse>, tonic::Status> {
self.inner.ask_rene_inform_channel(request).await
}
async fn ask_rene_create_channel(
&self,
request: tonic::Request<pb::AskrenecreatechannelRequest>,
) -> Result<tonic::Response<pb::AskrenecreatechannelResponse>, tonic::Status> {
self.inner.ask_rene_create_channel(request).await
}
async fn ask_rene_update_channel(
&self,
request: tonic::Request<pb::AskreneupdatechannelRequest>,
) -> Result<tonic::Response<pb::AskreneupdatechannelResponse>, tonic::Status> {
self.inner.ask_rene_update_channel(request).await
}
async fn ask_rene_bias_channel(
&self,
request: tonic::Request<pb::AskrenebiaschannelRequest>,
) -> Result<tonic::Response<pb::AskrenebiaschannelResponse>, tonic::Status> {
self.inner.ask_rene_bias_channel(request).await
}
async fn ask_rene_list_reservations(
&self,
request: tonic::Request<pb::AskrenelistreservationsRequest>,
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | true |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/node/mod.rs | libs/gl-plugin/src/node/mod.rs | use crate::config::Config;
use crate::pb::{self, node_server::Node};
use crate::storage::StateStore;
use crate::{messages, Event};
use crate::{stager, tramp};
use anyhow::{Context, Error, Result};
use base64::{engine::general_purpose, Engine as _};
use bytes::BufMut;
use cln_rpc::Notification;
use gl_client::persist::State;
use governor::{
clock::MonotonicClock, state::direct::NotKeyed, state::InMemoryState, Quota, RateLimiter,
};
use lazy_static::lazy_static;
use log::{debug, error, info, trace, warn};
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::Duration;
use tokio::sync::{broadcast, mpsc, Mutex, OnceCell};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{transport::ServerTlsConfig, Code, Request, Response, Status};
mod wrapper;
use gl_client::bitcoin;
use std::str::FromStr;
pub use wrapper::WrappedNodeServer;
static LIMITER: OnceCell<RateLimiter<NotKeyed, InMemoryState, MonotonicClock>> =
OnceCell::const_new();
static RPC_CLIENT: OnceCell<Arc<Mutex<cln_rpc::ClnRpc>>> = OnceCell::const_new();
static RPC_POLL_INTERVAL: Duration = Duration::from_millis(500);
#[allow(unused)]
const OPT_SUPPORTS_LSPS: usize = 729;
pub async fn get_rpc<P: AsRef<Path>>(path: P) -> Arc<Mutex<cln_rpc::ClnRpc>> {
RPC_CLIENT
.get_or_init(|| async {
loop {
match cln_rpc::ClnRpc::new(path.as_ref()).await {
Ok(client) => {
debug!("Connected to lightning-rpc.");
return Arc::new(Mutex::new(client));
}
Err(_) => {
debug!("Failed to connect to lightning-rpc. Retrying in {RPC_POLL_INTERVAL:?}...");
tokio::time::sleep(RPC_POLL_INTERVAL).await;
continue;
}
}
}
})
.await
.clone()
}
lazy_static! {
static ref HSM_ID_COUNT: AtomicUsize = AtomicUsize::new(0);
/// The number of signers that are currently connected (best guess
/// due to races). Allows us to determine whether we should
/// initiate operations that might require signatures.
static ref SIGNER_COUNT: AtomicUsize = AtomicUsize::new(0);
static ref RPC_BCAST: broadcast::Sender<super::Event> = broadcast::channel(4).0;
static ref SERIALIZED_CONFIGURE_REQUEST: Mutex<Option<String>> = Mutex::new(None);
static ref RPC_READY: AtomicBool = AtomicBool::new(false);
}
/// The PluginNodeServer is the interface that is exposed to client devices
/// and is in charge of coordinating the various user-controlled
/// entities. This includes dispatching incoming RPC calls to the JSON-RPC
/// interface, as well as staging requests from the HSM so that they can be
/// streamed and replied to by devices that have access to the signing keys.
#[derive(Clone)]
pub struct PluginNodeServer {
pub tls: ServerTlsConfig,
pub stage: Arc<stager::Stage>,
rpc_path: PathBuf,
events: tokio::sync::broadcast::Sender<super::Event>,
signer_state: Arc<Mutex<State>>,
grpc_binding: String,
signer_state_store: Arc<Mutex<Box<dyn StateStore>>>,
pub ctx: crate::context::Context,
notifications: tokio::sync::broadcast::Sender<Notification>,
}
impl PluginNodeServer {
pub async fn new(
stage: Arc<stager::Stage>,
config: Config,
events: tokio::sync::broadcast::Sender<super::Event>,
notifications: tokio::sync::broadcast::Sender<Notification>,
signer_state_store: Box<dyn StateStore>,
) -> Result<Self, Error> {
let tls = ServerTlsConfig::new()
.identity(config.identity.id)
.client_ca_root(config.identity.ca);
let mut rpc_path = std::env::current_dir().unwrap();
rpc_path.push("lightning-rpc");
info!("Connecting to lightning-rpc at {:?}", rpc_path);
// Bridge the RPC_BCAST into the events queue
let tx = events.clone();
tokio::spawn(async move {
let mut rx = RPC_BCAST.subscribe();
loop {
if let Ok(e) = rx.recv().await {
let _ = tx.send(e);
}
}
});
let signer_state = signer_state_store.read().await?;
let ctx = crate::context::Context::new();
let s = PluginNodeServer {
ctx,
tls,
stage,
events,
rpc_path: rpc_path.clone(),
signer_state: Arc::new(Mutex::new(signer_state)),
signer_state_store: Arc::new(Mutex::new(signer_state_store)),
grpc_binding: config.node_grpc_binding,
notifications,
};
tokio::spawn(async move {
let rpc_arc = get_rpc(&rpc_path).await.clone();
let mut rpc = rpc_arc.lock().await;
let list_datastore_req = cln_rpc::model::requests::ListdatastoreRequest {
key: Some(vec!["glconf".to_string(), "request".to_string()]),
};
let res = rpc.call_typed(&list_datastore_req).await;
match res {
Ok(list_datastore_res) => {
if list_datastore_res.datastore.len() > 0 {
let serialized_configure_request =
list_datastore_res.datastore[0].string.clone();
match serialized_configure_request {
Some(serialized_configure_request) => {
let mut cached_serialized_configure_request =
SERIALIZED_CONFIGURE_REQUEST.lock().await;
*cached_serialized_configure_request =
Some(serialized_configure_request);
}
None => {}
}
}
}
Err(_) => {}
}
});
Ok(s)
}
// Wait for the limiter to allow a new RPC call
pub async fn limit(&self) {
let limiter = LIMITER
.get_or_init(|| async {
let quota = Quota::per_minute(core::num::NonZeroU32::new(300).unwrap());
RateLimiter::direct_with_clock(quota, &MonotonicClock::default())
})
.await;
limiter.until_ready().await
}
}
#[tonic::async_trait]
impl Node for PluginNodeServer {
type StreamCustommsgStream = ReceiverStream<Result<pb::Custommsg, Status>>;
type StreamHsmRequestsStream = ReceiverStream<Result<pb::HsmRequest, Status>>;
type StreamLogStream = ReceiverStream<Result<pb::LogEntry, Status>>;
async fn lsp_invoice(
&self,
req: Request<pb::LspInvoiceRequest>,
) -> Result<Response<pb::LspInvoiceResponse>, Status> {
let req: pb::LspInvoiceRequest = req.into_inner();
let mut invreq: crate::requests::LspInvoiceRequest = req.into();
let rpc_arc = get_rpc(&self.rpc_path).await;
let mut rpc = rpc_arc.lock().await;
// In case the client did not specify an LSP to work with,
// let's enumerate them, and select the best option ourselves.
let lsps = self.get_lsps_offers(&mut rpc).await.map_err(|_e| {
Status::not_found("Could not retrieve LSPS peers for invoice negotiation.")
})?;
if lsps.len() < 1 {
return Err(Status::not_found(
"Could not find an LSP peer to negotiate the LSPS2 channel for this invoice.",
));
}
let lsp = &lsps[0];
log::info!("Selecting {:?} for invoice negotiation", lsp);
invreq.lsp_id = lsp.node_id.to_owned();
let res = rpc
.call_typed(&invreq)
.await
.map_err(|e| Status::new(Code::Internal, e.to_string()))?;
Ok(Response::new(res.into()))
}
async fn stream_custommsg(
&self,
_: Request<pb::StreamCustommsgRequest>,
) -> Result<Response<Self::StreamCustommsgStream>, Status> {
log::debug!("Added a new listener for custommsg");
let (tx, rx) = mpsc::channel(1);
let mut stream = self.events.subscribe();
// TODO: We can do better by returning the broadcast receiver
// directly. Well really we should be filtering the events by
// type, so maybe a `.map()` on the stream can work?
tokio::spawn(async move {
while let Ok(msg) = stream.recv().await {
if let Event::CustomMsg(m) = msg {
log::trace!("Forwarding custommsg {:?} to listener", m);
if let Err(e) = tx.send(Ok(m)).await {
log::warn!("Unable to send custmmsg to listener: {:?}", e);
break;
}
}
}
panic!("stream.recv loop exited...");
});
return Ok(Response::new(ReceiverStream::new(rx)));
}
async fn stream_log(
&self,
_: Request<pb::StreamLogRequest>,
) -> Result<Response<Self::StreamLogStream>, Status> {
match async {
let (tx, rx) = mpsc::channel(1);
let mut lines = linemux::MuxedLines::new()?;
lines.add_file("/tmp/log").await?;
// TODO: Yes, this may produce duplicate lines, when new
// log entries are produced while we're streaming the
// backlog out, but do we care?
use tokio::io::{AsyncBufReadExt, BufReader};
// The nodelet uses its CWD, but CLN creates a network
// subdirectory
let file = tokio::fs::File::open("../log").await?;
let mut file = BufReader::new(file).lines();
tokio::spawn(async move {
match async {
while let Some(line) = file.next_line().await? {
tx.send(Ok(pb::LogEntry {
line: line.trim().to_owned(),
}))
.await?
}
while let Ok(Some(line)) = lines.next_line().await {
tx.send(Ok(pb::LogEntry {
line: line.line().trim().to_string(),
}))
.await?;
}
Ok(())
}
.await as Result<(), anyhow::Error>
{
Ok(()) => {}
Err(e) => {
warn!("error streaming logs to client: {}", e);
}
}
});
Ok(ReceiverStream::new(rx))
}
.await as Result<Self::StreamLogStream, anyhow::Error>
{
Ok(v) => Ok(Response::new(v)),
Err(e) => Err(Status::new(Code::Internal, e.to_string())),
}
}
async fn stream_hsm_requests(
&self,
_request: Request<pb::Empty>,
) -> Result<Response<Self::StreamHsmRequestsStream>, Status> {
let hsm_id = HSM_ID_COUNT.fetch_add(1, Ordering::SeqCst);
SIGNER_COUNT.fetch_add(1, Ordering::SeqCst);
info!(
"New signer with hsm_id={} attached, streaming requests",
hsm_id
);
let (tx, rx) = mpsc::channel(10);
let mut stream = self.stage.mystream().await;
let signer_state = self.signer_state.clone();
let ctx = self.ctx.clone();
tokio::spawn(async move {
trace!("hsmd hsm_id={} request processor started", hsm_id);
{
// We start by immediately injecting a
// vls_protocol::Message::GetHeartbeat. This serves two
// purposes: already send the initial snapshot of the
// signer state to the signer as early as possible, and
// triggering a pruning on the signer, if enabled. In
// incremental mode this ensures that any subsequent,
// presumably time-critical messages, do not have to carry
// the large state with them.
let state = signer_state.lock().await.clone();
let state: Vec<gl_client::pb::SignerStateEntry> = state.into();
let state: Vec<pb::SignerStateEntry> = state
.into_iter()
.map(|s| pb::SignerStateEntry {
key: s.key,
version: s.version,
value: s.value,
})
.collect();
let msg = vls_protocol::msgs::GetHeartbeat {};
use vls_protocol::msgs::SerBolt;
let req = crate::pb::HsmRequest {
// Notice that the request_counter starts at 1000, to
// avoid collisions.
request_id: 0,
signer_state: state,
raw: msg.as_vec(),
requests: vec![], // No pending requests yet, nothing to authorize.
context: None,
};
if let Err(e) = tx.send(Ok(req)).await {
log::warn!("Failed to send heartbeat message to signer: {}", e);
}
}
loop {
let mut req = match stream.next().await {
Err(e) => {
error!(
"Could not get next request from stage: {:?} for hsm_id={}",
e, hsm_id
);
break;
}
Ok(r) => r,
};
trace!(
"Sending request={} to hsm_id={}",
req.request.request_id,
hsm_id
);
let state = signer_state.lock().await.clone();
let state: Vec<gl_client::pb::SignerStateEntry> = state.into();
// TODO Consolidate protos in `gl-client` and `gl-plugin`, then remove this map.
let state: Vec<pb::SignerStateEntry> = state
.into_iter()
.map(|s| pb::SignerStateEntry {
key: s.key,
version: s.version,
value: s.value,
})
.collect();
req.request.signer_state = state.into();
req.request.requests = ctx.snapshot().await.into_iter().map(|r| r.into()).collect();
let serialized_configure_request = SERIALIZED_CONFIGURE_REQUEST.lock().await;
match &(*serialized_configure_request) {
Some(serialized_configure_request) => {
let configure_request = serde_json::from_str::<crate::context::Request>(
serialized_configure_request,
)
.unwrap();
req.request.requests.push(configure_request.into());
}
None => {}
}
debug!(
"Sending signer requests with {} requests and {} state entries",
req.request.requests.len(),
req.request.signer_state.len()
);
eprintln!("WIRE: plugin -> signer: {:?}", req);
if let Err(e) = tx.send(Ok(req.request)).await {
warn!("Error streaming request {:?} to hsm_id={}", e, hsm_id);
break;
}
}
info!("Signer hsm_id={} exited", hsm_id);
SIGNER_COUNT.fetch_sub(1, Ordering::SeqCst);
});
trace!("Returning stream_hsm_request channel");
Ok(Response::new(ReceiverStream::new(rx)))
}
async fn respond_hsm_request(
&self,
request: Request<pb::HsmResponse>,
) -> Result<Response<pb::Empty>, Status> {
let req = request.into_inner();
if req.error != "" {
log::error!("Signer reports an error: {}", req.error);
log::warn!("The above error was returned instead of a response.");
return Ok(Response::new(pb::Empty::default()));
}
eprintln!("WIRE: signer -> plugin: {:?}", req);
// Create a state from the key-value-version tuples. Need to
// convert here, since `pb` is duplicated in the two different
// crates.
let signer_state: Vec<gl_client::pb::SignerStateEntry> = req
.signer_state
.iter()
.map(|i| gl_client::pb::SignerStateEntry {
key: i.key.to_owned(),
value: i.value.to_owned(),
version: i.version,
})
.collect();
let new_state: gl_client::persist::State = signer_state.into();
// Apply state changes to the in-memory state
let mut state = self.signer_state.lock().await;
state.merge(&new_state).map_err(|e| {
Status::new(
Code::Internal,
format!("Error updating internal state: {e}"),
)
})?;
// Send changes to the signer_state_store for persistence
let store = self.signer_state_store.lock().await;
if let Err(e) = store.write(state.clone()).await {
log::warn!(
"The returned state could not be stored. Ignoring response for request_id={}, error={:?}",
req.request_id, e
);
/* Exit here so we don't end up committing the changes
* to CLN, but not to the state store. That'd cause
* drifts in states that are very hard to debug, and
* harder to correct. */
return Ok(Response::new(pb::Empty::default()));
}
if let Err(e) = self.stage.respond(req).await {
warn!("Suppressing error: {:?}", e);
}
Ok(Response::new(pb::Empty::default()))
}
type StreamIncomingStream = ReceiverStream<Result<pb::IncomingPayment, Status>>;
async fn stream_incoming(
&self,
_req: tonic::Request<pb::StreamIncomingFilter>,
) -> Result<Response<Self::StreamIncomingStream>, Status> {
// TODO See if we can just return the broadcast::Receiver
// instead of pulling off broadcast and into an mpsc.
let (tx, rx) = mpsc::channel(1);
let mut bcast = self.events.subscribe();
tokio::spawn(async move {
while let Ok(p) = bcast.recv().await {
match p {
super::Event::IncomingPayment(p) => {
let _ = tx.send(Ok(p)).await;
}
_ => {}
}
}
});
return Ok(Response::new(ReceiverStream::new(rx)));
}
async fn configure(
&self,
req: tonic::Request<pb::GlConfig>,
) -> Result<Response<pb::Empty>, Status> {
self.limit().await;
let gl_config = req.into_inner();
let rpc_arc = get_rpc(&self.rpc_path).await;
let mut rpc = rpc_arc.lock().await;
let res = rpc
.call_typed(&cln_rpc::model::requests::GetinfoRequest {})
.await;
let network = match res {
Ok(get_info_response) => match get_info_response.network.parse() {
Ok(v) => v,
Err(_) => Err(Status::new(
Code::Unknown,
format!("Failed to parse 'network' from 'getinfo' response"),
))?,
},
Err(e) => {
return Err(Status::new(
Code::Unknown,
format!("Failed to retrieve a response from 'getinfo' while setting the node's configuration: {}", e),
));
}
};
match bitcoin::Address::from_str(&gl_config.close_to_addr) {
Ok(address) => {
if address.network != network {
return Err(Status::new(
Code::Unknown,
format!(
"Network mismatch: \
Expected an address for {} but received an address for {}",
network, address.network
),
));
}
}
Err(e) => {
return Err(Status::new(
Code::Unknown,
format!(
"The address {} is not valid: {}",
gl_config.close_to_addr, e
),
));
}
}
let requests: Vec<crate::context::Request> = self
.ctx
.snapshot()
.await
.into_iter()
.map(|r| r.into())
.collect();
let serialized_req = serde_json::to_string(&requests[0]).unwrap();
let datastore_res = rpc
.call_typed(&cln_rpc::model::requests::DatastoreRequest {
key: vec!["glconf".to_string(), "request".to_string()],
string: Some(serialized_req.clone()),
hex: None,
mode: None,
generation: None,
})
.await;
match datastore_res {
Ok(_) => {
let mut cached_gl_config = SERIALIZED_CONFIGURE_REQUEST.lock().await;
*cached_gl_config = Some(serialized_req);
Ok(Response::new(pb::Empty::default()))
}
Err(e) => {
return Err(Status::new(
Code::Unknown,
format!(
"Failed to store the raw configure request in the datastore: {}",
e
),
))
}
}
}
async fn trampoline_pay(
&self,
r: tonic::Request<pb::TrampolinePayRequest>,
) -> Result<tonic::Response<pb::TrampolinePayResponse>, Status> {
tramp::trampolinepay(r.into_inner(), self.rpc_path.clone())
.await
.map(cln_rpc::model::responses::PayResponse::into)
.map(|res: cln_grpc::pb::PayResponse| {
tonic::Response::new(pb::TrampolinePayResponse {
payment_preimage: res.payment_preimage,
payment_hash: res.payment_hash,
created_at: res.created_at,
parts: res.parts,
amount_msat: res.amount_msat.unwrap_or_default().msat,
amount_sent_msat: res.amount_sent_msat.unwrap_or_default().msat,
destination: res.destination.unwrap_or_default(),
})
})
.map_err(|err| {
debug!("Trampoline payment failed: {}", err);
err.into()
})
}
}
use cln_grpc::pb::node_server::NodeServer;
#[derive(Clone, Debug)]
struct Lsps2Offer {
node_id: String,
#[allow(unused)]
params: Vec<crate::responses::OpeningFeeParams>,
}
impl PluginNodeServer {
pub async fn run(self) -> Result<()> {
let addr = self.grpc_binding.parse().unwrap();
let cln_node = NodeServer::new(
WrappedNodeServer::new(self.clone())
.await
.context("creating NodeServer instance")?,
);
let router = tonic::transport::Server::builder()
.max_frame_size(4 * 1024 * 1024) // 4MB max request size
.tcp_keepalive(Some(tokio::time::Duration::from_secs(1)))
.tls_config(self.tls.clone())?
.layer(SignatureContextLayer {
ctx: self.ctx.clone(),
})
.add_service(RpcWaitService::new(cln_node, self.rpc_path.clone()))
.add_service(crate::pb::node_server::NodeServer::new(self.clone()));
router
.serve(addr)
.await
.context("grpc interface exited with error")
}
/// Reconnect all peers with whom we have a channel or previously
/// connected explicitly to.
pub async fn reconnect_peers(&self) -> Result<(), Error> {
if SIGNER_COUNT.load(Ordering::SeqCst) < 1 {
use anyhow::anyhow;
return Err(anyhow!(
"Cannot reconnect peers, no signer to complete the handshake"
));
}
log::info!("Reconnecting all peers (plugin)");
let peers = self.get_reconnect_peers().await?;
log::info!(
"Found {} peers to reconnect: {:?} (plugin)",
peers.len(),
peers.iter().map(|p| p.id.clone())
);
let rpc_arc = get_rpc(&self.rpc_path).await;
let mut rpc = rpc_arc.lock().await;
for r in peers {
trace!("Calling connect: {:?} (plugin)", &r.id);
let res = rpc.call_typed(&r).await;
trace!("Connect returned: {:?} -> {:?} (plugin)", &r.id, res);
match res {
Ok(r) => info!("Connection to {} established: {:?} (plugin)", &r.id, r),
Err(e) => warn!("Could not connect to {}: {:?} (plugin)", &r.id, e),
}
}
return Ok(());
}
async fn list_peers(
&self,
rpc: &mut cln_rpc::ClnRpc,
) -> Result<cln_rpc::model::responses::ListpeersResponse, Error> {
rpc.call_typed(&cln_rpc::model::requests::ListpeersRequest {
id: None,
level: None,
})
.await
.map_err(|e| e.into())
}
async fn get_lsps_offers(&self, rpc: &mut cln_rpc::ClnRpc) -> Result<Vec<Lsps2Offer>, Error> {
// Collect peers offering LSP functionality
let lpeers = self.list_peers(rpc).await?;
// Filter out the ones that do not announce the LSPs features.
// TODO: Re-enable the filtering once the cln-lsps-service plugin announces the features.
let _lsps: Vec<cln_rpc::model::responses::ListpeersPeers> = lpeers
.peers
.into_iter()
//.filter(|p| has_feature(
// hex::decode(p.features.clone().unwrap_or_default()).expect("featurebits are hex"),
// OPT_SUPPORTS_LSPS
//))
.collect();
// Query all peers for their LSPS offers, but with a brief
// timeout so the invoice creation isn't help up too long.
let futs: Vec<
tokio::task::JoinHandle<(
String,
Result<
Result<crate::responses::LspGetinfoResponse, cln_rpc::RpcError>,
tokio::time::error::Elapsed,
>,
)>,
> = _lsps
.into_iter()
.map(|peer| {
let rpc_path = self.rpc_path.clone();
tokio::spawn(async move {
let peer_id = format!("{:x}", peer.id);
let mut rpc = cln_rpc::ClnRpc::new(rpc_path.clone()).await.unwrap();
let req = crate::requests::LspGetinfoRequest {
lsp_id: peer_id.clone(),
token: None,
};
(
peer_id,
tokio::time::timeout(
tokio::time::Duration::from_secs(2),
rpc.call_typed(&req),
)
.await,
)
})
})
.collect();
let mut res = vec![];
for f in futs {
match f.await {
//TODO We need to drag the node_id along.
Ok((node_id, Ok(Ok(r)))) => res.push(Lsps2Offer {
node_id: node_id,
params: r.opening_fee_params_menu,
}),
Ok((node_id, Err(e))) => warn!(
"Error fetching LSPS menu items from peer_id={}: {:?}",
node_id, e
),
Ok((node_id, Ok(Err(e)))) => warn!(
"Error fetching LSPS menu items from peer_id={}: {:?}",
node_id, e
),
Err(_) => warn!("Timeout fetching LSPS menu items"),
}
}
log::info!("Gathered {} LSP menus", res.len());
log::trace!("LSP menus: {:?}", &res);
Ok(res)
}
async fn get_reconnect_peers(
&self,
) -> Result<Vec<cln_rpc::model::requests::ConnectRequest>, Error> {
let rpc_arc = get_rpc(&self.rpc_path).await;
let mut rpc = rpc_arc.lock().await;
let peers = self.list_peers(&mut rpc).await?;
let mut requests: Vec<cln_rpc::model::requests::ConnectRequest> = peers
.peers
.iter()
.filter(|&p| p.connected)
.map(|p| cln_rpc::model::requests::ConnectRequest {
id: p.id.to_string(),
host: None,
port: None,
})
.collect();
let mut dspeers: Vec<cln_rpc::model::requests::ConnectRequest> = rpc
.call_typed(&cln_rpc::model::requests::ListdatastoreRequest {
key: Some(vec!["greenlight".to_string(), "peerlist".to_string()]),
})
.await?
.datastore
.iter()
.map(|x| {
// We need to replace unnecessary escape characters that
// have been added by the datastore, as serde is a bit
// picky on that.
let mut s = x.string.clone().unwrap();
s = s.replace('\\', "");
serde_json::from_str::<messages::Peer>(&s).unwrap()
})
.map(|x| cln_rpc::model::requests::ConnectRequest {
id: x.id,
host: Some(x.addr),
port: None,
})
.collect();
// Merge the two peer lists;
requests.append(&mut dspeers);
requests.sort_by(|a, b| a.id.cmp(&b.id));
requests.dedup_by(|a, b| a.id.eq(&b.id));
Ok(requests)
}
}
use tower::{Layer, Service};
#[derive(Debug, Clone)]
pub struct SignatureContextLayer {
ctx: crate::context::Context,
}
impl SignatureContextLayer {
pub fn new(context: crate::context::Context) -> Self {
SignatureContextLayer { ctx: context }
}
}
impl<S> Layer<S> for SignatureContextLayer {
type Service = SignatureContextService<S>;
fn layer(&self, service: S) -> Self::Service {
SignatureContextService {
inner: service,
ctx: self.ctx.clone(),
}
}
}
// Is the maximum message size we allow to buffer up on requests.
const MAX_MESSAGE_SIZE: usize = 4000000;
#[derive(Debug, Clone)]
pub struct SignatureContextService<S> {
inner: S,
ctx: crate::context::Context,
}
impl<S> Service<hyper::Request<hyper::Body>> for SignatureContextService<S>
where
S: Service<hyper::Request<hyper::Body>, Response = hyper::Response<tonic::body::BoxBody>>
+ Clone
+ Send
+ 'static,
S::Future: Send + 'static,
S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
type Response = S::Response;
type Error = Box<dyn std::error::Error + Send + Sync>;
type Future = futures::future::BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(
&mut self,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.inner.poll_ready(cx).map_err(Into::into)
}
fn call(&mut self, request: hyper::Request<hyper::Body>) -> Self::Future {
// This is necessary because tonic internally uses `tower::buffer::Buffer`.
// See https://github.com/tower-rs/tower/issues/547#issuecomment-767629149
// for details on why this is necessary
let clone = self.inner.clone();
let mut inner = std::mem::replace(&mut self.inner, clone);
let reqctx = self.ctx.clone();
Box::pin(async move {
use tonic::codegen::Body;
let (parts, mut body) = request.into_parts();
let uri = parts.uri.path_and_query().unwrap();
let _ = RPC_BCAST
.clone()
.send(super::Event::RpcCall(uri.to_string()));
let pubkey = parts
.headers
.get("glauthpubkey")
.and_then(|k| general_purpose::STANDARD_NO_PAD.decode(k).ok());
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | true |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-plugin/src/bin/plugin.rs | libs/gl-plugin/src/bin/plugin.rs | use anyhow::{Context, Error};
use gl_plugin::config::Config;
use gl_plugin::{
hsm,
node::PluginNodeServer,
stager::Stage,
storage::{SledStateStore, StateStore},
Event,
};
use log::info;
use std::env;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Error> {
let cwd = env::current_dir()?;
info!("Running in {}", cwd.to_str().unwrap());
let config = Config::new().context("loading config")?;
let stage = Arc::new(Stage::new());
let (events, _) = tokio::sync::broadcast::channel(16);
let (notifications, _) = tokio::sync::broadcast::channel(16);
let state_store = get_signer_store().await?;
start_hsm_server(config.clone(), stage.clone())?;
start_node_server(
config,
stage.clone(),
events.clone(),
notifications,
state_store,
)
.await?;
let plugin = gl_plugin::init(stage, events).await?;
if let Some(plugin) = plugin.start().await? {
plugin.join().await
} else {
Ok(()) // This is just an invocation with `--help`, we're good to exit
}
}
async fn start_node_server(
config: Config,
stage: Arc<Stage>,
events: tokio::sync::broadcast::Sender<Event>,
notifications: tokio::sync::broadcast::Sender<cln_rpc::notifications::Notification>,
signer_state_store: Box<dyn StateStore>,
) -> Result<(), Error> {
let addr: SocketAddr = config
.node_grpc_binding
.parse()
.context("parsing the node_grpc_binding")?;
let tls = tonic::transport::ServerTlsConfig::new()
.identity(config.identity.id.clone())
.client_ca_root(config.identity.ca.clone());
let mut rpc_path = std::env::current_dir().unwrap();
rpc_path.push("lightning-rpc");
info!(
"Starting grpc server on addr={} serving rpc={}",
addr,
rpc_path.display()
);
let node_server = PluginNodeServer::new(
stage.clone(),
config.clone(),
events.clone(),
notifications.clone(),
signer_state_store,
)
.await?;
let cln_node = gl_plugin::grpc::pb::node_server::NodeServer::new(
gl_plugin::node::WrappedNodeServer::new(node_server.clone())
.await
.context("creating cln_grpc::pb::node_server::NodeServer instance")?,
);
let router = tonic::transport::Server::builder()
.tls_config(tls)?
.layer(gl_plugin::node::SignatureContextLayer::new(
node_server.ctx.clone(),
))
.add_service(gl_plugin::node::RpcWaitService::new(cln_node, rpc_path))
.add_service(gl_plugin::pb::node_server::NodeServer::new(
gl_plugin::node::WrappedNodeServer::new(node_server).await?,
));
tokio::spawn(async move {
router
.serve(addr)
.await
.context("grpc interface exited with error")
});
Ok(())
}
async fn get_signer_store() -> Result<Box<dyn StateStore>, Error> {
let mut state_dir = env::current_dir()?;
state_dir.push("signer_state");
Ok(Box::new(SledStateStore::new(state_dir)?))
}
fn start_hsm_server(config: Config, stage: Arc<Stage>) -> Result<(), Error> {
// We run this already at startup, not at configuration because if
// the signerproxy doesn't find the socket on the FS it'll exit.
let hsm_server = hsm::StagingHsmServer::new(
PathBuf::from_str(&config.hsmd_sock_path).context("hsmd_sock_path is not a valid path")?,
stage.clone(),
config.node_info.clone(),
config.node_config.clone(),
);
tokio::spawn(hsm_server.run());
Ok(())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/build.rs | libs/gl-signerproxy/build.rs | fn main() {
tonic_build::configure()
.build_client(true)
.compile(
&[".resources/proto/glclient/greenlight.proto"],
&[".resources/proto/glclient"],
)
.unwrap();
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/passfd.rs | libs/gl-signerproxy/src/passfd.rs | use libc::{self, c_int, c_uchar, c_void, msghdr};
use log::trace;
use std::io::{Error, ErrorKind};
use std::mem;
use std::os::unix::io::RawFd;
pub trait SyncFdPassingExt {
/// Send RawFd. No type information is transmitted.
fn send_fd(&self, fd: RawFd) -> Result<(), Error>;
/// Receive RawFd. No type information is transmitted.
fn recv_fd(&self) -> Result<RawFd, Error>;
}
impl SyncFdPassingExt for RawFd {
fn send_fd(&self, fd: RawFd) -> Result<(), Error> {
trace!("Sending fd {}", fd);
let mut dummy: c_uchar = 0;
let msg_len = unsafe { libc::CMSG_SPACE(mem::size_of::<c_int>() as u32) as _ };
let mut buf = vec![0u8; msg_len as usize];
let mut iov = libc::iovec {
iov_base: &mut dummy as *mut c_uchar as *mut c_void,
iov_len: mem::size_of_val(&dummy),
};
unsafe {
let hdr = libc::cmsghdr {
cmsg_level: libc::SOL_SOCKET,
cmsg_type: libc::SCM_RIGHTS,
cmsg_len: libc::CMSG_LEN(mem::size_of::<c_int>() as u32) as _,
};
// https://github.com/rust-lang/rust-clippy/issues/2881
#[allow(clippy::cast_ptr_alignment)]
std::ptr::write_unaligned(buf.as_mut_ptr() as *mut _, hdr);
// https://github.com/rust-lang/rust-clippy/issues/2881
#[allow(clippy::cast_ptr_alignment)]
std::ptr::write_unaligned(
libc::CMSG_DATA(buf.as_mut_ptr() as *const _) as *mut c_int,
fd,
);
}
let msg: msghdr = libc::msghdr {
msg_name: std::ptr::null_mut(),
msg_namelen: 0,
msg_iov: &mut iov,
msg_iovlen: 1,
msg_control: buf.as_mut_ptr() as *mut c_void,
msg_controllen: msg_len,
msg_flags: 0,
};
let rv = unsafe { libc::sendmsg(*self, &msg, 0) };
if rv < 0 {
return Err(Error::last_os_error());
}
Ok(())
}
fn recv_fd(&self) -> Result<RawFd, Error> {
trace!("Receiving fd");
let mut dummy: c_uchar = 0;
let msg_len = unsafe { libc::CMSG_SPACE(mem::size_of::<c_int>() as u32) as _ };
let mut buf = vec![0u8; msg_len as usize];
let mut iov = libc::iovec {
iov_base: &mut dummy as *mut c_uchar as *mut c_void,
iov_len: mem::size_of_val(&dummy),
};
let mut msg: msghdr = libc::msghdr {
msg_name: std::ptr::null_mut(),
msg_namelen: 0,
msg_iov: &mut iov,
msg_iovlen: 1,
msg_control: buf.as_mut_ptr() as *mut c_void,
msg_controllen: msg_len,
msg_flags: 0,
};
unsafe {
let rv = libc::recvmsg(*self, &mut msg, 0);
match rv {
0 => Err(Error::new(ErrorKind::UnexpectedEof, "0 bytes read")),
rv if rv < 0 => Err(Error::last_os_error()),
rv if rv == mem::size_of::<c_uchar>() as isize => {
let hdr: *mut libc::cmsghdr = if msg.msg_controllen as usize
>= mem::size_of::<libc::cmsghdr>() as usize
{
msg.msg_control as *mut libc::cmsghdr
} else {
return Err(Error::new(
ErrorKind::InvalidData,
"bad control msg (header)",
));
};
if (*hdr).cmsg_level != libc::SOL_SOCKET || (*hdr).cmsg_type != libc::SCM_RIGHTS
{
return Err(Error::new(
ErrorKind::InvalidData,
"bad control msg (level)",
));
}
if msg.msg_controllen as usize
!= libc::CMSG_SPACE(mem::size_of::<c_int>() as u32) as usize
{
return Err(Error::new(ErrorKind::InvalidData, "bad control msg (len)"));
}
// https://github.com/rust-lang/rust-clippy/issues/2881
#[allow(clippy::cast_ptr_alignment)]
let fd = std::ptr::read_unaligned(libc::CMSG_DATA(hdr) as *mut c_int);
if libc::fcntl(fd, libc::F_SETFD, libc::FD_CLOEXEC) < 0 {
return Err(Error::last_os_error());
}
Ok(fd)
}
_ => Err(Error::new(
ErrorKind::InvalidData,
"bad control msg (ret code)",
)),
}
}
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/lib.rs | libs/gl-signerproxy/src/lib.rs | mod hsmproxy;
mod passfd;
mod pb;
mod wire;
use anyhow::Result;
pub struct Proxy {}
impl Proxy {
pub fn new() -> Proxy {
Proxy {}
}
pub fn run(&self) -> Result<()> {
hsmproxy::run()
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/hsmproxy.rs | libs/gl-signerproxy/src/hsmproxy.rs | // Implementation of the server-side hsmd. It collects requests and passes
// them on to the clients which actually have access to the keys.
use crate::pb::{hsm_client::HsmClient, Empty, HsmRequest, HsmRequestContext};
use crate::wire::{DaemonConnection, Message};
use anyhow::{anyhow, Context};
use anyhow::{Error, Result};
use log::{debug, error, info, warn};
use std::convert::TryFrom;
use std::env;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::os::unix::net::UnixStream;
use std::path::PathBuf;
use std::process::Command;
use std::str;
use std::sync::atomic;
use std::sync::Arc;
use std::thread;
use tokio::runtime::Runtime;
use tonic::transport::{Endpoint, Uri};
use tower::service_fn;
use which::which;
type GrpcClient = HsmClient<tonic::transport::Channel>;
fn get_sock_path() -> Result<String> {
Ok(env::var("HSMD_SOCK_PATH").unwrap_or("hsmd.sock".to_string()))
}
struct NodeConnection {
conn: DaemonConnection,
context: Option<HsmRequestContext>,
}
fn version() -> String {
let path = which("lightning_hsmd").expect("could not find HSM executable in PATH");
let version = Command::new(path)
.args(&["--version"])
.output()
.expect("failed to execute process");
str::from_utf8(&version.stdout).unwrap().trim().to_string()
}
fn setup_node_stream() -> Result<DaemonConnection, Error> {
let ms = unsafe { UnixStream::from_raw_fd(3) };
Ok(DaemonConnection::new(ms))
}
fn start_handler(
local: NodeConnection,
counter: Arc<atomic::AtomicUsize>,
grpc: GrpcClient,
runtime: Arc<Runtime>,
) {
thread::spawn(move || {
match process_requests(local, counter, grpc, runtime).context("processing requests") {
Ok(()) => panic!("why did the hsmproxy stop processing requests without an error?"),
Err(e) => warn!("hsmproxy stopped processing requests with error: {}", e),
}
});
}
fn process_requests(
node_conn: NodeConnection,
request_counter: Arc<atomic::AtomicUsize>,
mut server: GrpcClient,
runtime: Arc<Runtime>,
) -> Result<(), Error> {
let conn = node_conn.conn;
let context = node_conn.context;
info!("Pinging server");
runtime.block_on(server.ping(Empty::default()))?;
loop {
if let Ok(msg) = conn.read() {
match msg.msgtype() {
9 => {
eprintln!("Got a message from node: {:?}", &msg.body);
// This requests a new client fd with a given context,
// handle it locally, and defer the creation of the client
// fd on the server side until we need it.
let ctx = HsmRequestContext::from_client_hsmfd_msg(&msg)?;
eprintln!("Got a request for a new client fd. Context: {:?}", ctx);
let (local, remote) = UnixStream::pair()?;
let local = NodeConnection {
conn: DaemonConnection::new(local),
context: Some(ctx),
};
let remote = remote.as_raw_fd();
let msg = Message::new_with_fds(vec![0, 109], &vec![remote]);
let grpc = server.clone();
// Start new handler for the client
start_handler(local, request_counter.clone(), grpc, runtime.clone());
if let Err(e) = conn.write(msg) {
error!("error writing msg to node_connection: {:?}", e);
return Err(e);
}
}
_ => {
// By default we forward to the remote HSMd
let req = tonic::Request::new(HsmRequest {
context: context.clone(),
raw: msg.body.clone(),
request_id: request_counter.fetch_add(1, atomic::Ordering::Relaxed) as u32,
requests: Vec::new(),
signer_state: Vec::new(),
});
eprintln!(
"WIRE: lightningd -> hsmd: Got a message from node: {:?}",
&req
);
let start_time = tokio::time::Instant::now();
debug!("Got a message from node: {:?}", &req);
let res = runtime.block_on(server.request(req))?.into_inner();
let delta = start_time.elapsed();
let msg = Message::from_raw(res.raw);
eprintln!(
"WIRE: plugin -> hsmd: Got respone from hsmd: {:?} after {}ms",
&msg,
delta.as_millis()
);
eprintln!("WIRE: hsmd -> lightningd: {:?}", &msg);
conn.write(msg)?
}
}
} else {
error!("Connection lost");
return Err(anyhow!("Connection lost"));
}
}
}
fn grpc_connect(runtime: &Runtime) -> Result<GrpcClient, Error> {
runtime.block_on(async {
// We will ignore this uri because uds do not use it
// if your connector does use the uri it will be provided
// as the request to the `MakeConnection`.
// Connect to a Uds socket
let channel = Endpoint::try_from("http://[::]:50051")?
.connect_with_connector(service_fn(|_: Uri| {
let sock_path = get_sock_path().unwrap();
let mut path = PathBuf::new();
if !sock_path.starts_with('/') {
path.push(env::current_dir().unwrap());
}
path.push(&sock_path);
let path = path.to_str().unwrap().to_string();
info!("Connecting to hsmserver at {}", path);
tokio::net::UnixStream::connect(path)
}))
.await
.context("could not connect to the socket file")?;
Ok(HsmClient::new(channel))
})
}
pub fn run() -> Result<(), Error> {
let args: Vec<String> = std::env::args().collect();
// Start the counter at 1000 so we can inject some message before
// real requests if we want to.
let request_counter = Arc::new(atomic::AtomicUsize::new(1000));
if args.len() == 2 && args[1] == "--version" {
println!("{}", version());
return Ok(());
}
info!("Starting hsmproxy");
// Create a dedicated tokio runtime for gRPC operations
let runtime = Arc::new(
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.context("failed to create tokio runtime")?,
);
let node = setup_node_stream()?;
let grpc = grpc_connect(&runtime)?;
process_requests(
NodeConnection {
conn: node,
context: None,
},
request_counter,
grpc,
runtime,
)
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/pb.rs | libs/gl-signerproxy/src/pb.rs | use crate::wire::Message;
use anyhow::{anyhow, Result};
use byteorder::{BigEndian, ByteOrder};
tonic::include_proto!("greenlight");
impl HsmRequestContext {
pub fn from_client_hsmfd_msg(msg: &Message) -> Result<HsmRequestContext> {
if msg.msgtype() != 9 {
return Err(anyhow!("message is not an init"));
}
let node_id = &msg.body[2..35];
let dbid = BigEndian::read_u64(&msg.body[35..43]);
let caps = BigEndian::read_u64(&msg.body[43..51]);
Ok(HsmRequestContext {
dbid,
node_id: node_id.to_vec(),
capabilities: caps,
})
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/wire.rs | libs/gl-signerproxy/src/wire.rs | use crate::passfd::SyncFdPassingExt;
use anyhow::{anyhow, Error, Result};
use byteorder::{BigEndian, ByteOrder};
use log::trace;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::sync::Mutex;
/// A simple implementation of the inter-daemon protocol wrapping a
/// UnixStream. Easy to read from and write to.
pub struct DaemonConnection {
conn: Mutex<UnixStream>,
}
#[derive(Clone, Debug)]
pub struct Message {
typ: u16,
pub body: Vec<u8>,
pub(crate) fds: Vec<RawFd>,
}
impl Message {
pub fn from_raw(raw: Vec<u8>) -> Message {
Message::new(raw)
}
pub fn new(raw: Vec<u8>) -> Message {
Message {
typ: BigEndian::read_u16(&raw),
body: raw,
fds: vec![],
}
}
pub fn msgtype(&self) -> u16 {
self.typ
}
pub fn new_with_fds(raw: Vec<u8>, fds: &[RawFd]) -> Message {
Message {
typ: BigEndian::read_u16(&raw),
body: raw,
fds: fds.to_vec(),
}
}
}
impl PartialEq for Message {
fn eq(&self, other: &Self) -> bool {
self.body == other.body && self.typ == other.typ && self.fds == other.fds
}
}
impl DaemonConnection {
pub fn new(connection: UnixStream) -> DaemonConnection {
DaemonConnection {
conn: Mutex::new(connection),
}
}
fn count_fds(typ: u16) -> i8 {
match typ {
109 => 1,
_ => 0,
}
}
pub fn read(&self) -> Result<Message, Error> {
let mut sock = self.conn.lock().unwrap();
// Read 4-byte length prefix in big-endian
let mut len_buf = [0u8; 4];
sock.read_exact(&mut len_buf)?;
let msglen = BigEndian::read_u32(&len_buf);
// Read the message body
let mut buf = vec![0u8; msglen as usize];
sock.read_exact(&mut buf)?;
if buf.len() < msglen as usize {
return Err(anyhow!("Short read from client"));
}
let typ = BigEndian::read_u16(&buf);
let mut fds = vec![];
// Receive any file descriptors associated with this message type
let numfds = DaemonConnection::count_fds(typ);
for _ in 0..numfds {
fds.push(sock.as_raw_fd().recv_fd()?);
}
if fds.len() == 0 {
Ok(Message::new(buf))
} else {
Ok(Message::new_with_fds(buf, &fds))
}
}
pub fn write(&self, msg: Message) -> Result<(), Error> {
trace!(
"Sending message {} ({} bytes, {} FDs)",
msg.typ,
msg.body.len(),
msg.fds.len()
);
let mut client = self.conn.lock().unwrap();
// Write 4-byte length prefix in big-endian
let mut len_buf = [0u8; 4];
BigEndian::write_u32(&mut len_buf, msg.body.len() as u32);
client.write_all(&len_buf)?;
// Write the message body
client.write_all(&msg.body)?;
// Send any file descriptors
for fd in msg.fds {
client.as_raw_fd().send_fd(fd)?;
}
Ok(())
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-signerproxy/src/bin/signerproxy.rs | libs/gl-signerproxy/src/bin/signerproxy.rs | use anyhow::Result;
use gl_signerproxy::Proxy;
fn main() -> Result<()> {
env_logger::builder()
.target(env_logger::Target::Stderr)
.init();
Proxy::new().run()
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-util/src/lib.rs | libs/gl-util/src/lib.rs | pub mod error;
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-util/src/error.rs | libs/gl-util/src/error.rs | //! # Greenlight Error Module
//!
//! This module provides a comprehensive error handling system for
//! greenlight. It features a generic error type that can be customized with
//! module- or crate-specific error codes, while maintaining compatibility
//! with gRPC status codes and providing rich error context.
use bytes::Bytes;
use core::error::Error as StdError;
use serde::{Deserialize, Serialize};
use serde_json;
use std::sync::Arc;
use tonic;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ParsingError(String);
impl core::fmt::Display for ParsingError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "could not parse error: {}", self.0)
}
}
impl StdError for ParsingError {}
// Convenient macro for creating ParsingError
macro_rules! parsing_error {
($($arg:tt)*) => {
ParsingError(format!($($arg)*))
};
}
/// Creates an `Error` with the specified `ErrorCode` and optional formatted
/// message.
///
/// This macro provides a convenient and efficient way to construct `Error`
/// instances with compile-time optimizations and meaningful default messages.
///
/// # Arguments
///
/// * `$code` - An `ErrorCode` variant that categorizes the error type
/// * `$msg` - *(Optional)* A string literal for static messages
/// * `$fmt` - *(Optional)* A format string literal with `{}` placeholders
/// * `$args` - *(Optional)* Arguments corresponding to format placeholders
///
/// # See Also
///
/// - [`format!`] - For understanding format string syntax
/// - [`Error::new`] - The underlying constructor being called
/// - [`std::fmt`] - For advanced formatting options
///
/// [`format!`]: std::format
/// [`Error::new`]: crate::Error::new
/// [`std::fmt`]: std::fmt
#[macro_export]
macro_rules! error {
// Default message variant - generates meaningful message from error code
($code:expr) => {
$crate::error::Error::new($code, concat!(stringify!($code), " occurred"))
};
// Static message variant - most efficient for literal strings
($code:expr, $msg:literal) => {
$crate::error::Error::new($code, $msg)
};
// Formatted message variant - with compile-time format validation
($code:expr, $fmt:literal, $($args:expr),+ $(,)?) => {
$crate::error::Error::new($code, format!($fmt, $($args),+))
};
}
/// Trait for defining module-specific error codes.
///
/// This trait should be implemented by enums that represent different error
/// categories in greenlight. The error codes should be unique integers that
/// can be serialized and transmitted over the network.
pub trait ErrorCode: core::fmt::Debug + core::fmt::Display + Clone + Send + Sync + 'static {
/// Returns the numeric error code for this error type.
///
/// This code should be unique within your application and stable
/// across versions for backward compatibility.
fn code(&self) -> i32;
/// Attempts to construct an error code from its numeric representation.
///
/// Returns `None` if the code is not recognized.
fn from_code(code: i32) -> Option<Self>
where
Self: Sized;
}
/// JSON structure for transmitting error details over gRPC.
///
/// This structure is serialized into the `details` field of a
/// `tonic::Status` to provide structured error information that can be
/// parsed by clients.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GrpcErrorDetails {
pub code: i32,
/// Optional hint to help users resolve the issue
#[serde(skip_serializing_if = "Option::is_none")]
pub hint: Option<String>,
}
/// Extracted error information from a gRPC status.
///
/// This structure contains all the error information that was transmitted
/// over gRPC, including both the standard gRPC fields and our custom
/// structured error details.
#[derive(Debug, Clone)]
pub struct GrpcErrorInfo {
pub code: i32,
pub message: String,
pub hint: Option<String>,
pub grpc_code: tonic::Code,
}
/// Attempts to parse structured error information from a `tonic::Status`.
///
/// This implementation expects the status details to contain a JSON-encoded
/// `GrpcErrorDetails` structure. If parsing fails, a `serde_json::Error` is
/// returned.
impl TryFrom<tonic::Status> for GrpcErrorInfo {
type Error = serde_json::Error;
fn try_from(value: tonic::Status) -> std::result::Result<Self, Self::Error> {
let parsed = serde_json::from_slice::<GrpcErrorDetails>(&value.details())?;
Ok(GrpcErrorInfo {
code: parsed.code,
message: value.message().to_owned(),
hint: parsed.hint,
grpc_code: value.code(),
})
}
}
/// Extension trait for mapping error codes to gRPC status codes.
///
/// This trait should be implemented alongside `ErrorCode` to define
/// how your greenlight-specific errors map to standard gRPC status codes.
pub trait ErrorStatusConversionExt: ErrorCode {
/// Maps this error to an appropriate gRPC status code.
///
/// The returned status code should follow gRPC conventions:
/// See: https://grpc.io/docs/guides/status-codes/
fn status_code(&self) -> tonic::Code;
}
/// Generic error type that combines error codes with rich error context.
#[derive(Debug, Clone)]
pub struct Error<C: ErrorCode> {
/// Error code for categorization and programmatic handling
pub code: C,
/// User-facing error message
pub message: String,
/// Optional hint to help users resolve the issue
pub hint: Option<String>,
/// Context for debugging
pub context: Option<String>,
/// Source error chain for debugging
pub source: Option<Arc<dyn StdError + Send + Sync>>,
}
impl<C: ErrorCode> Error<C> {
/// Creates a new error with the given code and message.
pub fn new(code: C, message: impl Into<String>) -> Self {
Self {
code,
message: message.into(),
hint: None,
context: None,
source: None,
}
}
/// Adds a hint to help users resolve the issue.
///
/// Hints should provide actionable guidance for end users.
pub fn with_hint(mut self, hint: impl Into<String>) -> Self {
self.hint = Some(hint.into());
self
}
/// Adds internal context for debugging.
///
/// Context is meant for developers and should include information
/// about what the system was doing when the error occurred.
/// TODO: currently unarmed, but can be used to log errors in a
/// standardized way in the future.
pub fn with_context(mut self, context: impl Into<String>) -> Self {
self.context = Some(context.into());
self
}
/// Adds a source error for error chaining.
///
/// This is useful for preserving the original error that caused this error.
///
pub fn with_source(mut self, source: impl StdError + Send + Sync + 'static) -> Self {
self.source = Some(Arc::new(source));
self
}
/// Adds a source error from an existing Arc.
///
/// This is useful when forwarding errors that are already wrapped in an Arc,
/// avoiding unnecessary allocations.
pub fn with_source_arc(mut self, source: Arc<dyn StdError + Send + Sync>) -> Self {
self.source = Some(source);
self
}
/// Returns the numeric error code.
///
/// This is a convenience method that calls `code()` on the error code.
pub fn code(&self) -> i32 {
self.code.code()
}
/// Converts this error to use a different error code type.
///
/// This is useful when errors need to be converted between different
/// modules or layers that use different error code enums.
pub fn map_code<T: ErrorCode>(self, new_code: T) -> Error<T> {
Error {
code: new_code,
message: self.message,
hint: self.hint,
context: self.context,
source: self.source,
}
}
}
/// Displays the error message.
impl<C: ErrorCode> core::fmt::Display for Error<C> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self.message)
}
}
/// Implements the standard error trait for error chaining.
impl<C: ErrorCode> StdError for Error<C> {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.source
.as_ref()
.map(|e| e.as_ref() as &(dyn StdError + 'static))
}
}
/// Converts a `GreenlightError` into a `tonic::Status` for gRPC transmission.
///
/// The error details are JSON-encoded and included in the status details
/// field. If JSON serialization fails, a fallback JSON string is used.
impl<C: ErrorCode + ErrorStatusConversionExt> From<Error<C>> for tonic::Status {
fn from(value: Error<C>) -> Self {
let code = value.code.status_code();
let details: Bytes = serde_json::to_vec(&GrpcErrorDetails {
code: value.code(),
hint: value.hint.clone(),
})
.unwrap_or_else(|_| {
// Fallback to simple JSON if serialization fails
// This ensures we always send valid JSON even if something goes wrong
format!(
"{{\"code\":{},\"message\":\"{}\"}}",
value.code(),
value.message,
)
.into_bytes()
})
.into();
tonic::Status::with_details(code, value.message, details)
}
}
/// Attempts to convert a `tonic::Status` back into a `GreenlightError`.
///
/// This requires that:
/// 1. The status contains valid JSON details in the expected format
/// 2. The error code in the details can be mapped to a valid `C` variant
///
/// Returns an `anyhow::Error` if parsing fails or the error code is unknown.
impl<C: ErrorCode> TryFrom<tonic::Status> for Error<C> {
type Error = ParsingError;
fn try_from(value: tonic::Status) -> std::result::Result<Self, Self::Error> {
let grpc_err: GrpcErrorInfo = value
.try_into()
.map_err(|e| parsing_error!("failed to convert Status into GrpcErrorInfo {}", e))?;
let code = C::from_code(grpc_err.code)
.ok_or_else(|| parsing_error!("unknown error code: {}", grpc_err.code))?;
Ok(Self {
code,
message: grpc_err.message,
hint: grpc_err.hint,
context: None,
source: None,
})
}
}
/// Type alias for Core Lightning RPC error codes.
///
/// CLN uses specific numeric codes to indicate different types of failures
/// in payment operations. This type preserves the original error code
/// for debugging and logging purposes.
pub type ClnRpcError = i32;
/// Implementation of `ErrorCode` for CLN RPC errors.
///
/// This implementation treats all i32 values as valid error codes,
/// allowing us to preserve any error code returned by CLN without loss.
impl ErrorCode for ClnRpcError {
fn code(&self) -> i32 {
*self
}
fn from_code(code: i32) -> Option<Self>
where
Self: Sized,
{
Some(code)
}
}
/// Implementation of RPC error codes
use cln_rpc::RpcError;
pub trait RpcErrConversion: ErrorCode {
fn from_rpc_error(value: &RpcError) -> Self;
}
impl<C: ErrorCode + RpcErrConversion> From<RpcError> for Error<C> {
fn from(value: RpcError) -> Self {
let code = C::from_rpc_error(&value);
Self::new(code, value.message)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_status_conversion() {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
enum TestErrorCodes {
FailedPrecondition = 101,
NotFound = 202,
}
impl ErrorCode for TestErrorCodes {
fn code(&self) -> i32 {
*self as i32
}
fn from_code(_code: i32) -> Option<Self>
where
Self: Sized,
{
unimplemented!()
}
}
impl core::fmt::Display for TestErrorCodes {
fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
unimplemented!()
}
}
impl ErrorStatusConversionExt for TestErrorCodes {
fn status_code(&self) -> tonic::Code {
match self {
TestErrorCodes::FailedPrecondition => tonic::Code::FailedPrecondition,
TestErrorCodes::NotFound => tonic::Code::NotFound,
}
}
}
type TestError = Error<TestErrorCodes>;
let t_err = TestError::new(TestErrorCodes::FailedPrecondition, "a failed precondition")
.with_hint("How to resolve it");
let status: tonic::Status = t_err.clone().into();
assert_eq!(status.message(), t_err.message);
let mut details: serde_json::Value = serde_json::from_slice(status.details()).unwrap();
assert_eq!(
details["code"].take(),
TestErrorCodes::FailedPrecondition.code()
);
assert_eq!(details["hint"].take(), "How to resolve it");
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/uniffi-bindgen/src/main.rs | libs/uniffi-bindgen/src/main.rs | fn main() {
uniffi::uniffi_bindgen_main()
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/build.rs | libs/gl-client/build.rs | use std::env::var;
use std::path::Path;
use std::process::Command;
fn main() {
let manifest_dir = var("CARGO_MANIFEST_DIR").unwrap();
let nobody_crt: String = format!("{}/.resources/tls/users-nobody.pem", manifest_dir);
let nobody_key: String = format!("{}/.resources/tls/users-nobody-key.pem", manifest_dir);
// It's a lot easier to help users if we have the exact version of
// the Rust bindings that were used.
let output = Command::new("git")
.args(&["rev-parse", "HEAD"])
.output()
.unwrap();
let git_hash = String::from_utf8(output.stdout).unwrap();
println!("cargo:rustc-env=GIT_HASH={}", git_hash);
// Either both are set or none is :-) We set an env-var for
// `rustc` to pick up and include using `env!`.
let vars = match (var("GL_CUSTOM_NOBODY_KEY"), var("GL_CUSTOM_NOBODY_CERT")) {
(Ok(a), Ok(b)) => (a, b),
(Err(_), Err(_)) => {
println!("cargo:warning=Using default NOBODY cert.");
println!("cargo:warning=Set \"GL_CUSTOM_NOBODY_KEY\" and \"GL_CUSTOM_NOBODY_CERT\" to use a custom cert.");
(nobody_key.to_owned(), nobody_crt.to_owned())
}
(Ok(_), Err(_)) => {
println!("Missing GL_CUSTOM_NOBODY_CERT, since you are using GL_CUSTOM_NOBODY_KEY");
std::process::exit(1);
}
(Err(_), Ok(_)) => {
println!("Missing GL_CUSTOM_NOBODY_KEY, since you are using GL_CUSTOM_NOBODY_CERT");
std::process::exit(1);
}
};
// This actually sets the GL_NOBODY_KEY and GL_NOBODY_CRT env to the
// path of the given certs.
println!("cargo:rustc-env=GL_NOBODY_KEY={}", vars.0);
println!("cargo:rustc-env=GL_NOBODY_CRT={}", vars.1);
// We check that these exist before we compile.
let key_path = Path::new(&vars.0);
let cert_path = Path::new(&vars.1);
match (key_path.exists(), cert_path.exists()) {
(true, true) => (),
(_, _) => {
// We could not find either the key or the cert.
println!(
"Could not find cert and key files: {:?}, {:?}",
key_path, cert_path
);
std::process::exit(1);
}
}
// Setting a custom certificate causes rebuilds of this crate
println!("cargo:rerun-if-env-changed=GL_CUSTOM_NOBODY_CERT");
println!("cargo:rerun-if-env-changed=GL_CUSTOM_NOBODY_KEY");
let builder = tonic_build::configure();
builder
.type_attribute(".", "#[derive(serde::Serialize,serde::Deserialize)]")
.protoc_arg("--experimental_allow_proto3_optional")
.compile(
&[
".resources/proto/glclient/greenlight.proto",
".resources/proto/glclient/scheduler.proto",
".resources/proto/node.proto",
],
&[".resources/proto"],
)
.unwrap();
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/tls.rs | libs/gl-client/src/tls.rs | use anyhow::{Context, Result};
use log::debug;
use std::path::Path;
use tonic::transport::{Certificate, ClientTlsConfig, Identity};
use x509_certificate::X509Certificate;
const CA_RAW: &[u8] = include_str!("../.resources/tls/ca.pem").as_bytes();
const NOBODY_CRT: &[u8] = include_str!(env!("GL_NOBODY_CRT")).as_bytes();
const NOBODY_KEY: &[u8] = include_str!(env!("GL_NOBODY_KEY")).as_bytes();
/// In order to allow the clients to talk to the
/// [`crate::scheduler::Scheduler`] a default certificate and private
/// key is included in this crate. The only service endpoints that can
/// be contacted with this `NOBODY` identity are
/// [`Scheduler.register`] and [`Scheduler.recover`], as these are the
/// endpoints that are used to prove ownership of a node, and
/// returning valid certificates if that proof succeeds.
#[derive(Clone, Debug)]
pub struct TlsConfig {
pub(crate) inner: ClientTlsConfig,
/// Copy of the private key in the TLS identity. Stored here in
/// order to be able to use it in the `AuthLayer`.
pub(crate) private_key: Option<Vec<u8>>,
pub ca: Vec<u8>,
/// The device_crt parsed as an x509 certificate. Used to
/// validate the common subject name against the node_id
/// configured on the scheduler.
pub x509_cert: Option<X509Certificate>,
}
/// Tries to load nobody credentials from a file that is passed by an envvar and
/// defaults to the nobody cert and key paths that have been set during build-
/// time.
fn load_file_or_default(varname: &str, default: &[u8]) -> Vec<u8> {
match std::env::var(varname) {
Ok(fname) => {
debug!("Loading file {} for envvar {}", fname, varname);
let f = std::fs::read(fname.clone());
if f.is_err() {
debug!(
"Could not find file {} for var {}, loading from default",
fname, varname
);
default.to_vec()
} else {
f.unwrap()
}
}
Err(_) => default.to_vec(),
}
}
impl TlsConfig {
pub fn new() -> Self {
debug!("Configuring TlsConfig with nobody identity");
let nobody_crt = load_file_or_default("GL_NOBODY_CRT", NOBODY_CRT);
let nobody_key = load_file_or_default("GL_NOBODY_KEY", NOBODY_KEY);
let ca_crt = load_file_or_default("GL_CA_CRT", CA_RAW);
// it is ok to panic here in case of a broken nobody certificate.
// We can not do anything at all and should fail loudly!
Self::with(nobody_crt, nobody_key, ca_crt)
}
pub fn with<V: AsRef<[u8]>>(crt: V, key: V, ca_crt: V) -> Self {
let x509_cert = x509_certificate_from_pem_or_none(&crt);
let config = ClientTlsConfig::new()
.ca_certificate(Certificate::from_pem(ca_crt.as_ref()))
.identity(Identity::from_pem(crt, key.as_ref()));
TlsConfig {
inner: config,
private_key: Some(key.as_ref().to_vec()),
ca: ca_crt.as_ref().to_vec(),
x509_cert,
}
}
}
impl TlsConfig {
/// This function is used to upgrade the anonymous `NOBODY`
/// configuration to a fully authenticated configuration.
///
/// Only non-`NOBODY` configurations are able to talk to their
/// nodes. If the `TlsConfig` is not upgraded, nodes will reply
/// with handshake failures, and abort the connection attempt.
pub fn identity(self, cert_pem: Vec<u8>, key_pem: Vec<u8>) -> Self {
let x509_cert = x509_certificate_from_pem_or_none(&cert_pem);
TlsConfig {
inner: self.inner.identity(Identity::from_pem(&cert_pem, &key_pem)),
private_key: Some(key_pem),
x509_cert,
..self
}
}
/// Upgrades the connection using an identity based on a certificate
/// and key from a path.
///
/// The path is a directory that contains a `client.crt` and
/// a `client-key.pem`-file which contain respectively the certificate
/// and private key.
pub fn identity_from_path<P: AsRef<Path>>(self, path: P) -> Result<Self> {
let cert_path = path.as_ref().join("client.crt");
let key_path = path.as_ref().join("client-key.pem");
let cert_pem = std::fs::read(cert_path.clone())
.with_context(|| format!("Failed to read '{}'", cert_path.display()))?;
let key_pem = std::fs::read(key_path.clone())
.with_context(|| format!("Failed to read '{}", key_path.display()))?;
Ok(self.identity(cert_pem, key_pem))
}
/// This function is mostly used to allow running integration
/// tests against a local mock of the service. It should not be
/// used in production, since the preconfigured CA ensures that
/// only the greenlight production servers can complete a valid
/// handshake.
pub fn ca_certificate(self, ca: Vec<u8>) -> Self {
TlsConfig {
inner: self.inner.ca_certificate(Certificate::from_pem(&ca)),
ca,
..self
}
}
pub fn client_tls_config(&self) -> ClientTlsConfig {
self.inner.clone()
}
}
impl Default for TlsConfig {
fn default() -> Self {
Self::new()
}
}
/// A wrapper that returns an Option that contains a `X509Certificate`
/// if it could be parsed from the given `pem` data or None if it could
/// not be parsed. Logs a failed attempt.
fn x509_certificate_from_pem_or_none(pem: impl AsRef<[u8]>) -> Option<X509Certificate> {
X509Certificate::from_pem(pem)
.map_err(|e| debug!("Failed to parse x509 certificate: {}", e))
.ok()
}
/// Generate a new device certificate from a fresh set of keys. The path in the
/// common name (CN) field is "/users/{node_id}/{device}". This certificate is
/// self signed and needs to be signed off by the users certificate authority to
/// be valid. This certificate can not act as a ca and sign sub certificates.
/// It can take an optional key pair to create the certificate from instead of
/// generating a key pair from scratch.
pub fn generate_self_signed_device_cert(
    node_id: &str,
    device: &str,
    subject_alt_names: Vec<String>,
    key_pair: Option<rcgen::KeyPair>,
) -> rcgen::Certificate {
    // Configure the certificate starting from the shared DN template.
    let mut params = cert_params_from_template(subject_alt_names);
    // Is a leaf certificate only so it is not allowed to sign child
    // certificates.
    params.is_ca = rcgen::IsCa::ExplicitNoCa;
    params.distinguished_name.push(
        rcgen::DnType::CommonName,
        format!("/users/{}/{}", node_id, device),
    );
    // Use the caller-provided key pair when one was given; `None`
    // lets rcgen generate a fresh key pair for the certificate.
    params.key_pair = key_pair;
    params.alg = &rcgen::PKCS_ECDSA_P256_SHA256;
    // NOTE(review): the unwrap assumes rcgen cannot fail on this
    // template/key combination — confirm against rcgen's docs.
    rcgen::Certificate::from_params(params).unwrap()
}
/// Builds the shared `CertificateParams` template: sets the subject
/// alternative names and the fixed distinguished-name fields common
/// to every certificate we generate.
fn cert_params_from_template(subject_alt_names: Vec<String>) -> rcgen::CertificateParams {
    let mut params = rcgen::CertificateParams::new(subject_alt_names);
    // Certificate can be used to issue unlimited sub certificates for devices.
    let dn_fields = [
        (rcgen::DnType::CountryName, "US"),
        (rcgen::DnType::LocalityName, "SAN FRANCISCO"),
        (rcgen::DnType::OrganizationName, "Blockstream"),
        (rcgen::DnType::StateOrProvinceName, "CALIFORNIA"),
        (rcgen::DnType::OrganizationalUnitName, "CertificateAuthority"),
    ];
    for (dn_type, value) in dn_fields {
        params.distinguished_name.push(dn_type, value);
    }
    params
}
/// Generates a fresh ECDSA P-256/SHA-256 key pair, panicking if key
/// generation fails.
pub fn generate_ecdsa_key_pair() -> rcgen::KeyPair {
    rcgen::KeyPair::generate(&rcgen::PKCS_ECDSA_P256_SHA256).unwrap()
}
#[cfg(test)]
pub mod tests {
    use rcgen::KeyPair;
    use super::*;

    // The generated certificate and private key must serialize to PEM
    // with the standard armoring headers.
    #[test]
    fn test_generate_self_signed_device_cert() {
        let device_cert =
            generate_self_signed_device_cert("mynodeid", "device", vec!["localhost".into()], None);
        assert!(device_cert
            .serialize_pem()
            .unwrap()
            .starts_with("-----BEGIN CERTIFICATE-----"));
        assert!(device_cert
            .serialize_private_key_pem()
            .starts_with("-----BEGIN PRIVATE KEY-----"));
    }

    // Passing an explicit key pair must reuse exactly that key pair
    // instead of generating a fresh one.
    #[test]
    fn test_generate_self_signed_device_cert_from_pem() {
        let kp = generate_ecdsa_key_pair();
        let keys = KeyPair::from_der(kp.serialized_der()).unwrap();
        let cert = generate_self_signed_device_cert(
            "mynodeid",
            "device",
            vec!["localhost".into()],
            Some(keys),
        );
        assert!(kp.serialize_pem() == cert.get_key_pair().serialize_pem());
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/persist.rs | libs/gl-client/src/persist.rs | use lightning_signer::bitcoin::secp256k1::PublicKey;
use lightning_signer::chain::tracker::ChainTracker;
use lightning_signer::channel::ChannelId;
use lightning_signer::channel::ChannelStub;
use lightning_signer::node::NodeConfig;
use lightning_signer::node::NodeState;
use lightning_signer::persist::ChainTrackerListenerEntry;
use lightning_signer::persist::{Error, Persist, SignerId};
use lightning_signer::policy::validator::ValidatorFactory;
use lightning_signer::SendSync;
use log::{trace, warn};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::Arc;
use std::sync::Mutex;
// Key prefixes used to namespace entries in the flat key-value store;
// see `State` for the `<prefix>/<id>` key layout.
const NODE_PREFIX: &str = "nodes";
const NODE_STATE_PREFIX: &str = "nodestates";
const CHANNEL_PREFIX: &str = "channels";
const ALLOWLIST_PREFIX: &str = "allowlists";
const TRACKER_PREFIX: &str = "trackers";
/// A versioned key-value store holding the signer state.
///
/// Keys follow a `<prefix>/<id>` layout (see the `*_PREFIX` consts);
/// each value carries a version counter that starts at 0 on insert and
/// is bumped by 1 on every update, which `merge` uses to resolve
/// conflicts between local and remote copies.
#[derive(Clone, Serialize, Deserialize)]
pub struct State {
    // key -> (version, JSON-serialized value)
    values: BTreeMap<String, (u64, serde_json::Value)>,
}
impl State {
    /// Inserts a new node entry and its state under the `nodes/` and
    /// `nodestates/` prefixes, both starting at version 0.
    ///
    /// Panics if either key is already present: a node may only ever
    /// be inserted once.
    fn insert_node(
        &mut self,
        key: &str,
        node_entry: vls_persist::model::NodeEntry,
        node_state_entry: vls_persist::model::NodeStateEntry,
    ) -> Result<(), Error> {
        let node_key = format!("{NODE_PREFIX}/{key}");
        let state_key = format!("{NODE_STATE_PREFIX}/{key}");
        assert!(!self.values.contains_key(&node_key), "inserting node twice");
        assert!(
            !self.values.contains_key(&state_key),
            "inserting node_state twice"
        );
        self.values
            .insert(node_key, (0u64, serde_json::to_value(node_entry).unwrap()));
        self.values.insert(
            state_key,
            (0u64, serde_json::to_value(node_state_entry).unwrap()),
        );
        Ok(())
    }

    /// Replaces the node state stored under `nodestates/<key>`,
    /// bumping its version by one. Panics if it does not exist yet.
    fn update_node(
        &mut self,
        key: &str,
        node_state: vls_persist::model::NodeStateEntry,
    ) -> Result<(), Error> {
        trace!(
            "Update node: {}",
            serde_json::to_string(&node_state).unwrap()
        );
        let key = format!("{NODE_STATE_PREFIX}/{key}");
        let v = self
            .values
            .get_mut(&key)
            .expect("attempting to update non-existent node");
        *v = (v.0 + 1, serde_json::to_value(node_state).unwrap());
        Ok(())
    }

    /// Removes both the node entry and the node state for `key`;
    /// missing entries are silently ignored.
    fn delete_node(&mut self, key: &str) -> Result<(), Error> {
        let node_key = format!("{NODE_PREFIX}/{key}");
        let state_key = format!("{NODE_STATE_PREFIX}/{key}");
        self.values.remove(&node_key);
        self.values.remove(&state_key);
        Ok(())
    }

    /// Inserts a new channel entry under `channels/<key>` at version
    /// 0. Panics if the channel already exists.
    fn insert_channel(
        &mut self,
        key: &str,
        channel_entry: vls_persist::model::ChannelEntry,
    ) -> Result<(), Error> {
        let key = format!("{CHANNEL_PREFIX}/{key}");
        assert!(!self.values.contains_key(&key));
        self.values
            .insert(key, (0u64, serde_json::to_value(channel_entry).unwrap()));
        Ok(())
    }

    /// Removes the channel entry stored under `channels/<key>`.
    ///
    /// Bugfix: the key is now namespaced with `CHANNEL_PREFIX`,
    /// matching `insert_channel`/`update_channel`/`get_channel`.
    /// Previously the raw, unprefixed key was removed, which never
    /// matched a stored `channels/...` entry, so deleting a channel
    /// silently did nothing.
    fn delete_channel(&mut self, key: &str) {
        let key = format!("{CHANNEL_PREFIX}/{key}");
        self.values.remove(&key);
    }

    /// Replaces the channel entry under `channels/<key>`, bumping its
    /// version by one. Panics if the channel does not exist.
    fn update_channel(
        &mut self,
        key: &str,
        channel_entry: vls_persist::model::ChannelEntry,
    ) -> Result<(), Error> {
        trace!("Updating channel {key}");
        let key = format!("{CHANNEL_PREFIX}/{key}");
        let v = self
            .values
            .get_mut(&key)
            .expect("attempting to update non-existent channel");
        *v = (v.0 + 1, serde_json::to_value(channel_entry).unwrap());
        Ok(())
    }

    /// Loads and deserializes the channel entry under
    /// `channels/<key>`. Panics if it is missing or its JSON does not
    /// deserialize into a `ChannelEntry`.
    fn get_channel(
        &self,
        key: &str,
    ) -> Result<lightning_signer::persist::model::ChannelEntry, Error> {
        let key = format!("{CHANNEL_PREFIX}/{key}");
        let value = self.values.get(&key).unwrap();
        let entry: vls_persist::model::ChannelEntry =
            serde_json::from_value(value.1.clone()).unwrap();
        Ok(entry.into())
    }

    /// Returns all channels belonging to `node_id` by scanning for
    /// keys starting with `channels/<hex node id>`.
    fn get_node_channels(
        &self,
        node_id: &PublicKey,
    ) -> Result<
        Vec<(
            lightning_signer::channel::ChannelId,
            lightning_signer::persist::model::ChannelEntry,
        )>,
        Error,
    > {
        let prefix = hex::encode(node_id.serialize());
        let prefix = format!("{CHANNEL_PREFIX}/{prefix}");
        Ok(self
            .values
            .iter()
            .filter(|(k, _)| k.starts_with(&prefix))
            .map(|(k, v)| {
                // The node-channel id is the last path segment.
                let key = k.split('/').last().unwrap();
                let key = vls_persist::model::NodeChannelId(hex::decode(&key).unwrap());
                let value: vls_persist::model::ChannelEntry =
                    serde_json::from_value(v.1.clone()).unwrap();
                (key.channel_id(), value.into())
            })
            .collect())
    }

    /// Stores the initial chain tracker for `node_id` under the
    /// `trackers/` prefix at version 0. Panics if one already exists.
    fn new_chain_tracker(
        &mut self,
        node_id: &PublicKey,
        tracker: &ChainTracker<lightning_signer::monitor::ChainMonitor>,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        let key = format!("{TRACKER_PREFIX}/{key}");
        assert!(!self.values.contains_key(&key));
        let tracker: vls_persist::model::ChainTrackerEntry = tracker.into();
        self.values
            .insert(key, (0u64, serde_json::to_value(tracker).unwrap()));
        Ok(())
    }

    /// Drops every entry, resetting the state to empty.
    pub fn clear(&mut self) -> Result<(), Error> {
        self.values.clear();
        Ok(())
    }
}
/// A single difference between two `State`s, as produced by
/// `State::diff`.
#[derive(Debug)]
pub struct StateChange {
    // The affected store key.
    key: String,
    // Local (version, value) prior to the change; `None` if the key
    // was absent locally.
    old: Option<(u64, serde_json::Value)>,
    // Incoming (version, value) pair.
    new: (u64, serde_json::Value),
}
use core::fmt::Display;
impl Display for StateChange {
    /// Renders the change in a compact human-readable form; an absent
    /// old entry is rendered as `null`.
    ///
    /// Uses `write!` to format directly into the formatter instead of
    /// allocating an intermediate `String` via `format!` +
    /// `write_str`; the produced text is unchanged.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
        match &self.old {
            Some(o) => write!(
                f,
                "StateChange[{}]: old_version={}, new_version={}, old_value={}, new_value={}",
                self.key,
                o.0,
                self.new.0,
                serde_json::to_string(&o.1).unwrap(),
                serde_json::to_string(&self.new.1).unwrap()
            ),
            None => write!(
                f,
                "StateChange[{}]: old_version=null, new_version={}, old_value=null, new_value={}",
                self.key,
                self.new.0,
                serde_json::to_string(&self.new.1).unwrap()
            ),
        }
    }
}
impl State {
    /// Creates an empty state.
    pub fn new() -> Self {
        State {
            values: BTreeMap::new(),
        }
    }

    /// Take another `State` and attempt to update ourselves with any
    /// entry that is newer than ours. Entries in `other` that are
    /// older than our local copy are logged and skipped; entries at
    /// the same version are left untouched.
    ///
    /// Returns one `(key, old_version, new_version)` tuple per applied
    /// change, with `old_version = None` for newly inserted keys.
    pub fn merge(&mut self, other: &State) -> anyhow::Result<Vec<(String, Option<u64>, u64)>> {
        let mut res = Vec::new();
        for (key, (newver, newval)) in other.values.iter() {
            match self.values.get_mut(key) {
                None => {
                    trace!("Insert new key {}: version={}", key, newver);
                    res.push((key.to_owned(), None, *newver));
                    self.values.insert(key.clone(), (*newver, newval.clone()));
                }
                Some(v) => {
                    if v.0 == *newver {
                        continue;
                    } else if v.0 > *newver {
                        warn!("Ignoring outdated state version newver={}, we have oldver={}: newval={:?} vs oldval={:?}", newver, v.0, serde_json::to_string(newval), serde_json::to_string(&v.1));
                        continue;
                    } else {
                        trace!(
                            "Updating key {}: version={} => version={}",
                            key,
                            v.0,
                            *newver
                        );
                        res.push((key.to_owned(), Some(v.0), *newver));
                        *v = (*newver, newval.clone());
                    }
                }
            }
        }
        Ok(res)
    }

    /// Computes the changes that applying `other` on top of `self`
    /// would perform, without modifying either state. A change is
    /// reported when the key is new or `other`'s version is strictly
    /// newer.
    pub fn diff(&self, other: &State) -> anyhow::Result<Vec<StateChange>> {
        Ok(other
            .values
            .iter()
            .map(|(key, (ver, val))| (key, self.values.get(key), (ver, val)))
            .map(|(key, old, new)| StateChange {
                key: key.clone(),
                // `.cloned()` replaces the noisier `.map(|o| o.clone())`.
                old: old.cloned(),
                new: (*new.0, new.1.clone()),
            })
            .filter(|c| match (&c.old, &c.new) {
                (None, _) => true,
                (Some((oldver, _)), (newver, _)) => oldver < newver,
            })
            .collect())
    }
}
/// Converts the state into the wire representation used by the
/// protobuf API.
///
/// Implemented as `From` rather than `Into` (clippy `from_over_into`);
/// the standard blanket impl still provides
/// `Into<Vec<SignerStateEntry>> for State`, so existing `.into()`
/// callers are unaffected.
impl From<State> for Vec<crate::pb::SignerStateEntry> {
    fn from(state: State) -> Vec<crate::pb::SignerStateEntry> {
        state
            .values
            .iter()
            .map(|(k, v)| crate::pb::SignerStateEntry {
                key: k.to_owned(),
                value: serde_json::to_vec(&v.1).unwrap(),
                version: v.0,
            })
            .collect()
    }
}
impl From<Vec<crate::pb::SignerStateEntry>> for State {
fn from(v: Vec<crate::pb::SignerStateEntry>) -> State {
use std::iter::FromIterator;
let values = BTreeMap::from_iter(v.iter().map(|v| {
(
v.key.to_owned(),
(v.version, serde_json::from_slice(&v.value).unwrap()),
)
}));
State { values }
}
}
/// An in-memory implementation of the VLS `Persist` trait, backed by
/// a shared, mutex-protected [`State`].
pub(crate) struct MemoryPersister {
    // Shared via `state()` so callers can inspect/serialize the
    // signer state while the persister is in use.
    state: Arc<Mutex<State>>,
}
impl MemoryPersister {
pub fn new() -> Self {
let state = Arc::new(Mutex::new(State {
values: BTreeMap::new(),
}));
MemoryPersister { state }
}
pub fn state(&self) -> Arc<Mutex<State>> {
self.state.clone()
}
}
// Marker trait required by VLS; sound because the only field is an
// `Arc<Mutex<_>>`, which is itself Send + Sync.
impl SendSync for MemoryPersister {}
impl Persist for MemoryPersister {
    // Persist a brand-new node: stores the node entry and its initial
    // state, keyed by the hex-encoded node id. Panics (via
    // `State::insert_node`) if the node was already persisted.
    fn new_node(
        &self,
        node_id: &lightning_signer::bitcoin::secp256k1::PublicKey,
        config: &NodeConfig,
        state: &NodeState,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        self.state.lock().unwrap().insert_node(
            &key,
            vls_persist::model::NodeEntry {
                key_derivation_style: config.key_derivation_style as u8,
                network: config.network.to_string(),
            },
            state.into(),
        )
    }

    // Remove a channel, addressed by the hex encoding of its
    // node-scoped channel id; the unprefixed id is handed to the
    // state layer, which owns the key layout.
    fn delete_channel(&self, node_id: &PublicKey, channel: &ChannelId) -> Result<(), Error> {
        let node_channel_id = vls_persist::model::NodeChannelId::new(node_id, &channel);
        let id = hex::encode(node_channel_id.0);
        self.state.lock().unwrap().delete_channel(&id);
        Ok(())
    }

    // Replace the persisted node state; panics if the node was never
    // inserted.
    fn update_node(
        &self,
        node_id: &lightning_signer::bitcoin::secp256k1::PublicKey,
        state: &NodeState,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        self.state.lock().unwrap().update_node(&key, state.into())
    }

    // Remove a node's entry and state; missing entries are ignored.
    fn delete_node(
        &self,
        node_id: &lightning_signer::bitcoin::secp256k1::PublicKey,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        self.state.lock().unwrap().delete_node(&key)
    }

    // Persist a channel stub: no setup yet, value 0, fresh
    // enforcement state, with the stub's birth blockheight recorded.
    fn new_channel(
        &self,
        node_id: &lightning_signer::bitcoin::secp256k1::PublicKey,
        stub: &ChannelStub,
    ) -> Result<(), Error> {
        let id = vls_persist::model::NodeChannelId::new(node_id, &stub.id0);
        let channel_value_satoshis = 0;
        let enforcement_state = lightning_signer::policy::validator::EnforcementState::new(0);
        let entry = vls_persist::model::ChannelEntry {
            channel_value_satoshis,
            channel_setup: None,
            id: None,
            enforcement_state,
            // birth blockheight for stub, None for channel
            blockheight: Some(stub.blockheight),
        };
        let id = hex::encode(id.0);
        self.state.lock().unwrap().insert_channel(&id, entry)
    }

    // Replace a persisted channel with its current setup and
    // enforcement state; panics if the channel was never inserted.
    fn update_channel(
        &self,
        node_id: &lightning_signer::bitcoin::secp256k1::PublicKey,
        channel: &lightning_signer::channel::Channel,
    ) -> Result<(), Error> {
        let node_channel_id = vls_persist::model::NodeChannelId::new(node_id, &channel.id0);
        let id = hex::encode(node_channel_id.0);
        let channel_value_satoshis = channel.setup.channel_value_sat;
        let entry = vls_persist::model::ChannelEntry {
            channel_value_satoshis,
            channel_setup: Some(channel.setup.clone()),
            id: channel.id.clone(),
            enforcement_state: channel.enforcement_state.clone(),
            blockheight: None,
        };
        self.state.lock().unwrap().update_channel(&id, entry)
    }

    // Load a single channel entry; panics (in the state layer) if it
    // does not exist.
    fn get_channel(
        &self,
        node_id: &PublicKey,
        channel_id: &ChannelId,
    ) -> Result<lightning_signer::persist::model::ChannelEntry, Error> {
        let id = vls_persist::model::NodeChannelId::new(node_id, channel_id);
        let id = hex::encode(id.0);
        self.state.lock().unwrap().get_channel(&id)
    }

    // Persist the initial chain tracker for a node; panics if one was
    // already stored.
    fn new_tracker(
        &self,
        node_id: &PublicKey,
        tracker: &ChainTracker<lightning_signer::monitor::ChainMonitor>,
    ) -> Result<(), Error> {
        self.state
            .lock()
            .unwrap()
            .new_chain_tracker(node_id, tracker)
    }

    // Replace the persisted chain tracker, bumping its version by
    // one; panics if the tracker was never created.
    fn update_tracker(
        &self,
        node_id: &PublicKey,
        tracker: &ChainTracker<lightning_signer::monitor::ChainMonitor>,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        let key = format!("{TRACKER_PREFIX}/{key}");
        let mut state = self.state.lock().unwrap();
        let v = state.values.get_mut(&key).unwrap();
        let tracker: vls_persist::model::ChainTrackerEntry = tracker.into();
        *v = (v.0 + 1, serde_json::to_value(tracker).unwrap());
        Ok(())
    }

    // Load and rehydrate the chain tracker for a node; panics if it
    // is missing or fails to deserialize.
    fn get_tracker(
        &self,
        node_id: PublicKey,
        validator_factory: Arc<dyn ValidatorFactory>,
    ) -> Result<
        (
            ChainTracker<lightning_signer::monitor::ChainMonitor>,
            Vec<ChainTrackerListenerEntry>,
        ),
        Error,
    > {
        let key = hex::encode(node_id.serialize());
        let key = format!("{TRACKER_PREFIX}/{key}");
        let state = self.state.lock().unwrap();
        let v: vls_persist::model::ChainTrackerEntry =
            serde_json::from_value(state.values.get(&key).unwrap().1.clone()).unwrap();
        Ok(v.into_tracker(node_id, validator_factory))
    }

    // List all channels belonging to a node.
    fn get_node_channels(
        &self,
        node_id: &PublicKey,
    ) -> Result<Vec<(ChannelId, lightning_signer::persist::model::ChannelEntry)>, Error> {
        self.state.lock().unwrap().get_node_channels(node_id)
    }

    // Store (or replace, bumping the version) the allowlist for a
    // node under the `allowlists/` prefix.
    fn update_node_allowlist(
        &self,
        node_id: &PublicKey,
        allowlist: Vec<std::string::String>,
    ) -> Result<(), Error> {
        let key = hex::encode(node_id.serialize());
        let key = format!("{ALLOWLIST_PREFIX}/{key}");
        let mut state = self.state.lock().unwrap();
        match state.values.get_mut(&key) {
            Some(v) => {
                *v = (v.0 + 1, serde_json::to_value(allowlist).unwrap());
            }
            None => {
                state
                    .values
                    .insert(key, (0u64, serde_json::to_value(allowlist).unwrap()));
            }
        }
        Ok(())
    }

    // Load a node's allowlist; panics if none was ever stored.
    fn get_node_allowlist(&self, node_id: &PublicKey) -> Result<Vec<std::string::String>, Error> {
        let state = self.state.lock().unwrap();
        let key = hex::encode(node_id.serialize());
        let key = format!("{ALLOWLIST_PREFIX}/{key}");
        let allowlist: Vec<String> =
            serde_json::from_value(state.values.get(&key).unwrap().1.clone()).unwrap();
        Ok(allowlist)
    }

    // Enumerate all persisted nodes: collect the ids under the
    // `nodes/` prefix, then load each node's entry and state and
    // rebuild the in-memory `NodeState`.
    fn get_nodes(
        &self,
    ) -> Result<Vec<(PublicKey, lightning_signer::persist::model::NodeEntry)>, Error> {
        use lightning_signer::node::NodeState as CoreNodeState;
        let state = self.state.lock().unwrap();
        // Keys look like "nodes/<hex node id>"; keep the trailing
        // segment of every key under the `nodes/` prefix.
        let node_ids: Vec<&str> = state
            .values
            .keys()
            .map(|k| k.split('/'))
            .filter(|k| k.clone().next().unwrap() == NODE_PREFIX)
            .map(|k| k.clone().last().unwrap())
            .collect();
        let mut res = Vec::new();
        for node_id in node_ids.iter() {
            let node_key = format!("{NODE_PREFIX}/{node_id}");
            let state_key = format!("{NODE_STATE_PREFIX}/{node_id}");
            let node: vls_persist::model::NodeEntry =
                serde_json::from_value(state.values.get(&node_key).unwrap().1.clone()).unwrap();
            let state_e: vls_persist::model::NodeStateEntry =
                serde_json::from_value(state.values.get(&state_key).unwrap().1.clone()).unwrap();
            let state = CoreNodeState::restore(
                state_e.invoices,
                state_e.issued_invoices,
                state_e.preimages,
                0,
                state_e.velocity_control.into(),
                state_e.fee_velocity_control.into(),
                0u64,
                /* dbid_high_water_mark: prevents reuse of
                 * channel dbid, 0 disables enforcement. */
            );
            let entry = lightning_signer::persist::model::NodeEntry {
                key_derivation_style: node.key_derivation_style,
                network: node.network,
                state,
            };
            let key: Vec<u8> = hex::decode(node_id).unwrap();
            res.push((PublicKey::from_slice(key.as_slice()).unwrap(), entry));
        }
        let nodes = res;
        Ok(nodes)
    }

    // Wipe the whole store.
    fn clear_database(&self) -> Result<(), Error> {
        self.state.lock().unwrap().clear()
    }

    fn signer_id(&self) -> SignerId {
        // The signers are clones of each other in Greenlight, and as
        // such we should not need to differentiate them. We therefore
        // just return a static dummy ID.
        [0u8; 16]
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lib.rs | libs/gl-client/src/lib.rs | //! Greenlight client library to schedule nodes, interact with them
//! and sign off on signature requests.
//!
/// Interact with a node running on greenlight.
///
/// The node must be scheduled using [`crate::scheduler::Scheduler`]:
///
///
pub mod node;
/// Generated protobuf messages and client stubs.
///
/// Since the client needs to be configured correctly, don't use
/// [`pb::node_client::NodeClient`] directly, rather use
/// [`node::Node`] to create a correctly configured client.
pub mod pb;
use std::time::Duration;
/// Register, recover and schedule your nodes on greenlight.
pub mod scheduler;
/// Your keys, your coins!
///
/// This module implements the logic to stream, verify and respond to
/// signature requests from the node. Without this the node cannot
/// move your funds.
pub mod signer;
pub mod persist;
pub mod lnurl;
/// The pairing service that pairs signer-less clients with existing
/// signers.
pub mod pairing;
/// Helpers to configure the mTLS connection authentication.
///
/// mTLS configuration for greenlight clients. Clients are
/// authenticated by presenting a valid mTLS certificate to the
/// node. Each node has its own CA. This CA is used to sign both the
/// device certificates, as well as the node certificate itself. This
/// ensures that only clients that are authorized can open a
/// connection to the node.
pub mod tls;
#[cfg(feature = "export")]
pub mod export;
/// Tools to interact with a node running on greenlight.
pub mod utils;
pub mod credentials;
pub mod util;
use thiserror::Error;
/// Library-level error type.
#[derive(Error, Debug)]
pub enum Error {
    // Raised when a signature request cannot be matched to any RPC
    // call the client authorized.
    #[error("The signature request does not match any authorized RPC calls")]
    MissingAuthorization,
}
pub use lightning_signer::bitcoin;
pub use lightning_signer::lightning;
pub use lightning_signer::lightning_invoice;
// TCP keepalive settings for connections created by this crate.
// NOTE(review): presumably interval between probes and the timeout
// after which an unanswered probe drops the connection — confirm
// against the call sites in `node`/`scheduler`.
pub(crate) const TCP_KEEPALIVE: Duration = Duration::from_secs(1);
pub(crate) const TCP_KEEPALIVE_TIMEOUT: Duration = Duration::from_secs(5);
pub mod runes;
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/runes.rs | libs/gl-client/src/runes.rs | use runeauth::{Alternative, Check, Condition, ConditionChecker, Restriction, Rune, RuneError};
use std::fmt::Display;
use std::time::{SystemTime, UNIX_EPOCH};
/// Represents an entity that can provide restrictions.
///
/// The `Restrictor` trait should be implemented by types that are able to
/// produce a list of `Restriction`s. The `generate` method returns a `Result`
/// containing a vector of `Restriction`s or a `RuneError` in case of any error.
pub trait Restrictor {
    /// Retrieves the restrictions associated with the current instance.
    ///
    /// Note that `generate` consumes `self` by value; implementors used
    /// through `RuneFactory::carve` must therefore also be `Copy`.
    ///
    /// # Returns
    /// A `Result` containing a `Vec` of `Restriction`s. In the event of
    /// failure, returns a `RuneError`.
    fn generate(self) -> Result<Vec<Restriction>, RuneError>;
}
/// A factory responsible for carving runes.
///
/// `RuneFactory` provides utility functions to manipulate and produce runes
/// with certain characteristics, such as additional restrictions. It is a
/// stateless namespace: all functionality lives in associated functions.
pub struct RuneFactory;
impl RuneFactory {
    /// Combines an original `Rune` with a list of restricters,
    /// and produces a new rune in base64 format.
    ///
    /// # Parameters
    /// - `origin`: A reference to the original `Rune` that will serve as the
    ///   base.
    /// - `append`: A slice of entities that implement the `Restrictor`
    ///   trait.
    ///
    /// # Returns
    /// A `Result` containing a `String` representing the carved rune in base64
    /// format. In the event of any failure during the carving process, returns
    /// a `RuneError`.
    pub fn carve<T: Restrictor + Copy>(origin: &Rune, append: &[T]) -> Result<String, RuneError> {
        // Generate all restrictions up front so any failure aborts
        // before we touch the rune. (`iter()` instead of
        // `into_iter()` on a reference — same items, clearer intent.)
        let restrictions = append.iter().try_fold(Vec::new(), |mut acc, res| {
            let mut r = res.generate()?;
            acc.append(&mut r);
            Ok(acc)
        })?;
        let mut carved = origin.clone();
        for r in restrictions {
            // Changes are applied in place, as well as returned, so
            // dropping the return value is fine.
            let _ = carved.add_restriction(r);
        }
        Ok(carved.to_base64())
    }
}
/// Predefined rule sets to generate `Restriction`s from.
///
/// The lifetime parameter only matters for [`DefRules::Add`], which
/// borrows the nested rule slice.
#[derive(Clone, Copy)]
pub enum DefRules<'a> {
    /// Represents a rule set where only read operations are allowed. This
    /// translates to a `Restriction` that is "method^Get|method^List".
    ReadOnly,
    /// Represents a rule set where only the `pay` method is allowed. This
    /// translates to a `Restriction` that is "method=pay".
    Pay,
    /// A special rule that adds the alternatives of the given `DefRules`
    /// in a disjunctive set. Example: Add(vec![ReadOnly, Pay]) translates
    /// to a `Restriction` that is "method^Get|method^List|method=pay".
    Add(&'a [DefRules<'a>]),
}
impl<'a> Restrictor for DefRules<'a> {
    /// Generate the actual `Restriction` entities based on the predefined rule
    /// sets.
    ///
    /// Errors from building alternatives/restrictions are now propagated
    /// via `?` instead of panicking with `unwrap()` — the signature
    /// already returns a `Result`, so callers can handle them.
    ///
    /// # Returns
    /// A `Result` containing a vector of `Restriction` entities or a
    /// `RuneError` if there's any error while generating the restrictions.
    fn generate(self) -> Result<Vec<Restriction>, RuneError> {
        match self {
            // "method^Get|method^List"
            DefRules::ReadOnly => {
                let alts = vec![
                    alternative("method", Condition::BeginsWith, "Get")?,
                    alternative("method", Condition::BeginsWith, "List")?,
                ];
                Ok(vec![Restriction::new(alts)?])
            }
            // "method=pay"
            DefRules::Pay => {
                let alts = vec![alternative("method", Condition::Equal, "pay")?];
                Ok(vec![Restriction::new(alts)?])
            }
            // Disjunction of all alternatives of the nested rules.
            DefRules::Add(rules) => {
                let alt_set = rules
                    .iter()
                    .try_fold(Vec::new(), |mut acc: Vec<Alternative>, rule| {
                        let mut alts = rule
                            .generate()?
                            .into_iter()
                            .flat_map(|r| r.alternatives)
                            .collect();
                        acc.append(&mut alts);
                        Ok(acc)
                    })?;
                Ok(vec![Restriction::new(alt_set)?])
            }
        }
    }
}
impl<'a> Display for DefRules<'a> {
    /// Formats the rule set name: `readonly`, `pay`, or the
    /// `|`-joined names of the nested rules for `Add`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            DefRules::ReadOnly => write!(f, "readonly"),
            DefRules::Pay => write!(f, "pay"),
            DefRules::Add(rules) => {
                // collect + join replaces the fold that rebuilt the
                // accumulator string with `format!` on every step
                // (accidentally quadratic); output is identical.
                let joined = rules
                    .iter()
                    .map(|r| r.to_string())
                    .collect::<Vec<_>>()
                    .join("|");
                write!(f, "{}", joined)
            }
        }
    }
}
/// Creates an `Alternative` based on the provided field, condition, and value.
///
/// This function is a shorthand for creating new `Alternative` entities
/// without having to manually wrap field and value into `String`.
///
/// # Parameters
/// - `field`: The field on which the alternative is based.
/// - `cond`: The condition to check against the field.
/// - `value`: The value to match with the condition against the field.
///
/// # Returns
///
/// A result containing the created `Alternative` or a `RuneError` if there's
/// any error in the creation.
fn alternative(field: &str, cond: Condition, value: &str) -> Result<Alternative, RuneError> {
    // NOTE(review): the trailing `false` is runeauth's fourth
    // constructor flag — confirm its meaning against the runeauth
    // `Alternative::new` documentation.
    Alternative::new(field.to_string(), cond, value.to_string(), false)
}
/// A context struct that holds information relevant to check a command against
/// a rune.
#[derive(Clone)]
pub struct Context {
    // The rpc method associated with the request.
    pub method: String,
    // The public key associated with the request.
    pub pubkey: String,
    // The unique id; matched against a rune field with an empty name
    // (see `Check::check_alternative` below).
    pub unique_id: String,
    // The timestamp associated with the request.
    pub time: SystemTime,
    // Todo (nepet): Add param field that uses enum or serde to store the params of a call.
}
/// Implementation of the `Check` trait for the `Context` struct, allowing it to
/// perform checks on rune alternatives.
impl Check for Context {
    /// Performs a check on an alternative based on the context's fields.
    ///
    /// The alternative's field name selects which context value is
    /// compared: an empty name selects `unique_id`, `time` is converted
    /// to whole seconds since the Unix epoch, and unknown field names
    /// map to the empty string (so only `Missing`-style conditions can
    /// match them).
    ///
    /// # Arguments
    ///
    /// * `alt` - The alternative to check against the context.
    ///
    /// # Returns
    ///
    /// * `Ok(())` if the check is successful, an `Err` containing a `RuneError` otherwise.
    fn check_alternative(&self, alt: &Alternative) -> anyhow::Result<(), RuneError> {
        let value = match alt.get_field().as_str() {
            "" => self.unique_id.clone(),
            "method" => self.method.clone(),
            "pubkey" => self.pubkey.clone(),
            // Errors only if `time` predates the Unix epoch.
            "time" => self
                .time
                .duration_since(UNIX_EPOCH)
                .map_err(|e| {
                    RuneError::Unknown(format!("Can not extract seconds from timestamp {:?}", e))
                })?
                .as_secs()
                .to_string(),
            _ => String::new(), // If we don't know the field we can not set it!
        };
        ConditionChecker { value }.check_alternative(alt)
    }
}
#[cfg(test)]
mod tests {
    use super::{Context, DefRules, RuneFactory};
    use base64::{engine::general_purpose, Engine as _};
    use runeauth::{Alternative, Condition, Restriction, Rune};
    use std::time::SystemTime;

    // Carving a ReadOnly rule must encode "method^Get|method^List" and
    // keep the rune authorized by the master rune.
    #[test]
    fn test_carve_readonly_rune() {
        let seed = [0; 32];
        let mr = Rune::new_master_rune(&seed, vec![], None, None).unwrap();

        // Carve a new rune from the master rune with given restrictions.
        let carved = RuneFactory::carve(&mr, &[DefRules::ReadOnly]).unwrap();
        let carved_byt = general_purpose::URL_SAFE.decode(&carved).unwrap();
        let carved_restr = String::from_utf8(carved_byt[32..].to_vec()).unwrap(); // Strip off the authcode to inspect the restrictions.
        assert_eq!(carved_restr, *"method^Get|method^List");

        let carved_rune = Rune::from_base64(&carved).unwrap();
        assert!(mr.is_authorized(&carved_rune));
    }

    // Add(...) must merge the nested rules' alternatives into a single
    // disjunctive restriction.
    #[test]
    fn test_carve_disjunction_rune() {
        let seed = [0; 32];
        let mr = Rune::new_master_rune(&seed, vec![], None, None).unwrap();

        // Carve a new rune from the master rune with given restrictions.
        let carved =
            RuneFactory::carve(&mr, &[DefRules::Add(&[DefRules::ReadOnly, DefRules::Pay])])
                .unwrap();
        let carved_byt = general_purpose::URL_SAFE.decode(&carved).unwrap();
        let carved_restr = String::from_utf8(carved_byt[32..].to_vec()).unwrap(); // Strip off the authcode to inspect the restrictions.
        assert_eq!(carved_restr, *"method^Get|method^List|method=pay");

        let carved_rune = Rune::from_base64(&carved).unwrap();
        assert!(mr.is_authorized(&carved_rune));
    }

    // Display must render single names and '|'-joined Add sets.
    #[test]
    fn test_defrules_display() {
        let r = DefRules::Pay;
        assert_eq!(format!("{}", r), "pay");

        let r = DefRules::Add(&[DefRules::Pay]);
        assert_eq!(format!("{}", r), "pay");

        let r = DefRules::Add(&[DefRules::Pay, DefRules::ReadOnly]);
        assert_eq!(format!("{}", r), "pay|readonly");
    }

    // Exercises `Check for Context` against equality and "missing
    // field" restrictions, in both the passing and failing direction.
    #[test]
    fn test_context_check() {
        let seedsecret = &[0; 32];
        let mr = Rune::new_master_rune(seedsecret, vec![], None, None).unwrap();

        // r1 restrictions: "pubkey=020000000000000000"
        let r1 = Rune::new(
            mr.authcode(),
            vec![Restriction::new(vec![Alternative::new(
                String::from("pubkey"),
                Condition::Equal,
                String::from("020000000000000000"),
                false,
            )
            .unwrap()])
            .unwrap()],
            None,
            None,
        )
        .unwrap();

        // r2 restrictions: "method=GetInfo"
        let r2 = Rune::new(
            mr.authcode(),
            vec![Restriction::new(vec![Alternative::new(
                String::from("method"),
                Condition::Equal,
                String::from("GetInfo"),
                false,
            )
            .unwrap()])
            .unwrap()],
            None,
            None,
        )
        .unwrap();

        // r3 restrictions: "pubkey!"
        let r3 = Rune::new(
            mr.authcode(),
            vec![Restriction::new(vec![Alternative::new(
                String::from("pubkey"),
                Condition::Missing,
                String::new(),
                false,
            )
            .unwrap()])
            .unwrap()],
            None,
            None,
        )
        .unwrap();

        // r4 restriction: "method!"
        let r4 = Rune::new(
            mr.authcode(),
            vec![Restriction::new(vec![Alternative::new(
                String::from("method"),
                Condition::Missing,
                String::new(),
                false,
            )
            .unwrap()])
            .unwrap()],
            None,
            None,
        )
        .unwrap();

        // These should succeed.
        // Check with method="", pubkey=020000000000000000
        let ctx = Context {
            method: String::new(),
            pubkey: String::from("020000000000000000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r1.are_restrictions_met(ctx).is_ok());

        // Check with method="ListFunds", pubkey=020000000000000000
        let ctx = Context {
            method: String::from("ListFunds"),
            pubkey: String::from("020000000000000000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r1.are_restrictions_met(ctx).is_ok());

        // Check with method="GetInfo", pubkey=""
        let ctx = Context {
            method: String::from("GetInfo"),
            pubkey: String::new(),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r2.are_restrictions_met(ctx).is_ok());

        // Check with method="GetInfo", pubkey="020000000000000000"
        let ctx = Context {
            method: String::from("GetInfo"),
            pubkey: String::from("020000000000000000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r2.are_restrictions_met(ctx).is_ok());

        // Check with method="GetInfo", pubkey=""
        let ctx = Context {
            method: String::from("GetInfo"),
            pubkey: String::new(),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r3.are_restrictions_met(ctx).is_ok());

        // Check with method="", pubkey="020000"
        let ctx = Context {
            method: String::new(),
            pubkey: String::from("020000000000000000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r4.are_restrictions_met(ctx).is_ok());

        // These should fail.
        // Check with method="ListFunds", pubkey=030000, wrong pubkey.
        let ctx = Context {
            method: String::from("ListFunds"),
            pubkey: String::from("030000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r1.are_restrictions_met(ctx).is_err());

        // Check with method="ListFunds", pubkey=030000, wrong method.
        let ctx = Context {
            method: String::from("ListFunds"),
            pubkey: String::from("030000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r2.are_restrictions_met(ctx).is_err());

        // Check with pubkey=030000, pubkey present.
        let ctx = Context {
            method: String::new(),
            pubkey: String::from("030000"),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r3.are_restrictions_met(ctx).is_err());

        // Check with method="GetInfo", method present.
        let ctx = Context {
            method: String::from("GetInfo"),
            pubkey: String::new(),
            time: SystemTime::now(),
            unique_id: String::new(),
        };
        assert!(r4.are_restrictions_met(ctx).is_err());
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/pb.rs | libs/gl-client/src/pb.rs | pub mod greenlight {
tonic::include_proto!("greenlight");
}
pub mod scheduler {
tonic::include_proto!("scheduler");
}
pub use cln_grpc::pb as cln;
pub use greenlight::*;
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/util.rs | libs/gl-client/src/util.rs | pub fn is_feature_bit_enabled(bitmap: &[u8], index: usize) -> bool {
let n_bytes = bitmap.len();
let (byte_index, bit_index) = (index / 8, index % 8);
// The index doesn't fit in the byte-array
if byte_index >= n_bytes {
return false;
}
let selected_byte = bitmap[n_bytes - 1 - byte_index];
let bit_mask = 1u8 << (bit_index);
return (selected_byte & bit_mask) != 0;
}
#[cfg(test)]
mod test {
    use super::*;

    // Helper: decode a hex feature string into its byte bitmap.
    fn to_bitmap(feature_hex_string: &str) -> Result<Vec<u8>, String> {
        hex::decode(&feature_hex_string).map_err(|x| x.to_string())
    }

    // Single-byte bitmaps: each of the 8 low bits must be found at its
    // index, and no other index must report as set.
    #[test]
    fn test_parse_bitmap() {
        // Check the lowest bits in the bitmap
        let feature_bitmap_00 = to_bitmap(&"01").unwrap();
        let feature_bitmap_01 = to_bitmap(&"02").unwrap();
        let feature_bitmap_02 = to_bitmap(&"04").unwrap();
        let feature_bitmap_03 = to_bitmap(&"08").unwrap();
        let feature_bitmap_04 = to_bitmap(&"10").unwrap();
        let feature_bitmap_05 = to_bitmap(&"20").unwrap();
        let feature_bitmap_06 = to_bitmap(&"40").unwrap();
        let feature_bitmap_07 = to_bitmap(&"80").unwrap();

        // Check that the expected bit is enabled
        assert!(is_feature_bit_enabled(&feature_bitmap_00, 0));
        assert!(is_feature_bit_enabled(&feature_bitmap_01, 1));
        assert!(is_feature_bit_enabled(&feature_bitmap_02, 2));
        assert!(is_feature_bit_enabled(&feature_bitmap_03, 3));
        assert!(is_feature_bit_enabled(&feature_bitmap_04, 4));
        assert!(is_feature_bit_enabled(&feature_bitmap_05, 5));
        assert!(is_feature_bit_enabled(&feature_bitmap_06, 6));
        assert!(is_feature_bit_enabled(&feature_bitmap_07, 7));

        // Check that other bits are disabled
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 0));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 2));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 3));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 4));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 5));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 6));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 7));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 8));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 9));
        assert!(!is_feature_bit_enabled(&feature_bitmap_01, 1000));
    }

    // A real LSPS0 bitmap: only bit 729 is set (leading "02" in a
    // 92-byte big-endian bitmap).
    #[test]
    fn test_lsps_option_enabled_bitmap() {
        // Copied from LSPS0
        // This set bit number 729
        let data = "0200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000";
        let bitmap = to_bitmap(&data).unwrap();

        // Check that the expected bit is enabled
        assert!(is_feature_bit_enabled(&bitmap, 729));

        // Check that the expected bit is disabled
        assert!(!is_feature_bit_enabled(&bitmap, 728));
        assert!(!is_feature_bit_enabled(&bitmap, 730));
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/utils.rs | libs/gl-client/src/utils.rs | use crate::tls::TlsConfig;
use anyhow::{anyhow, Result};
/// Returns the scheduler's grpc URI: the GL_SCHEDULER_GRPC_URI environment
/// variable if set, otherwise the production default.
pub fn scheduler_uri() -> String {
    const DEFAULT_URI: &str = "https://scheduler.gl.blckstrm.com";
    match std::env::var("GL_SCHEDULER_GRPC_URI") {
        Ok(uri) => uri,
        Err(_) => DEFAULT_URI.to_string(),
    }
}
/// Extracts the node id from a device certificate's subject common name.
///
/// The CN is expected to look like "/users/<hex-node-id>[/...]"; the hex
/// component is decoded and returned as raw bytes.
pub fn get_node_id_from_tls_config(tls_config: &TlsConfig) -> Result<Vec<u8>> {
    let cert = tls_config
        .x509_cert
        .as_ref()
        .ok_or_else(|| anyhow!("The certificate could not be parsed in the x509 format"))?;
    let subject_common_name = cert.subject_common_name().ok_or_else(|| {
        anyhow!("Failed to parse the subject common name in the provided x509 certificate")
    })?;

    let parts: Vec<&str> = subject_common_name.split('/').collect();
    // Must have at least 3 components: "" / "users" / "node_id"
    if parts.len() < 3 {
        return Err(anyhow!(
            "Could not parse subject common name: {}",
            subject_common_name
        ));
    }
    if parts[1] != "users" {
        return Err(anyhow!("Not a users certificate: {}", subject_common_name));
    }

    hex::decode(parts[2]).map_err(|e| anyhow!("Unable to decode node_id ({}): {}", parts[2], e))
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/credentials.rs | libs/gl-client/src/credentials.rs | use crate::{
scheduler::Scheduler,
signer::Signer,
tls::{self, TlsConfig},
utils::get_node_id_from_tls_config,
};
// Credentials is a collection of all relevant keys and attestations
// required to authenticate a device and authorize a command on the node.
// They represent the identity of a device and can be encoded into a byte
// format for easy storage. (Plain comment: a `///` doc comment here would
// attach to the following `use` item, which is not what was intended.)
use log::debug;
use std::{convert::TryFrom, path::Path};
use thiserror;
// Wire-format version written into serialized credential blobs.
const CRED_VERSION: u32 = 1u32;
// Certificates and keys baked in at build time; each can be overridden at
// runtime via the corresponding environment variable (see `load_file_or_default`).
const CA_RAW: &[u8] = include_str!("../.resources/tls/ca.pem").as_bytes();
const NOBODY_CRT: &[u8] = include_str!(env!("GL_NOBODY_CRT")).as_bytes();
const NOBODY_KEY: &[u8] = include_str!(env!("GL_NOBODY_KEY")).as_bytes();
/// Errors that can occur while building, (de)serializing, or upgrading
/// credentials.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("could not get from identity: {}", .0)]
    GetFromIdentityError(String),
    #[error("identity mismatch: {}", .0)]
    IsIdentityError(String),
    #[error("could not decode into credentials")]
    DecodeCredentialsError(#[from] prost::DecodeError),
    #[error("could not encode credentials")]
    EncodeCredentialError(#[from] prost::EncodeError),
    #[error("could not upgrade credentials: {}", .0)]
    UpgradeCredentialsError(String),
    #[error("could not build credentials {}", .0)]
    BuildCredentialsError(String),
    #[error("could not create create credentials from data: {}", .0)]
    TransformDataIntoCredentialsError(String),
    #[error("could not create tls config {}", .0)]
    CreateTlsConfigError(#[source] anyhow::Error),
    #[error("could not read from file: {}", .0)]
    ReadFromFileError(#[from] std::io::Error),
    #[error("could not fetch default nobody credentials: {}", .0)]
    FetchDefaultNobodyCredentials(#[source] anyhow::Error),
}

/// Module-local result type defaulting to [`Error`].
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// Supplies the mTLS configuration used to authenticate the transport.
pub trait TlsConfigProvider: Send + Sync {
    fn tls_config(&self) -> TlsConfig;
}

/// Supplies the rune used to authorize commands on the node.
pub trait RuneProvider {
    fn rune(&self) -> String;
}

/// Supplies the node id these credentials belong to.
pub trait NodeIdProvider {
    fn node_id(&self) -> Result<Vec<u8>>;
}
/// A helper struct to combine the Tls certificate and the corresponding private
/// key.
#[derive(Clone, Debug)]
struct Identity {
    // Certificate bytes, loaded from GL_NOBODY_CRT (file or built-in) by default.
    cert: Vec<u8>,
    // Private key bytes, loaded from GL_NOBODY_KEY (file or built-in) by default.
    key: Vec<u8>,
}
impl Default for Identity {
    /// Loads the built-in "nobody" identity; the GL_NOBODY_KEY and
    /// GL_NOBODY_CRT environment variables may point at override files.
    /// Panics if an override file is set but unreadable.
    fn default() -> Self {
        let key = load_file_or_default("GL_NOBODY_KEY", NOBODY_KEY)
            .expect("Could not load file from GL_NOBODY_KEY");
        let cert = load_file_or_default("GL_NOBODY_CRT", NOBODY_CRT)
            .expect("Could not load file from GL_NOBODY_CRT");
        Self { cert, key }
    }
}
/// The `Nobody` credentials struct. This is an unauthenticated set of
/// credentials and can only be used for registration and recovery.
#[derive(Clone, Debug)]
pub struct Nobody {
    // Client certificate presented during the TLS handshake.
    pub cert: Vec<u8>,
    // Private key matching `cert`.
    pub key: Vec<u8>,
    // Certificate authority used to verify the server.
    pub ca: Vec<u8>,
}
impl Nobody {
    /// Returns a new Nobody instance with default parameters.
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns a new Nobody instance with a custom certificate and key.
    /// The CA is loaded from GL_CA_CRT, falling back to the built-in one.
    pub fn with<V>(cert: V, key: V) -> Self
    where
        V: Into<Vec<u8>>,
    {
        let ca =
            load_file_or_default("GL_CA_CRT", CA_RAW).expect("Could not load file from GL_CA_CRT");
        Self {
            cert: cert.into(),
            key: key.into(),
            ca,
        }
    }

    /// Replaces the certificate authority, keeping cert and key unchanged.
    pub fn with_ca<V>(self, ca: V) -> Self
    where
        V: Into<Vec<u8>>,
    {
        Self {
            ca: ca.into(),
            ..self
        }
    }
}
impl TlsConfigProvider for Nobody {
    /// Builds a TLS configuration from the nobody cert, key and CA.
    fn tls_config(&self) -> TlsConfig {
        tls::TlsConfig::with(&self.cert, &self.key, &self.ca)
    }
}
impl Default for Nobody {
fn default() -> Self {
let ca =
load_file_or_default("GL_CA_CRT", CA_RAW).expect("Could not load file from GL_CA_CRT");
let identity = Identity::default();
Self {
cert: identity.cert,
key: identity.key,
ca,
}
}
}
/// The `Device` credentials store the device's certificate, the device's
/// private key, the certificate authority and the device's rune.
#[derive(Clone, Debug)]
pub struct Device {
    // Serialization-format version of these credentials (see CRED_VERSION).
    pub version: u32,
    // Device certificate presented during the TLS handshake.
    pub cert: Vec<u8>,
    // Private key matching `cert`.
    pub key: Vec<u8>,
    // Certificate authority used to verify the server.
    pub ca: Vec<u8>,
    // Rune authorizing commands on the node.
    pub rune: String,
}
impl Device {
    /// Creates a new set of `Device` credentials from the given
    /// credentials data blob. It defaults to the nobody credentials set:
    /// any field missing from the blob — or a blob that fails to decode
    /// entirely — keeps its default value.
    pub fn from_bytes(data: impl AsRef<[u8]>) -> Self {
        let mut creds = Self::default();
        log::trace!("Build authenticated credentials from: {:?}", data.as_ref());
        if let Ok(data) = model::Data::try_from(data.as_ref()) {
            creds.version = data.version;
            if let Some(cert) = data.cert {
                creds.cert = cert
            }
            if let Some(key) = data.key {
                creds.key = key
            }
            if let Some(ca) = data.ca {
                creds.ca = ca
            }
            if let Some(rune) = data.rune {
                creds.rune = rune
            }
        }
        creds
    }

    /// Creates a new set of `Device` credentials from a path that
    /// contains a credentials data blob. Defaults to the nobody
    /// credentials set: a missing or unreadable file yields an empty blob.
    pub fn from_path(path: impl AsRef<Path>) -> Self {
        debug!("Read credentials data from {:?}", path.as_ref());
        // Best-effort read; errors intentionally degrade to defaults.
        let data = std::fs::read(path).unwrap_or_default();
        Device::from_bytes(data)
    }

    /// Creates a new set of `Device` credentials from a complete set of
    /// credentials. The CA comes from GL_CA_CRT or the built-in default.
    pub fn with<V, S>(cert: V, key: V, rune: S) -> Self
    where
        V: Into<Vec<u8>>,
        S: Into<String>,
    {
        let ca =
            load_file_or_default("GL_CA_CRT", CA_RAW).expect("Could not load file from GL_CA_CRT");
        Self {
            version: CRED_VERSION,
            cert: cert.into(),
            key: key.into(),
            rune: rune.into(),
            ca
        }
    }

    /// Replaces the certificate authority, keeping all other fields.
    pub fn with_ca<V>(self, ca: V) -> Self
    where
        V: Into<Vec<u8>>,
    {
        Device {
            ca: ca.into(),
            ..self
        }
    }

    /// Asynchronously upgrades the credentials using the provided scheduler and
    /// signer, potentially involving network operations or other async tasks.
    /// Currently this bumps the version and, if no rune is present, asks the
    /// signer to mint one restricted to this device's node id.
    pub async fn upgrade<T>(mut self, _scheduler: &Scheduler<T>, signer: &Signer) -> Result<Self>
    where
        T: TlsConfigProvider,
    {
        use Error::*;
        self.version = CRED_VERSION;
        if self.rune.is_empty() {
            let node_id = self
                .node_id()
                .map_err(|e| UpgradeCredentialsError(e.to_string()))?;
            // Restrict the new rune to this node's pubkey.
            let alt = runeauth::Alternative::new(
                "pubkey".to_string(),
                runeauth::Condition::Equal,
                hex::encode(node_id),
                false,
            )
            .map_err(|e| UpgradeCredentialsError(e.to_string()))?;
            self.rune = signer
                .create_rune(None, vec![vec![&alt.encode()]])
                .map_err(|e| UpgradeCredentialsError(e.to_string()))?;
        };
        Ok(self)
    }

    /// Returns a byte encoded representation of the credentials. This
    /// can be used to store the credentials in one single file.
    pub fn to_bytes(&self) -> Vec<u8> {
        self.to_owned().into()
    }
}
impl TlsConfigProvider for Device {
    /// Builds a TLS configuration from the device cert, key and CA.
    fn tls_config(&self) -> TlsConfig {
        tls::TlsConfig::with(&self.cert, &self.key, &self.ca)
    }
}
impl RuneProvider for Device {
    /// Returns the rune used to authorize commands on the node.
    fn rune(&self) -> String {
        // Clone only the rune string. The previous `self.to_owned().rune`
        // deep-copied the whole credential set (cert, key, ca byte vectors)
        // just to drop everything but the rune again.
        self.rune.clone()
    }
}
impl NodeIdProvider for Device {
    /// Extracts the node id from the device certificate's subject common name.
    fn node_id(&self) -> Result<Vec<u8>> {
        // The underlying parse error is intentionally discarded and replaced
        // with a generic message.
        get_node_id_from_tls_config(&self.tls_config()).map_err(|_e| {
            Error::GetFromIdentityError(
                "node_id could not be retrieved from the certificate".to_string(),
            )
        })
    }
}
impl From<Device> for Vec<u8> {
    /// Byte-encodes the credentials by way of the protobuf wire model.
    fn from(value: Device) -> Self {
        model::Data::from(value).into()
    }
}
impl From<Device> for model::Data {
    /// Converts the credentials into the protobuf encoding model.
    fn from(device: Device) -> Self {
        model::Data {
            // NOTE(review): this always writes CRED_VERSION instead of
            // `device.version`, so re-serializing legacy (version 0)
            // credentials silently bumps them — confirm this is intended.
            version: CRED_VERSION,
            cert: Some(device.cert),
            key: Some(device.key),
            ca: Some(device.ca),
            rune: Some(device.rune),
        }
    }
}
impl Default for Device {
fn default() -> Self {
let ca =
load_file_or_default("GL_CA_CRT", CA_RAW).expect("Could not load file from GL_CA_CRT");
let identity = Identity::default();
Self {
version: 0,
cert: identity.cert,
key: identity.key,
ca,
rune: Default::default(),
}
}
}
mod model {
    use prost::Message;
    use std::convert::TryFrom;

    /// The Data struct is used for encoding and decoding of credentials. It
    /// uses proto for byte encoding. All fields except the version are
    /// optional, so partially populated blobs decode cleanly.
    #[derive(Message, Clone)]
    pub struct Data {
        #[prost(uint32, tag = "1")]
        pub version: u32,
        #[prost(bytes, optional, tag = "2")]
        pub cert: Option<Vec<u8>>,
        #[prost(bytes, optional, tag = "3")]
        pub key: Option<Vec<u8>>,
        #[prost(bytes, optional, tag = "4")]
        pub ca: Option<Vec<u8>>,
        #[prost(string, optional, tag = "5")]
        pub rune: Option<String>,
    }

    impl TryFrom<&[u8]> for Data {
        type Error = super::Error;

        /// Decodes a protobuf-encoded byte slice into `Data`.
        fn try_from(buf: &[u8]) -> std::prelude::v1::Result<Self, Self::Error> {
            let data: Data = Data::decode(buf)?;
            Ok(data)
        }
    }

    impl From<Data> for Vec<u8> {
        /// Encodes `Data` into its protobuf wire representation.
        fn from(value: Data) -> Self {
            value.encode_to_vec()
        }
    }
}
/// Tries to load nobody credentials from a file that is passed by an envvar and
/// defaults to the nobody cert and key paths that have been set during build-
/// time.
/// Returns the contents of the file named by the environment variable
/// `varname`, or `default` when the variable is unset. Errors only when the
/// variable is set but the file cannot be read.
fn load_file_or_default(varname: &str, default: &[u8]) -> Result<Vec<u8>> {
    match std::env::var(varname) {
        Ok(fname) => {
            debug!("Loading file {} for envvar {}", fname, varname);
            // `fs::read` accepts any AsRef<Path>; borrow the name instead of
            // the previous redundant `fname.clone()`.
            Ok(std::fs::read(&fname)?)
        }
        Err(_) => Ok(default.to_vec()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Encoding sanity check: every field's bytes must appear in the blob.
    #[test]
    fn test_encode() {
        let cert: Vec<u8> = vec![99, 98];
        let key = vec![97, 96];
        let ca = vec![95, 94];
        let rune = "non_functional_rune".to_string();
        let data = model::Data {
            version: 1,
            cert: Some(cert.clone()),
            key: Some(key.clone()),
            ca: Some(ca.clone()),
            rune: Some(rune.clone()),
        };
        let buf: Vec<u8> = data.into();
        print!("{:?}", buf);
        for n in cert {
            assert!(buf.contains(&n));
        }
        for n in key {
            assert!(buf.contains(&n));
        }
        for n in ca {
            assert!(buf.contains(&n));
        }
        for n in rune.as_bytes() {
            assert!(buf.contains(n));
        }
    }

    // Decodes a fixed, known-good protobuf blob and checks every field.
    #[test]
    fn test_decode() {
        let data: Vec<u8> = vec![
            8, 1, 18, 2, 99, 98, 26, 2, 97, 96, 34, 2, 95, 94, 42, 19, 110, 111, 110, 95, 102, 117,
            110, 99, 116, 105, 111, 110, 97, 108, 95, 114, 117, 110, 101,
        ];
        let data = model::Data::try_from(&data[..]).unwrap();
        assert!(data.version == 1);
        assert!(data.cert.is_some_and(|d| d == vec![99, 98]));
        assert!(data.key.is_some_and(|d| d == vec![97, 96]));
        assert!(data.ca.is_some_and(|d| d == vec![95, 94]));
        assert!(data.rune.is_some_and(|d| d == *"non_functional_rune"));
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/export.rs | libs/gl-client/src/export.rs | //! Utilities to work with export/backup files.
use anyhow::{anyhow, Context, Error};
use bytes::{Buf, Bytes, BytesMut};
use chacha20poly1305::{AeadInPlace, ChaCha20Poly1305, KeyInit};
use lightning_signer::bitcoin::{
secp256k1::{ecdh::SharedSecret, PublicKey, Secp256k1, SecretKey},
Network,
};
use std::io::Read;
// Format version byte expected at the start of every backup blob.
const VERSION: u8 = 0x01;
/// Header layout: version byte, node ID (ephemeral pubkey), nonce, tag
const HEADER_LEN: usize = 1 + 33 + 12 + 16;
/// Decrypts a backup blob given the wallet seed: derives the node secret
/// key from the seed (native derivation style, mainnet) and delegates to
/// [`decrypt`].
pub fn decrypt_with_seed(enc: BytesMut, seed: &SecretKey) -> Result<Bytes, Error> {
    // Derive the nodeidkey from the seed.
    use lightning_signer::signer::derive::{key_derive, KeyDerivationStyle};
    let secp = Secp256k1::default();
    let d = key_derive(KeyDerivationStyle::Native, Network::Bitcoin);
    let (_, node_secret) = d.node_keys(&seed.secret_bytes(), &secp);
    decrypt(enc, &node_secret)
}
/// Decrypts an export/backup blob with the node's private key.
///
/// Blob layout: `[version(1)][ephemeral pubkey(33)][nonce(12)][tag(16)][ciphertext]`
/// (see `HEADER_LEN`). The ChaCha20-Poly1305 key is the ECDH shared secret
/// between the blob's ephemeral public key and `privkey`; the serialized
/// node id serves as the AEAD's associated data.
pub fn decrypt(mut enc: BytesMut, privkey: &SecretKey) -> Result<Bytes, Error> {
    let mut r = enc.clone().reader();
    // Start by reading the header
    let mut version = [0u8; 1];
    r.read_exact(&mut version)?;
    if VERSION != version[0] {
        return Err(anyhow!(
            "Backup version {} is not supported by this client version {}",
            version[0],
            VERSION
        ));
    }
    let mut ephkey = [0u8; 33];
    r.read_exact(&mut ephkey)?;
    let mut nonce = [0u8; 12];
    r.read_exact(&mut nonce)?;
    let mut tag = [0u8; 16];
    r.read_exact(&mut tag)?;
    let secp = Secp256k1::default();
    let ephkey = PublicKey::from_slice(&ephkey).context("loading ephemeral key")?;
    let node_id = privkey.public_key(&secp);
    let shared_secret = SharedSecret::new(&ephkey, &privkey);
    // Skip past the header so only the ciphertext remains in `enc`.
    enc.advance(HEADER_LEN);
    let cipher = ChaCha20Poly1305::new(&shared_secret.secret_bytes().into());
    // Decrypt in place, authenticating against the node id and detached tag.
    cipher
        .decrypt_in_place_detached(&nonce.into(), &node_id.serialize(), &mut enc, &tag.into())
        .map_err(|e| anyhow!("Error decrypting: {}", e))?;
    Ok(enc.clone().into())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/scheduler.rs | libs/gl-client/src/scheduler.rs | use crate::credentials::{self, RuneProvider, NodeIdProvider, TlsConfigProvider};
use crate::node::{self, GrpcClient};
use crate::pb::scheduler::scheduler_client::SchedulerClient;
use crate::tls::{self};
use crate::utils::scheduler_uri;
use crate::{pb, signer::Signer};
use anyhow::{Result};
use lightning_signer::bitcoin::Network;
use log::debug;
use runeauth;
use tonic::transport::Channel;
/// Convenience alias for the generated scheduler grpc client over a tonic channel.
type Client = SchedulerClient<Channel>;
/// A scheduler client to interact with the scheduler service. Its
/// capabilities depend on the credentials it was constructed with
/// (unauthenticated `Nobody` vs. authenticated `Device`).
#[derive(Clone)]
pub struct Scheduler<Creds> {
    // Underlying grpc client for the scheduler service.
    client: Client,
    // Bitcoin network the node is registered on.
    network: Network,
    // URI the scheduler is reachable at; kept so we can re-connect
    // (e.g. in `authenticate`).
    grpc_uri: String,
    // Credentials used for mTLS and, when authenticated, the rune.
    creds: Creds,
    // CA certificate copied out of the credentials' TLS config.
    ca: Vec<u8>,
}
impl<Creds> Scheduler<Creds>
where
    Creds: TlsConfigProvider,
{
    /// Creates a new scheduler client with the provided parameters.
    /// A scheduler created this way is considered unauthenticated and
    /// limited in its scope. The URI is taken from GL_SCHEDULER_GRPC_URI
    /// or the built-in default (see `scheduler_uri`).
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Nobody;
    /// # use gl_client::scheduler::Scheduler;
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Nobody::new();
    /// let scheduler = Scheduler::new(network, creds).await.unwrap();
    /// # }
    /// ```
    pub async fn new(network: Network, creds: Creds) -> Result<Scheduler<Creds>> {
        let grpc_uri = scheduler_uri();
        Self::with(network, creds, grpc_uri).await
    }

    /// Creates a new scheduler client with the provided parameters and
    /// custom URI.
    /// A scheduler created this way is considered unauthenticated and
    /// limited in its scope.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Nobody;
    /// # use gl_client::scheduler::Scheduler;
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Nobody::new();
    /// let uri = "https://example.com".to_string();
    /// let scheduler = Scheduler::with(network, creds, uri).await.unwrap();
    /// # }
    /// ```
    pub async fn with(
        network: Network,
        creds: Creds,
        uri: impl Into<String>,
    ) -> Result<Scheduler<Creds>> {
        let uri = uri.into();
        debug!("Connecting to scheduler at {}", uri);
        // `connect_lazy` defers the actual TCP/TLS handshake to the first RPC.
        let channel = tonic::transport::Endpoint::from_shared(uri.clone())?
            .tls_config(creds.tls_config().inner.clone())?
            .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
            .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
            .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
            .keep_alive_while_idle(true)
            .connect_lazy();
        let client = SchedulerClient::new(channel);
        let ca = creds.tls_config().ca.clone();
        Ok(Scheduler {
            client,
            network,
            creds,
            grpc_uri: uri,
            ca,
        })
    }
}
impl<Creds> Scheduler<Creds> {
    /// Registers a new node with the scheduler service.
    ///
    /// # Arguments
    ///
    /// * `signer` - The signer instance bound to the node.
    /// * `invite_code` - Optional invite code to register the node.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Nobody;
    /// # use gl_client::{scheduler::Scheduler, signer::Signer};
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Nobody::new();
    /// let scheduler = Scheduler::new(network, creds.clone()).await.unwrap();
    /// let secret = vec![0, 0, 0, 0];
    /// let signer = Signer::new(secret, network, creds).unwrap(); // Create or obtain a signer instance
    /// let registration_response = scheduler.register(&signer, None).await.unwrap();
    /// # }
    /// ```
    pub async fn register(
        &self,
        signer: &Signer,
        invite_code: Option<String>,
    ) -> Result<pb::scheduler::RegistrationResponse> {
        let code = invite_code.unwrap_or_default();
        self.inner_register(signer, code).await
    }

    /// We split the register method into one with an invite code and one
    /// without an invite code in order to keep the api stable. We might want to
    /// remove the invite system in the future and so it does not make sense to
    /// change the signature of the register method.
    async fn inner_register(
        &self,
        signer: &Signer,
        invite_code: impl Into<String>,
    ) -> Result<pb::scheduler::RegistrationResponse> {
        // Registration must prove ownership of the node key by signing a
        // server-issued challenge.
        log::debug!("Retrieving challenge for registration");
        let challenge = self
            .client
            .clone()
            .get_challenge(pb::scheduler::ChallengeRequest {
                scope: pb::scheduler::ChallengeScope::Register as i32,
                node_id: signer.node_id(),
            })
            .await?
            .into_inner();
        log::trace!("Got a challenge: {}", hex::encode(&challenge.challenge));
        let signature = signer.sign_challenge(challenge.challenge.clone())?;
        // Generate the device key locally; only the CSR is sent out.
        let device_cert = tls::generate_self_signed_device_cert(
            &hex::encode(signer.node_id()),
            "default",
            vec!["localhost".into()],
            None,
        );
        let device_csr = device_cert.serialize_request_pem()?;
        debug!("Requesting registration with csr:\n{}", device_csr);
        let startupmsgs = signer
            .get_startup_messages()
            .into_iter()
            .map(|m| m.into())
            .collect();
        let mut res = self
            .client
            .clone()
            .register(pb::scheduler::RegistrationRequest {
                node_id: signer.node_id(),
                bip32_key: signer.bip32_ext_key(),
                network: self.network.to_string(),
                challenge: challenge.challenge,
                signer_proto: signer.version().to_owned(),
                init_msg: signer.get_init(),
                signature,
                csr: device_csr.into_bytes(),
                invite_code: invite_code.into(),
                startupmsgs,
            })
            .await?
            .into_inner();
        // This step ensures backwards compatibility with the backend. If we did
        // receive a device key, the backend did not sign the csr and we need to
        // return the response as it is. If the device key is empty, the csr was
        // signed and we return the client side generated private key.
        if res.device_key.is_empty() {
            debug!("Received signed certificate:\n{}", &res.device_cert);
            // We intercept the response and replace the private key with the
            // private key of the device_cert. This private key has been generated
            // on and has never left the client device.
            res.device_key = device_cert.serialize_private_key_pem();
        }
        let public_key = device_cert.get_key_pair().public_key_raw();
        debug!(
            "Asking signer to create a rune for public key {}",
            hex::encode(public_key)
        );
        // Create a new rune for the tls certs public key and append it to the
        // grpc response. Restricts the rune to the public key used for mTLS
        // authentication.
        let alt = runeauth::Alternative::new(
            "pubkey".to_string(),
            runeauth::Condition::Equal,
            hex::encode(public_key),
            false,
        )?;
        res.rune = signer.create_rune(None, vec![vec![&alt.encode()]])?;
        // Create a `credentials::Device` struct and serialize it into byte
        // format to return. This can then be stored on the device.
        let creds = credentials::Device::with(
            res.device_cert.clone().into_bytes(),
            res.device_key.clone().into_bytes(),
            res.rune.clone(),
        );
        res.creds = creds.to_bytes();
        Ok(res)
    }

    /// Recovers a previously registered node with the scheduler service.
    ///
    /// # Arguments
    ///
    /// * `signer` - The signer instance used to sign the recovery challenge.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Nobody;
    /// # use gl_client::{scheduler::Scheduler, signer::Signer};
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Nobody::new();
    /// let scheduler = Scheduler::new(network, creds.clone()).await.unwrap();
    /// let secret = vec![0, 0, 0, 0];
    /// let signer = Signer::new(secret, network, creds).unwrap(); // Create or obtain a signer instance
    /// let recovery_response = scheduler.recover(&signer).await.unwrap();
    /// # }
    /// ```
    pub async fn recover(&self, signer: &Signer) -> Result<pb::scheduler::RecoveryResponse> {
        // Recovery proves ownership of the node key the same way as
        // registration, just with the Recover challenge scope.
        let challenge = self
            .client
            .clone()
            .get_challenge(pb::scheduler::ChallengeRequest {
                scope: pb::scheduler::ChallengeScope::Recover as i32,
                node_id: signer.node_id(),
            })
            .await?
            .into_inner();
        let signature = signer.sign_challenge(challenge.challenge.clone())?;
        // Derive a device name from the challenge prefix.
        let name = format!("recovered-{}", hex::encode(&challenge.challenge[0..8]));
        let device_cert = tls::generate_self_signed_device_cert(
            &hex::encode(signer.node_id()),
            &name,
            vec!["localhost".into()],
            None,
        );
        let device_csr = device_cert.serialize_request_pem()?;
        debug!("Requesting recovery with csr:\n{}", device_csr);
        let mut res = self
            .client
            .clone()
            .recover(pb::scheduler::RecoveryRequest {
                node_id: signer.node_id(),
                challenge: challenge.challenge,
                signature,
                csr: device_csr.into_bytes(),
            })
            .await?
            .into_inner();
        // This step ensures backwards compatibility with the backend. If we did
        // receive a device key, the backend did not sign the csr and we need to
        // return the response as it is. If the device key is empty, the csr was
        // signed and we return the client side generated private key.
        if res.device_key.is_empty() {
            debug!("Received signed certificate:\n{}", &res.device_cert);
            // We intercept the response and replace the private key with the
            // private key of the device_cert. This private key has been generated
            // on and has never left the client device.
            res.device_key = device_cert.serialize_private_key_pem();
        }
        let public_key = device_cert.get_key_pair().public_key_raw();
        debug!(
            "Asking signer to create a rune for public key {}",
            hex::encode(public_key)
        );
        // Create a new rune for the tls certs public key and append it to the
        // grpc response. Restricts the rune to the public key used for mTLS
        // authentication.
        let alt = runeauth::Alternative::new(
            "pubkey".to_string(),
            runeauth::Condition::Equal,
            hex::encode(public_key),
            false,
        )?;
        res.rune = signer.create_rune(None, vec![vec![&alt.encode()]])?;
        // Create a `credentials::Device` struct and serialize it into byte
        // format to return. This can then be stored on the device.
        let creds = credentials::Device::with(
            res.device_cert.clone().into_bytes(),
            res.device_key.clone().into_bytes(),
            res.rune.clone(),
        );
        res.creds = creds.to_bytes();
        Ok(res)
    }

    /// Elevates the scheduler client to an authenticated scheduler client
    /// that is able to schedule a node for example.
    ///
    /// # Arguments
    ///
    /// * `creds` - Credentials that carry a TlsConfig and a Rune. These
    /// are credentials returned during registration or recovery.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::{Device, Nobody};
    /// # use gl_client::{scheduler::Scheduler, signer::Signer};
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Nobody::new();
    /// let scheduler_unauthed = Scheduler::new(network, creds.clone()).await.unwrap();
    /// let secret = vec![0, 0, 0, 0];
    /// let signer = Signer::new(secret, network, creds).unwrap(); // Create or obtain a signer instance
    /// let registration_response = scheduler_unauthed.register(&signer, None).await.unwrap();
    /// let creds = Device::from_bytes(registration_response.creds);
    /// let scheduler_authed = scheduler_unauthed.authenticate(creds);
    /// # }
    /// ```
    pub async fn authenticate<Auth>(&self, creds: Auth) -> Result<Scheduler<Auth>>
    where
        Auth: TlsConfigProvider + RuneProvider,
    {
        debug!("Connecting to scheduler at {}", self.grpc_uri);
        // Rebuild the channel using the authenticated credentials' TLS config.
        let channel = tonic::transport::Endpoint::from_shared(self.grpc_uri.clone())?
            .tls_config(creds.tls_config().inner.clone())?
            .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
            .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
            .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
            .keep_alive_while_idle(true)
            .connect_lazy();
        let client = SchedulerClient::new(channel);
        Ok(Scheduler {
            client,
            network: self.network,
            creds,
            grpc_uri: self.grpc_uri.clone(),
            ca: self.ca.clone(),
        })
    }
}
impl<Creds> Scheduler<Creds>
where
    Creds: TlsConfigProvider + RuneProvider + NodeIdProvider + Clone,
{
    /// Schedules a node at the scheduler service. Once a node is
    /// scheduled one can access it through the node client.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Device;
    /// # use gl_client::{scheduler::Scheduler, node::{Node, Client}};
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Device::from_path("my/path/to/credentials.glc");
    /// let scheduler = Scheduler::new(network, creds.clone()).await.unwrap();
    /// let info = scheduler.schedule().await.unwrap();
    /// let node_client: Client = Node::new(node_id, creds).unwrap().connect(info.grpc_uri).await.unwrap();
    /// # }
    /// ```
    pub async fn schedule(&self) -> Result<pb::scheduler::NodeInfoResponse> {
        let res = self
            .client
            .clone()
            .schedule(pb::scheduler::ScheduleRequest {
                node_id: self.creds.node_id()?,
            })
            .await?;
        Ok(res.into_inner())
    }

    /// Schedules a node at the scheduler service and returns a node
    /// client.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use gl_client::credentials::Device;
    /// # use gl_client::scheduler::Scheduler;
    /// # use gl_client::node::Client;
    /// # use lightning_signer::bitcoin::Network;
    /// # async fn example() {
    /// let node_id = vec![0, 1, 2, 3];
    /// let network = Network::Regtest;
    /// let creds = Device::from_path("my/path/to/credentials.glc");
    /// let scheduler = Scheduler::new(network, creds.clone()).await.unwrap();
    /// let node_client: Client = scheduler.node().await.unwrap();
    /// # }
    /// ```
    pub async fn node<T>(&self) -> Result<T>
    where
        T: GrpcClient,
    {
        // Make sure the node is scheduled, then connect to the returned URI.
        let res = self.schedule().await?;
        node::Node::new(self.creds.node_id()?, self.creds.clone())?
            .connect(res.grpc_uri)
            .await
    }

    /// Fetches scheduling information about this node; `wait` is forwarded
    /// to the service (presumably to block until the node is available —
    /// confirm against the scheduler API).
    pub async fn get_node_info(&self, wait: bool) -> Result<pb::scheduler::NodeInfoResponse> {
        Ok(self
            .client
            .clone()
            .get_node_info(pb::scheduler::NodeInfoRequest {
                node_id: self.creds.node_id()?,
                // Field-init shorthand instead of the redundant `wait: wait`.
                wait,
            })
            .await?
            .into_inner())
    }

    /// Requests an export of this node from the Greenlight service.
    pub async fn export_node(&self) -> Result<pb::scheduler::ExportNodeResponse> {
        Ok(self
            .client
            .clone()
            .export_node(pb::scheduler::ExportNodeRequest {})
            .await?
            .into_inner())
    }

    /// Lists the invite codes known to the scheduler for this client.
    pub async fn get_invite_codes(&self) -> Result<pb::scheduler::ListInviteCodesResponse> {
        let res = self
            .client
            .clone()
            .list_invite_codes(pb::scheduler::ListInviteCodesRequest {})
            .await?;
        Ok(res.into_inner())
    }

    /// Registers an outgoing webhook `uri` for this node.
    pub async fn add_outgoing_webhook(
        &self,
        uri: String,
    ) -> Result<pb::scheduler::AddOutgoingWebhookResponse> {
        let node_id = self.creds.node_id()?;
        let res = self
            .client
            .clone()
            .add_outgoing_webhook(pb::scheduler::AddOutgoingWebhookRequest { node_id, uri })
            .await?;
        Ok(res.into_inner())
    }

    /// Lists the outgoing webhooks registered for this node.
    pub async fn list_outgoing_webhooks(
        &self,
    ) -> Result<pb::scheduler::ListOutgoingWebhooksResponse> {
        let node_id = self.creds.node_id()?;
        let res = self
            .client
            .clone()
            .list_outgoing_webhooks(pb::scheduler::ListOutgoingWebhooksRequest { node_id })
            .await?;
        Ok(res.into_inner())
    }

    /// Deletes the outgoing webhooks with the given ids.
    pub async fn delete_webhooks(&self, webhook_ids: Vec<i64>) -> Result<pb::greenlight::Empty> {
        let node_id = self.creds.node_id()?;
        let res = self
            .client
            .clone()
            .delete_webhooks(pb::scheduler::DeleteOutgoingWebhooksRequest {
                node_id,
                ids: webhook_ids,
            })
            .await?;
        Ok(res.into_inner())
    }

    /// Rotates the secret associated with the given outgoing webhook.
    pub async fn rotate_outgoing_webhook_secret(
        &self,
        webhook_id: i64,
    ) -> Result<pb::scheduler::WebhookSecretResponse> {
        let node_id = self.creds.node_id()?;
        let res = self
            .client
            .clone()
            .rotate_outgoing_webhook_secret(pb::scheduler::RotateOutgoingWebhookSecretRequest {
                node_id,
                webhook_id,
            })
            .await?;
        Ok(res.into_inner())
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/node/service.rs | libs/gl-client/src/node/service.rs | use anyhow::{anyhow, Result};
use http::{Request, Response};
use log::{debug, trace};
use rustls_pemfile as pemfile;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tonic::body::BoxBody;
use tonic::transport::Body;
use tonic::transport::Channel;
use tower::{Layer, Service};
use ring::signature::KeyPair;
use ring::{
rand,
signature::{self, EcdsaKeyPair},
};
/// Tower layer that wraps a channel in an [`AuthService`], equipping each
/// request with a payload signature made with the device key.
pub struct AuthLayer {
    // PKCS#8 (DER) private key, extracted from the device's PEM key.
    key: Vec<u8>,
    // Rune handed to the service for outgoing requests.
    rune: String,
}
impl AuthLayer {
    /// Creates a new `AuthLayer` from a PEM-encoded PKCS#8 private key and
    /// the rune to attach to outgoing requests.
    ///
    /// The key is validated eagerly (decoded into an ECDSA P-256 keypair)
    /// so that per-request signing cannot fail later.
    pub fn new(pem: Vec<u8>, rune: String) -> Result<Self> {
        // Extract the DER-encoded PKCS#8 key from the PEM wrapper.
        let key = {
            let mut key = std::io::Cursor::new(&pem[..]);
            let mut keys = match pemfile::pkcs8_private_keys(&mut key) {
                Ok(v) => v,
                Err(e) => {
                    return Err(anyhow!(
                        "Could not decode PEM string into PKCS#8 format: {}",
                        e
                    ))
                }
            };
            // Guard against a PEM without any key: `remove(0)` would panic.
            if keys.is_empty() {
                return Err(anyhow!("No PKCS#8 private key found in PEM string"));
            }
            keys.remove(0)
        };
        // Round-trip through EcdsaKeyPair to verify the key is usable.
        match EcdsaKeyPair::from_pkcs8(&signature::ECDSA_P256_SHA256_FIXED_SIGNING, key.as_ref()) {
            Ok(_) => trace!("Successfully decoded keypair from PEM string"),
            // Error message typo fixed: "decide" -> "decode".
            Err(e) => return Err(anyhow!("Could not decode keypair from PEM string: {}", e)),
        };
        Ok(AuthLayer { key, rune })
    }
}
impl Layer<Channel> for AuthLayer {
    type Service = AuthService;

    /// Wraps `inner`, handing the signing key and rune to the service.
    fn layer(&self, inner: Channel) -> Self::Service {
        AuthService {
            key: self.key.clone(),
            inner,
            rune: self.rune.clone(),
        }
    }
}
#[derive(Clone)]
pub struct AuthService {
    // PKCS#8 formatted private key
    key: Vec<u8>,
    // Wrapped channel the request is ultimately dispatched on.
    inner: Channel,
    // Rune captured per request (see `call`).
    rune: String,
}
impl Service<Request<BoxBody>> for AuthService {
    type Response = Response<Body>;
    type Error = Box<dyn std::error::Error + Send + Sync>;
    #[allow(clippy::type_complexity)]
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }
    // Buffers the request payload, signs (payload || millisecond
    // timestamp) with the device's ECDSA key, attaches pubkey,
    // signature, timestamp and rune as headers, then forwards the
    // rebuilt request to the inner channel.
    fn call(&mut self, request: Request<BoxBody>) -> Self::Future {
        use base64::Engine;
        let engine = base64::engine::general_purpose::STANDARD_NO_PAD;
        // This is necessary because tonic internally uses `tower::buffer::Buffer`.
        // See https://github.com/tower-rs/tower/issues/547#issuecomment-767629149
        // for details on why this is necessary
        let clone = self.inner.clone();
        let mut inner = std::mem::replace(&mut self.inner, clone);
        // The key was validated in `AuthLayer::new`, so this unwrap can
        // only fire if the stored bytes were corrupted since then.
        let keypair = EcdsaKeyPair::from_pkcs8(
            &signature::ECDSA_P256_SHA256_FIXED_SIGNING,
            self.key.as_ref(),
        )
        .unwrap();
        let rune = self.rune.clone();
        Box::pin(async move {
            use bytes::BufMut;
            use std::convert::TryInto;
            use tonic::codegen::Body;
            // Returns UTC on all platforms, no need to handle
            // timezones.
            let time = std::time::SystemTime::now()
                .duration_since(std::time::SystemTime::UNIX_EPOCH)?
                .as_millis();
            let (mut parts, mut body) = request.into_parts();
            // NOTE(review): assumes a unary request whose body yields
            // exactly one Ok data frame; panics otherwise — confirm.
            let data = body.data().await.unwrap().unwrap();
            // Copy used to create the signature (payload + associated data)
            let mut buf = data.to_vec();
            // Associated data that is covered by the signature
            let mut ts = vec![];
            ts.put_u64(time.try_into()?);
            buf.put_u64(time.try_into()?);
            let rng = rand::SystemRandom::new();
            let pubkey = keypair.public_key().as_ref();
            let sig = keypair.sign(&rng, &buf).unwrap();
            // We use base64 encoding simply because it is
            // slightly more compact and we already have it as
            // a dependency from rustls. Sizes are as follows:
            //
            // - Pubkey: raw=65, hex=130, base64=88
            // - Signature: raw=64, hex=128, base64=88
            //
            // For an overall saving of 82 bytes per request,
            // and a total overhead of 199 bytes per request.
            parts
                .headers
                .insert("glauthpubkey", engine.encode(&pubkey).parse().unwrap())
            parts
                .headers
                .insert("glauthsig", engine.encode(sig).parse().unwrap());
            parts
                .headers
                .insert("glts", engine.encode(ts).parse().unwrap());
            // Runes already come base64 URL_SAFE encoded.
            parts
                .headers
                .insert("glrune", rune.parse().expect("Could not parse rune"));
            trace!("Payload size: {} (timestamp {})", data.len(), time);
            // Re-wrap the consumed payload so the inner channel still
            // sees a complete single-frame request body.
            let body = crate::node::stasher::StashBody::new(data).into();
            let request = Request::from_parts(parts, body);
            debug!("Sending request {:?}", request);
            let response = inner.call(request).await?;
            Ok(response)
        })
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/node/mod.rs | libs/gl-client/src/node/mod.rs | use crate::credentials::{RuneProvider, TlsConfigProvider};
use crate::pb::cln::node_client as cln_client;
use crate::pb::node_client::NodeClient;
use crate::pb::scheduler::{scheduler_client::SchedulerClient, ScheduleRequest};
use crate::tls::TlsConfig;
use crate::utils;
use anyhow::{anyhow, Result};
use log::{debug, info, trace};
use tonic::transport::{Channel, Uri};
use tower::ServiceBuilder;
const DEFAULT_MAX_DECODING_MESSAGE_SIZE: usize = 32 * 1024 * 1024; // 32 MiB
/// A client to the remotely running node on the greenlight
/// infrastructure. It is configured to authenticate itself with the
/// device mTLS keypair and will sign outgoing requests with the same
/// mTLS keypair.
pub type Client = NodeClient<service::AuthService>;
/// Client speaking pre-encoded protobuf over the same authenticated
/// channel (see [`GenericClient`]).
pub type GClient = GenericClient<service::AuthService>;
/// Client for the core-lightning grpc interface of the node.
pub type ClnClient = cln_client::NodeClient<service::AuthService>;
/// Abstraction over the concrete grpc client types so that
/// [`Node::connect`] and [`Node::schedule`] can construct any of them
/// from an authenticated channel.
pub trait GrpcClient {
    // Builds the client on top of the signing `AuthService`, applying
    // the given response-size limit.
    fn new_with_inner(inner: service::AuthService, max_decoding_message_size: usize) -> Self;
}
/// A builder to configure a [`Client`] that can either connect to a
/// node directly, assuming you have the `grpc_uri` that the node is
/// listening on, or it can talk to the
/// [`crate::scheduler::Scheduler`] to schedule the node and configure
/// the [`Client`] accordingly.
#[allow(dead_code)]
#[derive(Clone)]
pub struct Node {
    // Identifying public key of the node, as raw bytes.
    node_id: Vec<u8>,
    // mTLS identity used both for the connection and request signing.
    tls: TlsConfig,
    // Authorization rune forwarded with every request.
    rune: String,
    // Optional override for the grpc response-size limit; falls back
    // to DEFAULT_MAX_DECODING_MESSAGE_SIZE in `connect`.
    max_decoding_message_size: Option<usize>,
}
impl GrpcClient for Client {
    /// Builds a node client over the signing service with the given
    /// response-size limit.
    fn new_with_inner(inner: service::AuthService, max_decoding_message_size: usize) -> Self {
        let client = Client::new(inner);
        client.max_decoding_message_size(max_decoding_message_size)
    }
}
impl GrpcClient for GClient {
    /// Builds a generic (pre-encoded protobuf) client over the signing
    /// service with the given response-size limit.
    fn new_with_inner(inner: service::AuthService, max_decoding_message_size: usize) -> Self {
        let client = GenericClient::new(inner);
        client.max_decoding_message_size(max_decoding_message_size)
    }
}
impl GrpcClient for ClnClient {
    /// Builds a core-lightning client over the signing service with the
    /// given response-size limit.
    fn new_with_inner(inner: service::AuthService, max_decoding_message_size: usize) -> Self {
        let client = ClnClient::new(inner);
        client.max_decoding_message_size(max_decoding_message_size)
    }
}
impl Node {
    /// Creates a builder for a client to `node_id`, taking the mTLS
    /// identity and rune from `creds`.
    pub fn new<Creds>(node_id: Vec<u8>, creds: Creds) -> Result<Node>
    where
        Creds: TlsConfigProvider + RuneProvider,
    {
        let tls = creds.tls_config();
        let rune = creds.rune();
        Ok(Node {
            node_id,
            tls,
            rune,
            max_decoding_message_size: None,
        })
    }
    /// Overrides the maximum size of grpc responses the client will
    /// accept (defaults to DEFAULT_MAX_DECODING_MESSAGE_SIZE).
    pub fn with_max_decoding_message_size(mut self, size: usize) -> Self {
        self.max_decoding_message_size = Some(size);
        self
    }
    /// Connects (lazily) to the node at `node_uri` and returns a client
    /// of type `C` whose requests are signed via the AuthLayer.
    ///
    /// # Errors
    /// Fails if the URI is invalid or no private key is configured.
    pub async fn connect<C>(&self, node_uri: String) -> Result<C>
    where
        C: GrpcClient,
    {
        let node_uri = Uri::from_maybe_shared(node_uri)?;
        info!("Connecting to node at {}", node_uri);
        // If this is not yet a node-domain address we need to also
        // accept "localhost" as domain name from the certificate.
        // NOTE(review): panics if the URI has no host component —
        // confirm callers always pass absolute URIs.
        let host = node_uri.host().unwrap();
        let tls = if host.starts_with("gl") {
            trace!(
                "Using real hostname {}, expecting the node to have a matching certificate",
                host
            );
            self.tls.clone()
        } else {
            trace!(
                "Overriding hostname, since this is not a gl node domain: {}",
                host
            );
            let mut tls = self.tls.clone();
            tls.inner = tls.inner.domain_name("localhost");
            tls
        };
        // The signing layer needs the private key; without it we cannot
        // authenticate and bail out early.
        let layer = match tls.private_key {
            Some(k) => service::AuthLayer::new(k, self.rune.clone())?,
            None => {
                return Err(anyhow!(
                    "Cannot connect a node::Client without first configuring its identity"
                ))
            }
        };
        let chan = tonic::transport::Endpoint::from_shared(node_uri.to_string())?
            .tls_config(tls.inner)?
            .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
            .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
            .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
            .keep_alive_while_idle(true)
            .connect_lazy();
        let chan = ServiceBuilder::new().layer(layer).service(chan);
        let size = self
            .max_decoding_message_size
            .unwrap_or(DEFAULT_MAX_DECODING_MESSAGE_SIZE);
        Ok(C::new_with_inner(chan, size))
    }
    /// Asks the scheduler at `scheduler_uri` to schedule the node, then
    /// connects to the grpc URI the scheduler returns.
    pub async fn schedule_with_uri<C>(self, scheduler_uri: String) -> Result<C>
    where
        C: GrpcClient,
    {
        debug!(
            "Contacting scheduler at {} to get the node address",
            scheduler_uri
        );
        let channel = Channel::from_shared(scheduler_uri)?
            .tls_config(self.tls.inner.clone())?
            .connect()
            .await?;
        let mut scheduler = SchedulerClient::new(channel);
        let node_info = scheduler
            .schedule(ScheduleRequest {
                node_id: self.node_id.clone(),
            })
            .await
            .map(|v| v.into_inner())?;
        debug!("Node scheduled at {}", node_info.grpc_uri);
        self.connect(node_info.grpc_uri).await
    }
    /// Like [`Node::schedule_with_uri`] but using the default scheduler
    /// URI from `utils::scheduler_uri()`.
    pub async fn schedule<C>(self) -> Result<C>
    where
        C: GrpcClient,
    {
        let uri = utils::scheduler_uri();
        self.schedule_with_uri(uri).await
    }
}
mod generic;
pub mod service;
pub use generic::GenericClient;
// Body replayed after signing: `AuthService::call` drains the original
// request body in order to sign it, then uses `StashBody` to hand the
// buffered bytes back to the transport as a fresh single-frame body.
mod stasher {
    use bytes::Bytes;
    use http::HeaderMap;
    use http_body::Body;
    use pin_project::pin_project;
    use std::{
        pin::Pin,
        task::{Context, Poll},
    };
    use tonic::body::BoxBody;
    use tonic::Status;
    /// One-shot `http_body::Body` over an already-buffered payload.
    #[pin_project]
    #[derive(Debug)]
    pub(crate) struct StashBody {
        // Remaining payload; `None` once the single frame was emitted.
        value: Option<Bytes>,
    }
    impl StashBody {
        // Wraps the buffered payload for replay.
        pub(crate) fn new(val: Bytes) -> Self {
            Self { value: Some(val) }
        }
    }
    impl Body for StashBody {
        type Data = Bytes;
        type Error = Status;
        fn is_end_stream(&self) -> bool {
            self.value.is_none()
        }
        // Emits the buffered payload exactly once, then yields `None`.
        fn poll_data(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
            Poll::Ready(self.project().value.take().map(Ok))
        }
        // No trailers are ever produced.
        fn poll_trailers(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Result<Option<HeaderMap>, Status>> {
            Poll::Ready(Ok(None))
        }
    }
    impl From<StashBody> for BoxBody {
        fn from(v: StashBody) -> BoxBody {
            BoxBody::new(v)
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/node/generic.rs | libs/gl-client/src/node/generic.rs | //! An implementation of a Grpc Client that does not perform protobuf
//! encoding/decoding. It takes already encoded protobuf messages as
//! `Vec<u8>`, along with the URI and returns the unparsed results to
//! the caller, or a `tonic::Status` in case of failure. This is
//! rather useful when creating bindings, in that only the
//! `GenericClient` and its `call` method need to be mapped through
//! the language boundary, making for a slim interface. This is in
//! contrast to the fat generated interface in which each
//! `tonic::Service` and method on that service is spelled out, and
//! would make for a very wide interface to be mapped.
use bytes::{Buf, BufMut, Bytes};
use http_body::Body;
use log::trace;
use std::str::FromStr;
use tonic::codegen::StdError;
// The codec types are stateless zero-sized structs, so shared const
// instances are sufficient.
const CODEC: VecCodec = VecCodec {};
const DECODER: VecDecoder = VecDecoder {};
const ENCODER: VecEncoder = VecEncoder {};
/// A GRPC client that can call and return pre-encoded messages. Used
/// by the language bindings to keep the interface between languages
/// small: the client language is used to encode the protobuf
/// payloads, and on the Rust side we just expose the `call` method.
#[derive(Debug, Clone)]
pub struct GenericClient<T> {
    // Tonic grpc plumbing over the underlying transport `T`.
    inner: tonic::client::Grpc<T>,
}
impl<T> GenericClient<T>
where
    T: tonic::client::GrpcService<tonic::body::BoxBody>,
    T::ResponseBody: http_body::Body<Data = bytes::Bytes> + Send + 'static,
    T::Error: Into<StdError>,
    T::ResponseBody: Body<Data = Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<StdError> + Send,
{
    /// Wraps the transport `inner` in the tonic grpc plumbing.
    pub fn new(inner: T) -> Self {
        let inner = tonic::client::Grpc::new(inner);
        Self { inner }
    }
    /// Performs a unary call to the grpc method at `path` with an
    /// already protobuf-encoded `payload`, returning the raw response
    /// bytes without decoding them.
    pub async fn call(
        &mut self,
        path: &str,
        payload: Vec<u8>,
    ) -> Result<tonic::Response<bytes::Bytes>, tonic::Status> {
        trace!(
            "Generic call to {} with {}bytes of payload",
            path,
            payload.len()
        );
        self.inner.ready().await.map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
        // NOTE(review): panics if `path` is not a valid URI path —
        // assumed to be a well-formed grpc method path like "/Svc/Rpc".
        let path = http::uri::PathAndQuery::from_str(path).unwrap();
        self.inner
            .unary(tonic::Request::new(payload), path, CODEC)
            .await
    }
    // TODO Add a `streaming_call` for methods that return a stream to the client
    /// Sets the maximum accepted response size in bytes.
    pub fn max_decoding_message_size(mut self, limit: usize) -> Self
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
    {
        self.inner = self.inner.max_decoding_message_size(limit);
        self
    }
}
/// `tonic::client::Grpc<T>` requires a codec to convert between the
/// in-memory representation (usually protobuf structs generated from
/// IDL) to and from the serialized payload for the call, and the
/// inverse direction for responses. Since the `GenericClient` already
/// takes encoded `Vec<u8>` there is no work for us to do.
#[derive(Default)]
pub struct VecCodec {}

impl Codec for VecCodec {
    type Encode = Vec<u8>;
    type Decode = bytes::Bytes;
    type Encoder = VecEncoder;
    type Decoder = VecDecoder;

    /// Hands out the shared zero-sized pass-through decoder.
    fn decoder(&mut self) -> Self::Decoder {
        DECODER
    }

    /// Hands out the shared zero-sized pass-through encoder.
    fn encoder(&mut self) -> Self::Encoder {
        ENCODER
    }
}
use tonic::codec::{Codec, Decoder, Encoder};
/// Pass-through encoder: the payload is already serialized protobuf,
/// so "encoding" is a plain byte copy into the transport buffer.
#[derive(Debug, Clone, Default)]
pub struct VecEncoder;

impl Encoder for VecEncoder {
    type Item = Vec<u8>;
    type Error = tonic::Status;

    fn encode(
        &mut self,
        message: Self::Item,
        dst: &mut tonic::codec::EncodeBuf<'_>,
    ) -> Result<(), Self::Error> {
        // Copy the already-encoded bytes verbatim.
        dst.put(&message[..]);
        Ok(())
    }
}
/// Pass-through decoder: hands the raw response bytes to the caller
/// without interpreting them.
#[derive(Debug, Clone, Default)]
pub struct VecDecoder;

impl Decoder for VecDecoder {
    type Item = bytes::Bytes;
    type Error = tonic::Status;

    fn decode(
        &mut self,
        src: &mut tonic::codec::DecodeBuf<'_>,
    ) -> Result<Option<Self::Item>, Self::Error> {
        // Drain everything that is currently buffered.
        let len = src.remaining();
        Ok(Some(src.copy_to_bytes(len)))
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/pairing/attestation_device.rs | libs/gl-client/src/pairing/attestation_device.rs | use std::time::{SystemTime, UNIX_EPOCH};
use super::{into_approve_pairing_error, into_verify_pairing_data_error, Error};
use crate::{
credentials::{NodeIdProvider, RuneProvider, TlsConfigProvider},
pb::{
self,
scheduler::{
pairing_client::PairingClient, ApprovePairingRequest, GetPairingDataRequest,
GetPairingDataResponse,
},
},
};
use bytes::BufMut as _;
use picky::{pem::Pem, x509::Csr};
use picky_asn1_x509::{PublicKey, SubjectPublicKeyInfo};
use ring::{
rand,
signature::{self, EcdsaKeyPair, KeyPair},
};
use rustls_pemfile as pemfile;
use tonic::transport::Channel;
type Result<T, E = super::Error> = core::result::Result<T, E>;
/// Type-state marker: the client holds an open connection to the
/// pairing service.
pub struct Connected(PairingClient<Channel>);
/// Type-state marker: the client has not connected yet.
pub struct Unconnected();
/// Pairing client for the attestation (already-paired) device; `T` is
/// the connection state and `C` supplies TLS config, rune and node id.
pub struct Client<T, C: TlsConfigProvider + RuneProvider + NodeIdProvider> {
    inner: T,
    uri: String,
    creds: C,
}
impl<C: TlsConfigProvider + RuneProvider + NodeIdProvider> Client<Unconnected, C> {
    /// Creates an unconnected attestation-side pairing client pointed
    /// at the default scheduler URI.
    pub fn new(creds: C) -> Result<Client<Unconnected, C>> {
        Ok(Self {
            inner: Unconnected(),
            uri: crate::utils::scheduler_uri(),
            creds,
        })
    }
    /// Overrides the pairing service URI.
    pub fn with_uri(mut self, uri: String) -> Client<Unconnected, C> {
        self.uri = uri;
        self
    }
    /// Establishes a lazy TLS connection to the pairing service and
    /// transitions the client into the `Connected` state.
    pub async fn connect(self) -> Result<Client<Connected, C>> {
        let tls = self.creds.tls_config();
        let channel = tonic::transport::Endpoint::from_shared(self.uri.clone())?
            .tls_config(tls.inner)?
            .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
            .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
            .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
            .keep_alive_while_idle(true)
            .connect_lazy();
        let inner = PairingClient::new(channel);
        Ok(Client {
            inner: Connected(inner),
            uri: self.uri,
            creds: self.creds,
        })
    }
}
impl<C: TlsConfigProvider + RuneProvider + NodeIdProvider> Client<Connected, C> {
    /// Fetches the pairing data (CSR, name, restrictions, …) the new
    /// device registered under `device_id`.
    pub async fn get_pairing_data(&self, device_id: &str) -> Result<GetPairingDataResponse> {
        Ok(self
            .inner
            .0
            .clone()
            .get_pairing_data(GetPairingDataRequest {
                device_id: device_id.to_string(),
            })
            .await?
            .into_inner())
    }
    /// Approves a pairing request: signs
    /// (device_id || timestamp || node_id || device_name || restrs)
    /// with this device's TLS key and submits the approval together
    /// with this device's rune.
    ///
    /// # Errors
    /// Fails if the system clock is before the epoch, the TLS private
    /// key is missing or unparsable, signing fails, or the grpc call
    /// is rejected.
    pub async fn approve_pairing(
        &self,
        device_id: &str,
        device_name: &str,
        restrs: &str,
    ) -> Result<pb::greenlight::Empty> {
        // Timestamp in whole seconds since the Unix epoch.
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map_err(into_approve_pairing_error)?
            .as_secs();
        let node_id = self.creds.node_id()?;
        // Gather data to sign over.
        let mut buf = vec![];
        buf.put(device_id.as_bytes());
        buf.put_u64(timestamp);
        buf.put(&node_id[..]);
        buf.put(device_name.as_bytes());
        buf.put(restrs.as_bytes());
        let tls = self.creds.tls_config();
        let tls_key = tls
            .clone()
            .private_key
            .ok_or(Error::BuildClientError("empty tls private key".to_string()))?;
        // Sign data.
        // NOTE(review): `.remove(0)` panics if the PEM contains no
        // PKCS#8 key — assumed valid because the credentials were
        // checked at construction; confirm.
        let key = {
            let mut key = std::io::Cursor::new(&tls_key);
            pemfile::pkcs8_private_keys(&mut key)
                .map_err(into_approve_pairing_error)?
                .remove(0)
        };
        let kp =
            EcdsaKeyPair::from_pkcs8(&signature::ECDSA_P256_SHA256_FIXED_SIGNING, key.as_ref())
                .map_err(into_approve_pairing_error)?;
        let rng = rand::SystemRandom::new();
        let sig = kp
            .sign(&rng, &buf)
            .map_err(into_approve_pairing_error)?
            .as_ref()
            .to_vec();
        // Send approval.
        Ok(self
            .inner
            .0
            .clone()
            .approve_pairing(ApprovePairingRequest {
                device_id: device_id.to_string(),
                timestamp,
                device_name: device_name.to_string(),
                restrictions: restrs.to_string(),
                sig: sig,
                rune: self.creds.rune(),
                pubkey: kp.public_key().as_ref().to_vec(),
            })
            .await?
            .into_inner())
    }
    /// Verifies that the CSR in `data` carries an EC public key that
    /// matches `device_id`, and that the restrictions pin exactly that
    /// pubkey (so the rune can only be used by the requesting device).
    pub fn verify_pairing_data(data: GetPairingDataResponse) -> Result<()> {
        let mut crs = std::io::Cursor::new(&data.csr);
        let pem = Pem::read_from(&mut crs).map_err(into_verify_pairing_data_error)?;
        let csr = Csr::from_pem(&pem).map_err(into_verify_pairing_data_error)?;
        let sub_pk_der = csr
            .public_key()
            .to_der()
            .map_err(into_verify_pairing_data_error)?;
        let sub_pk_info: SubjectPublicKeyInfo =
            picky_asn1_der::from_bytes(&sub_pk_der).map_err(into_verify_pairing_data_error)?;
        // Only EC keys are acceptable; anything else is rejected below.
        if let PublicKey::Ec(bs) = sub_pk_info.subject_public_key {
            let pk = hex::encode(bs.0.payload_view());
            if pk == data.device_id
                && Self::restriction_contains_pubkey_exactly_once(
                    &data.restrictions,
                    &data.device_id,
                )
            {
                Ok(())
            } else {
                Err(Error::VerifyPairingDataError(format!(
                    "device id {} does not match public key {}",
                    data.device_id, pk
                )))
            }
        } else {
            Err(Error::VerifyPairingDataError(format!(
                "public key is not ecdsa"
            )))
        }
    }
    /// Checks that a restriction string only contains a pubkey field exactly
    /// once that is not preceded or followed by a '|' to ensure that it is
    /// not part of an alternative but a restriction by itself.
    ///
    /// NOTE(review): this is a plain substring match; a restriction
    /// like `pubkey=<pubkey>ff` (the searched key as a prefix of a
    /// longer value, followed by a non-'|' character) would still pass
    /// — confirm whether values are always terminated by '&' or
    /// end-of-string.
    fn restriction_contains_pubkey_exactly_once(s: &str, pubkey: &str) -> bool {
        let search_field = format!("pubkey={}", pubkey);
        match s.find(&search_field) {
            Some(index) => {
                // Check if 'pubkey=<pubkey>' is not preceded by '|'
                if index > 0 && s.chars().nth(index - 1) == Some('|') {
                    return false;
                }
                // Check if 'pubkey=<pubkey>' is not followed by '|'
                let end_index = index + search_field.len();
                if end_index < s.len() && s.chars().nth(end_index) == Some('|') {
                    return false;
                }
                // Check if 'pubkey=<pubkey>' appears exactly once
                s.matches(&search_field).count() == 1
            }
            None => false,
        }
    }
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::{credentials, tls};
    /// Exercises `verify_pairing_data` with a self-signed CSR against
    /// matching and mismatching device ids and restriction strings.
    #[test]
    fn test_verify_pairing_data() {
        let kp = tls::generate_ecdsa_key_pair();
        let device_cert = tls::generate_self_signed_device_cert(
            &hex::encode("00"),
            "my-device",
            vec!["localhost".into()],
            Some(kp),
        );
        let csr = device_cert.serialize_request_pem().unwrap();
        let pk = hex::encode(device_cert.get_key_pair().public_key_raw());
        // Check with public key as session id.
        let pd = GetPairingDataResponse {
            device_id: pk.clone(),
            csr: csr.clone().into_bytes(),
            device_name: "my-device".to_string(),
            description: "".to_string(),
            restrictions: format!("pubkey={}", pk.clone()),
        };
        assert!(Client::<Connected, credentials::Device>::verify_pairing_data(pd).is_ok());
        // Check with different "pubkey" restriction than session id.
        let pd = GetPairingDataResponse {
            device_id: pk.clone(),
            csr: csr.clone().into_bytes(),
            device_name: "my-device".to_string(),
            description: "".to_string(),
            restrictions: format!("pubkey={}", "02000000"),
        };
        assert!(Client::<Connected, credentials::Device>::verify_pairing_data(pd).is_err());
        // Check with second "pubkey" in same alternative.
        let pd = GetPairingDataResponse {
            device_id: pk.clone(),
            csr: csr.clone().into_bytes(),
            device_name: "my-device".to_string(),
            description: "".to_string(),
            restrictions: format!("pubkey={}|pubkey=02000000", pk),
        };
        assert!(Client::<Connected, credentials::Device>::verify_pairing_data(pd).is_err());
        // Check with different public key as session id.
        let pd = GetPairingDataResponse {
            device_id: "00".to_string(),
            csr: csr.into_bytes(),
            device_name: "my-device".to_string(),
            description: "".to_string(),
            restrictions: format!("pubkey={}", pk.clone()),
        };
        assert!(Client::<Connected, credentials::Device>::verify_pairing_data(pd).is_err());
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/pairing/new_device.rs | libs/gl-client/src/pairing/new_device.rs | use super::PairingSessionData;
use crate::{
credentials::{Device, TlsConfigProvider},
pb::scheduler::{pairing_client::PairingClient, PairDeviceRequest},
tls::{self, TlsConfig},
};
use log::debug;
use tokio::sync::mpsc;
use tonic::transport::Channel;
type Result<T, E = super::Error> = core::result::Result<T, E>;
/// Type-state marker: not yet connected to the pairing service.
pub struct Unconnected();
/// Type-state marker: holds an open connection to the pairing service.
pub struct Connected(PairingClient<Channel>);
/// Pairing client for the new (to-be-paired) device; `T` is the
/// connection state.
pub struct Client<T> {
    inner: T,
    uri: String,
    tls: TlsConfig,
}
impl Client<Unconnected> {
    /// Creates an unconnected pairing client pointed at the default
    /// scheduler URI, using the TLS configuration supplied by `creds`.
    pub fn new<T>(creds: T) -> Client<Unconnected>
    where
        T: TlsConfigProvider,
    {
        let uri = crate::utils::scheduler_uri();
        let tls = creds.tls_config();
        Client {
            inner: Unconnected(),
            uri,
            tls,
        }
    }
}
impl Client<Unconnected> {
    /// Overrides the pairing service URI.
    pub fn with_uri(mut self, uri: String) -> Client<Unconnected> {
        self.uri = uri;
        self
    }
    /// Establishes a lazy TLS connection to the pairing service and
    /// transitions the client into the `Connected` state.
    pub async fn connect(self) -> Result<Client<Connected>> {
        let channel = tonic::transport::Endpoint::from_shared(self.uri.clone())?
            .tls_config(self.tls.inner.clone())?
            .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
            .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
            .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
            .keep_alive_while_idle(true)
            .connect_lazy();
        Ok(Client {
            inner: Connected(PairingClient::new(channel)),
            uri: self.uri,
            tls: self.tls,
        })
    }
}
impl Client<Connected> {
    /// Starts a pairing session for this (new) device.
    ///
    /// Generates a fresh keypair and CSR, registers the session at the
    /// backend, and returns a channel that first yields the QR payload
    /// (`gl-pairing:<device_id>`) and later either the signed
    /// credentials or an error.
    pub async fn pair_device(
        &self,
        name: &str,
        description: &str,
        restrictions: &str,
    ) -> Result<mpsc::Receiver<PairingSessionData>> {
        debug!("Start a new pairing request");
        let device_name = name.to_string();
        let description = description.to_string();
        // Generate key pair.
        let kp = tls::generate_ecdsa_key_pair();
        // Generate csr.
        let device_cert = tls::generate_self_signed_device_cert(
            &hex::encode("00"), // We don't know the node id yet, this is to be filled out by the attestation device.
            name,
            vec!["localhost".into()],
            Some(kp),
        );
        // The device id is the hex-encoded raw public key of the
        // freshly generated certificate.
        let device_id = hex::encode(device_cert.get_key_pair().public_key_raw());
        let csr = device_cert.serialize_request_pem()?;
        // Restrictions should always contain the pubkey field to bind them to
        // the private key of the device.
        let mut restriction = format!("pubkey={}", device_id.clone());
        if !restrictions.is_empty() {
            // Append restrictions if set.
            restriction = format!("{}&{}", restriction, restrictions);
        }
        let restrictions = restriction;
        // Create a channel to communicate beyond the bounds of this function
        let (tx, rx) = mpsc::channel(1);
        let mut client = self.inner.0.clone();
        // The worker that handles the pairing. Communicate to the outside world
        // via the channel.
        tokio::spawn(async move {
            // Step 1 of the Pairing Protocol: Request pairing at the Greenlight
            // Backend.
            let request = client.pair_device(PairDeviceRequest {
                device_id: device_id.clone(),
                csr: csr.into_bytes(),
                device_name,
                description,
                restrictions,
            });
            // Step 2 of the Pairing Protocol: Return the PairingQR for the new
            // device to show it to an old device.
            let data = format!("gl-pairing:{}", device_id);
            tx.send(PairingSessionData::PairingQr(data))
                .await
                .expect("could not pass qr data to the channel"); // We can unwrap here as there is no need to continue if the channel is broken.
            // Step 8 of the Pairing Protocol: Get back the response. We do fire
            // and forget here.
            let _ = match request.await {
                Ok(r) => {
                    let mut res = r.into_inner();
                    // Attach the locally generated private key: it never
                    // left this device and is not part of the response.
                    res.device_key = device_cert.serialize_private_key_pem();
                    let creds = Device::with(
                        res.device_cert.clone().into_bytes(),
                        res.device_key.clone().into_bytes(),
                        res.rune.clone(),
                    );
                    res.creds = creds.into();
                    tx.send(PairingSessionData::PairingResponse(res))
                }
                Err(e) => {
                    debug!("got an error during pairing process {}.", e);
                    tx.send(PairingSessionData::PairingError(e))
                }
            }
            .await;
            return;
        });
        Ok(rx)
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/pairing/mod.rs | libs/gl-client/src/pairing/mod.rs | use crate::{credentials, pb::scheduler::PairDeviceResponse};
use thiserror::Error;
pub mod attestation_device;
pub mod new_device;
/// Errors surfaced by the pairing flows (both the new-device and the
/// attestation-device side).
#[derive(Error, Debug)]
pub enum Error {
    #[error(transparent)]
    TransportError(#[from] tonic::transport::Error),
    #[error(transparent)]
    X509Error(#[from] rcgen::RcgenError),
    #[error("could not build client: {0}")]
    BuildClientError(String),
    #[error(transparent)]
    GrpcError(#[from] tonic::Status),
    #[error(transparent)]
    CredentialsError(#[from] credentials::Error),
    #[error(transparent)]
    RuneError(#[from] runeauth::RuneError),
    #[error("could not approve pairing: {0}")]
    ApprovePairingError(String),
    #[error("could not verify pairing data: {0}")]
    VerifyPairingDataError(String),
}
/// Events emitted over the channel returned by
/// `new_device::Client::pair_device`.
#[derive(Debug)]
pub enum PairingSessionData {
    // Final result: signed device credentials from the backend.
    PairingResponse(PairDeviceResponse),
    // Payload to display as a QR code ("gl-pairing:<device_id>").
    PairingQr(String),
    // Terminal grpc failure of the pairing request.
    PairingError(tonic::Status),
}
// Maps any printable error into Error::ApprovePairingError.
fn into_approve_pairing_error<T: ToString>(v: T) -> Error {
    Error::ApprovePairingError(v.to_string())
}
// Maps any printable error into Error::VerifyPairingDataError.
fn into_verify_pairing_data_error<T: ToString>(v: T) -> Error {
    Error::VerifyPairingDataError(v.to_string())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lnurl/utils.rs | libs/gl-client/src/lnurl/utils.rs | use std::str::FromStr;
use anyhow::{anyhow, Result};
use bech32::FromBase32;
use crate::lightning_invoice::Bolt11Invoice;
/// Decodes a bech32-encoded LNURL string into the plain URL it wraps.
pub fn parse_lnurl(lnurl: &str) -> Result<String> {
    // bech32 payload -> 5-bit groups -> raw bytes -> UTF-8 URL.
    let (_hrp, payload, _variant) =
        bech32::decode(lnurl).map_err(|e| anyhow!("Failed to decode lnurl: {}", e))?;
    let bytes = Vec::<u8>::from_base32(&payload)
        .map_err(|e| anyhow!("Failed to base32 decode data: {}", e))?;
    String::from_utf8(bytes).map_err(|e| anyhow!("Failed to convert to utf-8: {}", e))
}
// Get an Invoice from a Lightning Network URL pay request
pub fn parse_invoice(invoice_str: &str) -> Result<Bolt11Invoice> {
    // `from_str` already takes a `&str`: the previous extra borrow and
    // the nested `format!` inside `anyhow!` were redundant (clippy
    // needless_borrow / useless_format); the error text is unchanged.
    Bolt11Invoice::from_str(invoice_str).map_err(|e| anyhow!("Failed to parse invoice: {}", e))
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lnurl/mod.rs | libs/gl-client/src/lnurl/mod.rs | mod models;
mod pay;
mod utils;
mod withdraw;
use self::models::{
LnUrlHttpClient, PayRequestCallbackResponse, PayRequestResponse, WithdrawRequestResponse,
};
use self::utils::{parse_invoice, parse_lnurl};
use crate::node::ClnClient;
use crate::pb::cln::{amount_or_any, Amount, AmountOrAny};
use anyhow::{anyhow, Result};
use models::LnUrlHttpClearnetClient;
use pay::{resolve_lnurl_to_invoice, validate_invoice_from_callback_response};
use url::Url;
use withdraw::{build_withdraw_request_callback_url, parse_withdraw_request_response_from_url};
/// High-level LNURL flows (pay / withdraw) on top of a pluggable HTTP
/// client implementation.
pub struct LNURL<T: LnUrlHttpClient> {
    http_client: T,
}
impl<T: LnUrlHttpClient> LNURL<T> {
pub fn new(http_client: T) -> Self {
LNURL { http_client }
}
pub fn new_with_clearnet_client() -> LNURL<LnUrlHttpClearnetClient> {
let http_client = LnUrlHttpClearnetClient::new();
LNURL { http_client }
}
pub async fn get_pay_request_response(&self, lnurl: &str) -> Result<PayRequestResponse> {
let url = parse_lnurl(lnurl)?;
let lnurl_pay_request_response: PayRequestResponse =
self.http_client.get_pay_request_response(&url).await?;
if lnurl_pay_request_response.tag != "payRequest" {
return Err(anyhow!("Expected tag to say 'payRequest'"));
}
Ok(lnurl_pay_request_response)
}
pub async fn get_pay_request_callback_response(
&self,
base_callback_url: &str,
amount_msats: u64,
metadata: &str,
) -> Result<PayRequestCallbackResponse> {
let mut url = Url::parse(base_callback_url)?;
url.query_pairs_mut()
.append_pair("amount", &amount_msats.to_string());
let callback_response: PayRequestCallbackResponse = self
.http_client
.get_pay_request_callback_response(&url.to_string())
.await?;
let invoice = parse_invoice(&callback_response.pr)?;
validate_invoice_from_callback_response(&invoice, amount_msats, metadata)?;
Ok(callback_response)
}
pub async fn pay(
&self,
lnurl: &str,
amount_msats: u64,
node: &mut ClnClient,
) -> Result<tonic::Response<crate::pb::cln::PayResponse>> {
let invoice = resolve_lnurl_to_invoice(&self.http_client, lnurl, amount_msats).await?;
node.pay(crate::pb::cln::PayRequest {
bolt11: invoice.to_string(),
..Default::default()
})
.await
.map_err(|e| anyhow!(e))
}
pub async fn get_withdraw_request_response(
&self,
lnurl: &str,
) -> Result<WithdrawRequestResponse> {
let url = parse_lnurl(lnurl)?;
let withdrawal_request_response = parse_withdraw_request_response_from_url(&url);
//If it's not a quick withdraw, then get the withdrawal_request_response from the web.
let withdrawal_request_response = match withdrawal_request_response {
Some(w) => w,
None => {
self.http_client
.get_withdrawal_request_response(&url)
.await?
}
};
Ok(withdrawal_request_response)
}
pub async fn withdraw(
&self,
lnurl: &str,
amount_msats: u64,
node: &mut ClnClient,
) -> Result<()> {
let withdraw_request_response = self.get_withdraw_request_response(lnurl).await?;
let amount = AmountOrAny {
value: Some(amount_or_any::Value::Amount(Amount { msat: amount_msats })),
};
let invoice = node
.invoice(crate::pb::cln::InvoiceRequest {
amount_msat: Some(amount),
description: withdraw_request_response.default_description.clone(),
..Default::default()
})
.await
.map_err(|e| anyhow!(e))?
.into_inner();
let callback_url =
build_withdraw_request_callback_url(&withdraw_request_response, invoice.bolt11)?;
let _ = self
.http_client
.send_invoice_for_withdraw_request(&callback_url);
Ok(())
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lnurl/models.rs | libs/gl-client/src/lnurl/models.rs | use anyhow::{anyhow, Result};
use async_trait::async_trait;
use log::debug;
use mockall::automock;
use reqwest::Response;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
/// LNURL-pay request description returned by the service (first round
/// trip of the pay flow).
#[derive(Serialize, Deserialize, Debug)]
pub struct PayRequestResponse {
    // URL to call with the chosen amount appended as `?amount=`.
    pub callback: String,
    // Amount bounds; presumably millisatoshis per the LNURL-pay spec —
    // TODO confirm against the validation code.
    #[serde(rename = "maxSendable")]
    pub max_sendable: u64,
    #[serde(rename = "minSendable")]
    pub min_sendable: u64,
    // Must be "payRequest" for this flow.
    pub tag: String,
    // Metadata string the returned invoice is validated against.
    pub metadata: String,
}
/// Second round-trip response of the LNURL-pay flow.
#[derive(Deserialize)]
pub struct PayRequestCallbackResponse {
    // bolt11 invoice string to pay.
    pub pr: String,
    pub routes: Vec<String>,
}
/// Generic `{"status": "OK"}`-style acknowledgment.
#[derive(Debug, Deserialize, Serialize)]
pub struct OkResponse {
    status: String,
}
/// Generic error acknowledgment carrying a human-readable reason.
#[derive(Debug, Deserialize, Serialize)]
pub struct ErrorResponse {
    status: String,
    reason: String,
}
/// LNURL-withdraw request description.
#[derive(Serialize, Deserialize, Debug)]
pub struct WithdrawRequestResponse {
    pub tag: String,
    // URL the generated invoice is submitted to.
    pub callback: String,
    pub k1: String,
    // Used as the description of the invoice created on the node.
    #[serde(rename = "defaultDescription")]
    pub default_description: String,
    #[serde(rename = "minWithdrawable")]
    pub min_withdrawable: u64,
    #[serde(rename = "maxWithdrawable")]
    pub max_withdrawable: u64,
}
/// HTTP operations needed by the LNURL flows; mockable in tests via
/// `automock`.
#[async_trait]
#[automock]
pub trait LnUrlHttpClient {
    // Fetches the LNURL-pay request description.
    async fn get_pay_request_response(&self, lnurl: &str) -> Result<PayRequestResponse>;
    // Calls the pay callback URL (with amount appended) for an invoice.
    async fn get_pay_request_callback_response(
        &self,
        callback_url: &str,
    ) -> Result<PayRequestCallbackResponse>;
    // Fetches the LNURL-withdraw request description.
    async fn get_withdrawal_request_response(&self, url: &str) -> Result<WithdrawRequestResponse>;
    // Submits an invoice to the withdraw callback URL.
    async fn send_invoice_for_withdraw_request(&self, url: &str) -> Result<OkResponse>;
}
/// Production implementation of [`LnUrlHttpClient`] over plain
/// clearnet HTTP (reqwest).
pub struct LnUrlHttpClearnetClient {
    client: reqwest::Client,
}
impl LnUrlHttpClearnetClient {
    /// Creates a client backed by a fresh `reqwest` client.
    pub fn new() -> LnUrlHttpClearnetClient {
        LnUrlHttpClearnetClient {
            client: reqwest::Client::new(),
        }
    }

    /// Fetches `url` and deserializes the JSON body into `T`.
    async fn get<T: DeserializeOwned + 'static>(&self, url: &str) -> Result<T> {
        let response: Response = self.client.get(url).send().await?;
        response.json::<T>().await.map_err(|e| {
            // Keep the detailed reqwest error in the debug log only.
            debug!("{}", e);
            anyhow!("Unable to parse http response body as json")
        })
    }
}
// All trait methods are plain JSON GETs, so they simply delegate to
// the typed `get` helper with the appropriate response type.
#[async_trait]
impl LnUrlHttpClient for LnUrlHttpClearnetClient {
    async fn get_pay_request_response(&self, lnurl: &str) -> Result<PayRequestResponse> {
        self.get::<PayRequestResponse>(lnurl).await
    }
    async fn get_pay_request_callback_response(
        &self,
        callback_url: &str,
    ) -> Result<PayRequestCallbackResponse> {
        self.get::<PayRequestCallbackResponse>(callback_url).await
    }
    async fn get_withdrawal_request_response(&self, url: &str) -> Result<WithdrawRequestResponse> {
        self.get::<WithdrawRequestResponse>(url).await
    }
    async fn send_invoice_for_withdraw_request(&self, url: &str) -> Result<OkResponse>{
        self.get::<OkResponse>(url).await
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lnurl/pay/mod.rs | libs/gl-client/src/lnurl/pay/mod.rs | use super::models;
use super::utils::parse_lnurl;
use crate::lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription};
use crate::lnurl::{
models::{LnUrlHttpClient, PayRequestCallbackResponse, PayRequestResponse},
utils::parse_invoice,
};
use anyhow::{anyhow, ensure, Result};
use log::debug;
use reqwest::Url;
use sha256;
pub async fn resolve_lnurl_to_invoice<T: LnUrlHttpClient>(
http_client: &T,
lnurl_identifier: &str,
amount_msats: u64,
) -> Result<String> {
let url = match is_lnurl(lnurl_identifier) {
true => parse_lnurl(lnurl_identifier)?,
false => parse_lightning_address(lnurl_identifier)?,
};
debug!("Domain: {}", Url::parse(&url).unwrap().host().unwrap());
let lnurl_pay_request_response: PayRequestResponse =
http_client.get_pay_request_response(&url).await?;
validate_pay_request_response(lnurl_identifier, &lnurl_pay_request_response, amount_msats)?;
let callback_url = build_callback_url(&lnurl_pay_request_response, amount_msats)?;
let callback_response: PayRequestCallbackResponse = http_client
.get_pay_request_callback_response(&callback_url)
.await?;
let invoice = parse_invoice(&callback_response.pr)?;
validate_invoice_from_callback_response(
&invoice,
amount_msats,
&lnurl_pay_request_response.metadata,
)?;
Ok(invoice.to_string())
}
/// True when the identifier is a bech32 LNURL string (case-insensitive
/// "LNURL" prefix, surrounding whitespace ignored); false for lightning
/// addresses and anything else.
fn is_lnurl(lnurl_identifier: &str) -> bool {
    const LNURL_PREFIX: &str = "LNURL";
    let normalized = lnurl_identifier.trim().to_uppercase();
    normalized.starts_with(LNURL_PREFIX)
}
/// Validate the LNURL-pay parameters: the tag must be "payRequest", the
/// requested amount must be inside [minSendable, maxSendable], and for
/// lightning addresses (LUD-16) the metadata must echo back the address
/// that was queried.
pub fn validate_pay_request_response(
    lnurl_identifier: &str,
    lnurl_pay_request_response: &PayRequestResponse,
    amount_msats: u64,
) -> Result<()> {
    if lnurl_pay_request_response.tag != "payRequest" {
        return Err(anyhow!("Expected tag to say 'payRequest'"));
    }

    ensure_amount_is_within_range(lnurl_pay_request_response, amount_msats)?;
    let description = extract_description(lnurl_pay_request_response)?;
    debug!("Description: {}", description);
    debug!(
        "Accepted range (in millisatoshis): {} - {}",
        lnurl_pay_request_response.min_sendable, lnurl_pay_request_response.max_sendable
    );

    // For lightning addresses the metadata must contain a "text/email" or
    // "text/identifier" entry matching the address we queried (LUD-16).
    if !is_lnurl(lnurl_identifier) {
        let deserialized_metadata: Vec<Vec<String>> =
            serde_json::from_str(&lnurl_pay_request_response.metadata)
                .map_err(|e| anyhow!("Failed to deserialize metadata: {}", e))?;

        let metadata_entry_types = ["text/email", "text/identifier"];
        let mut identifier = String::new();
        for metadata in deserialized_metadata {
            // Guard against malformed entries with fewer than two elements;
            // the previous direct indexing could panic here.
            if metadata.len() >= 2 && metadata_entry_types.contains(&metadata[0].as_str()) {
                identifier = metadata[1].clone();
                break;
            }
        }

        if identifier.is_empty() {
            // The previous message was truncated ("… an entry of type ").
            return Err(anyhow!(
                "Could not find an entry of type text/email or text/identifier in the metadata"
            ));
        }

        if identifier != lnurl_identifier {
            return Err(anyhow!("The lightning address specified in the original request does not match what was found in the metadata array"));
        }
    }
    Ok(())
}
// Validates the invoice on the pay request's callback response.
//
// Checks that the invoice amount equals the requested amount and that the
// invoice's description (or description hash) commits to the pay request
// metadata, as required by LUD-06.
pub fn validate_invoice_from_callback_response(
    invoice: &Bolt11Invoice,
    amount_msats: u64,
    metadata: &str,
) -> Result<()> {
    // Keep the Option: the previous error path called `.unwrap()` on
    // `amount_milli_satoshis()`, which panics for amountless invoices —
    // exactly the case that triggers this error message.
    let invoice_amount = invoice.amount_milli_satoshis();
    ensure!(invoice_amount.unwrap_or_default() == amount_msats,
        "Amount found in invoice was not equal to the amount found in the original request\nRequest amount: {}\nInvoice amount:{:?}", amount_msats, invoice_amount
    );

    // Either hash a direct description or use the embedded hash directly.
    let description_hash: String = match invoice.description() {
        Bolt11InvoiceDescription::Direct(d) => sha256::digest(d.to_string()),
        Bolt11InvoiceDescription::Hash(h) => h.0.to_string(),
    };
    ensure!(
        description_hash == sha256::digest(metadata),
        "description_hash {} does not match the hash of the metadata {}",
        description_hash,
        sha256::digest(metadata)
    );
    Ok(())
}
// Extracts the description from the lnurl pay request response metadata.
//
// Returns the value of the last "text/plain" entry (matching the previous
// behavior), or an empty string when none is present.
fn extract_description(lnurl_pay_request_response: &PayRequestResponse) -> Result<String> {
    // Borrow the metadata directly; the previous version cloned the String
    // and then called `.to_owned()` on it, allocating twice for no reason.
    let deserialized_metadata: Vec<Vec<String>> =
        serde_json::from_str(&lnurl_pay_request_response.metadata)
            .map_err(|e| anyhow!("Failed to deserialize metadata: {}", e))?;

    let mut description = String::new();
    for entry in deserialized_metadata {
        // Skip malformed entries instead of panicking on short arrays.
        if entry.len() >= 2 && entry[0] == "text/plain" {
            description = entry[1].clone();
        }
    }
    Ok(description)
}
// Builds the pay-request callback URL by appending the requested amount
// (in millisatoshis) as a query parameter.
fn build_callback_url(
    lnurl_pay_request_response: &models::PayRequestResponse,
    amount: u64,
) -> Result<String> {
    let mut callback = Url::parse(&lnurl_pay_request_response.callback)?;
    let amount_param = amount.to_string();
    callback
        .query_pairs_mut()
        .append_pair("amount", &amount_param);
    Ok(callback.to_string())
}
// Validates the pay request response for expected values
fn ensure_amount_is_within_range(
lnurl_pay_request_response: &PayRequestResponse,
amount: u64,
) -> Result<()> {
if amount < lnurl_pay_request_response.min_sendable {
return Err(anyhow!(
"Amount must be {} or greater",
lnurl_pay_request_response.min_sendable
));
}
if amount > lnurl_pay_request_response.max_sendable {
return Err(anyhow!(
"Amount must be {} or less",
lnurl_pay_request_response.max_sendable
));
}
Ok(())
}
// LUD-16: Paying to static internet identifiers.
//
// Turns "user@domain" into the well-known LNURL-pay endpoint URL
// "https://domain/.well-known/lnurlp/user". Addresses with zero or more
// than one '@', or with an empty username or domain, are rejected.
pub fn parse_lightning_address(lightning_address: &str) -> Result<String> {
    let components: Vec<&str> = lightning_address.split('@').collect();
    // Exactly one '@' is required; this also rejects "a@b@c".
    if components.len() != 2 {
        return Err(anyhow!("The provided lightning address is improperly formatted"));
    }
    // Indexing is safe: the length check above guarantees two elements.
    // (The previous `.get(..)` matches had unreachable `None` arms.)
    let username = components[0];
    if username.is_empty() {
        return Err(anyhow!("Username can not be empty"));
    }
    let domain = components[1];
    if domain.is_empty() {
        return Err(anyhow!("Domain can not be empty"));
    }
    Ok(format!("https://{}/.well-known/lnurlp/{}", domain, username))
}
#[cfg(test)]
mod tests {
    use crate::lnurl::models::MockLnUrlHttpClient;
    use futures::future;
    use futures::future::Ready;
    use std::pin::Pin;
    use super::*;
    // Wrap a value in an immediately-ready boxed future — the shape that
    // mockall expects when mocking the async trait methods.
    fn convert_to_async_return_value<T: Send + 'static>(
        value: T,
    ) -> Pin<Box<dyn std::future::Future<Output = T> + Send>> {
        let ready_future: Ready<_> = future::ready(value);
        Pin::new(Box::new(ready_future)) as Pin<Box<dyn std::future::Future<Output = T> + Send>>
    }
    // A well-formed invoice parses and exposes its 10 msat amount.
    #[test]
    fn test_parse_invoice() {
        let invoice_str = "lnbc100p1psj9jhxdqud3jxktt5w46x7unfv9kz6mn0v3jsnp4q0d3p2sfluzdx45tqcsh2pu5qc7lgq0xs578ngs6s0s68ua4h7cvspp5q6rmq35js88zp5dvwrv9m459tnk2zunwj5jalqtyxqulh0l5gflssp5nf55ny5gcrfl30xuhzj3nphgj27rstekmr9fw3ny5989s300gyus9qyysgqcqpcrzjqw2sxwe993h5pcm4dxzpvttgza8zhkqxpgffcrf5v25nwpr3cmfg7z54kuqq8rgqqqqqqqq2qqqqq9qq9qrzjqd0ylaqclj9424x9m8h2vcukcgnm6s56xfgu3j78zyqzhgs4hlpzvznlugqq9vsqqqqqqqlgqqqqqeqq9qrzjqwldmj9dha74df76zhx6l9we0vjdquygcdt3kssupehe64g6yyp5yz5rhuqqwccqqyqqqqlgqqqqjcqq9qrzjqf9e58aguqr0rcun0ajlvmzq3ek63cw2w282gv3z5uupmuwvgjtq2z55qsqqg6qqqyqqqrtnqqqzq3cqygrzjqvphmsywntrrhqjcraumvc4y6r8v4z5v593trte429v4hredj7ms5z52usqq9ngqqqqqqqlgqqqqqqgq9qrzjq2v0vp62g49p7569ev48cmulecsxe59lvaw3wlxm7r982zxa9zzj7z5l0cqqxusqqyqqqqlgqqqqqzsqygarl9fh38s0gyuxjjgux34w75dnc6xp2l35j7es3jd4ugt3lu0xzre26yg5m7ke54n2d5sym4xcmxtl8238xxvw5h5h5j5r6drg6k6zcqj0fcwg";
        let result = parse_invoice(invoice_str);
        assert!(result.is_ok());
        let invoice = result.unwrap();
        assert_eq!(invoice.amount_milli_satoshis().unwrap(), 10);
    }
    // Happy path: a bech32 LNURL resolves to an invoice.
    #[tokio::test]
    async fn test_lnurl_pay() {
        let mut mock_http_client = MockLnUrlHttpClient::new();
        mock_http_client.expect_get_pay_request_response().returning(|_url| {
            let x: PayRequestResponse = serde_json::from_str("{ \"callback\": \"https://cipherpunk.com/lnurlp/api/v1/lnurl/cb/1\", \"maxSendable\": 100000, \"minSendable\": 100, \"tag\": \"payRequest\", \"metadata\": \"[[\\\"text/plain\\\", \\\"Start the CoinTrain\\\"]]\" }").unwrap();
            convert_to_async_return_value(Ok(x))
        });
        mock_http_client.expect_get_pay_request_callback_response().returning(|_url| {
            let invoice = "lnbc1u1pjv9qrvsp5e5wwexctzp9yklcrzx448c68q2a7kma55cm67ruajjwfkrswnqvqpp55x6mmz8ch6nahrcuxjsjvs23xkgt8eu748nukq463zhjcjk4s65shp5dd6hc533r655wtyz63jpf6ja08srn6rz6cjhwsjuyckrqwanhjtsxqzjccqpjrzjqw6lfdpjecp4d5t0gxk5khkrzfejjxyxtxg5exqsd95py6rhwwh72rpgrgqq3hcqqgqqqqlgqqqqqqgq9q9qxpqysgq95njz4sz6h7r2qh7txnevcrvg0jdsfpe72cecmjfka8mw5nvm7tydd0j34ps2u9q9h6v5u8h3vxs8jqq5fwehdda6a8qmpn93fm290cquhuc6r";
            let callback_response_json = format!("{{\"pr\":\"{}\",\"routes\":[]}}", invoice).to_string();
            let x = serde_json::from_str(&callback_response_json).unwrap();
            convert_to_async_return_value(Ok(x))
        });
        let lnurl = "LNURL1DP68GURN8GHJ7CMFWP5X2UNSW4HXKTNRDAKJ7CTSDYHHVVF0D3H82UNV9UCSAXQZE2";
        let amount = 100000;
        let invoice = resolve_lnurl_to_invoice(&mock_http_client, &lnurl, amount).await;
        assert!(invoice.is_ok());
    }
    // Happy path for a LUD-16 lightning address: the well-known URL is
    // derived from the address and the metadata echoes the address back.
    #[tokio::test]
    async fn test_lnurl_pay_with_lightning_address() {
        let mut mock_http_client = MockLnUrlHttpClient::new();
        let lightning_address_username = "satoshi";
        let lightning_address_domain = "cipherpunk.com";
        let lnurl = format!(
            "{}@{}",
            lightning_address_username, lightning_address_domain
        );
        let lnurl_clone = lnurl.clone();
        mock_http_client.expect_get_pay_request_response().returning(move |url| {
            let expected_url = format!("https://{}/.well-known/lnurlp/{}", lightning_address_domain, lightning_address_username);
            assert_eq!(expected_url, url);
            let pay_request_json = format!("{{\"callback\": \"https://cipherpunk.com/lnurlp/api/v1/lnurl/cb/1\", \"maxSendable\": 100000, \"minSendable\": 100, \"tag\": \"payRequest\", \"metadata\": \"[[\\\"text/plain\\\", \\\"Start the CoinTrain\\\"], [\\\"text/identifier\\\", \\\"{}\\\"]]\" }}", lnurl_clone);
            let x: PayRequestResponse = serde_json::from_str(&pay_request_json).unwrap();
            convert_to_async_return_value(Ok(x))
        });
        mock_http_client.expect_get_pay_request_callback_response().returning(|_url| {
            let invoice = "lnbcrt1u1pj0ypx6sp5hzczugdw9eyw3fcsjkssux7awjlt68vpj7uhmen7sup0hdlrqxaqpp5gp5fm2sn5rua2jlzftkf5h22rxppwgszs7ncm73pmwhvjcttqp3qdy2tddjyar90p6z7urvv95kug3vyq39xarpwf6zqargv5syxmmfde28yctfdc396tpqtv38getcwshkjer9de6xjenfv4ezytpqyfekzar0wd5xjsrrd9cxsetjwp6ku6ewvdhk6gjat5xqyjw5qcqp29qxpqysgqujuf5zavazln2q9gks7nqwdgjypg2qlvv7aqwfmwg7xmjt8hy4hx2ctr5fcspjvmz9x5wvmur8vh6nkynsvateafm73zwg5hkf7xszsqajqwcf";
            let callback_response_json = format!("{{\"pr\":\"{}\",\"routes\":[]}}", invoice).to_string();
            let x = serde_json::from_str(&callback_response_json).unwrap();
            convert_to_async_return_value(Ok(x))
        });
        let amount = 100000;
        let invoice = resolve_lnurl_to_invoice(&mock_http_client, &lnurl, amount).await;
        assert!(invoice.is_ok());
    }
    // An empty username before the '@' must be rejected up front.
    #[tokio::test]
    async fn test_lnurl_pay_with_lightning_address_fails_with_empty_username() {
        let mock_http_client = MockLnUrlHttpClient::new();
        let lightning_address_username = "";
        let lightning_address_domain = "cipherpunk.com";
        let lnurl = format!(
            "{}@{}",
            lightning_address_username, lightning_address_domain
        );
        let amount = 100000;
        let error = resolve_lnurl_to_invoice(&mock_http_client, &lnurl, amount).await;
        assert!(error.is_err());
        assert!(error.unwrap_err().to_string().contains("Username can not be empty"));
    }
    // An empty domain after the '@' must be rejected up front.
    #[tokio::test]
    async fn test_lnurl_pay_with_lightning_address_fails_with_empty_domain() {
        let mock_http_client = MockLnUrlHttpClient::new();
        let lightning_address_username = "satoshi";
        let lightning_address_domain = "";
        let lnurl = format!(
            "{}@{}",
            lightning_address_username, lightning_address_domain
        );
        let amount = 100000;
        let error = resolve_lnurl_to_invoice(&mock_http_client, &lnurl, amount).await;
        assert!(error.is_err());
        assert!(error.unwrap_err().to_string().contains("Domain can not be empty"));
    }
    // A bech32-invalid LNURL fails at the decode step before any HTTP.
    #[tokio::test]
    async fn test_lnurl_pay_returns_error_on_invalid_lnurl() {
        let mock_http_client = MockLnUrlHttpClient::new();
        let lnurl = "LNURL1111111111111111111111111111111111111111111111111111111111111111111";
        let amount = 100000;
        let result = resolve_lnurl_to_invoice(&mock_http_client, lnurl, amount).await;
        match result {
            Err(err) => {
                assert!(err
                    .to_string()
                    .contains("Failed to decode lnurl: invalid length"));
            }
            _ => panic!("Expected an error, but got Ok"),
        }
    }
    // amount < minSendable triggers the range check.
    #[tokio::test]
    async fn test_lnurl_pay_returns_error_on_amount_less_than_min_sendable() {
        let mut mock_http_client = MockLnUrlHttpClient::new();
        // Set up expectations for the first two calls
        mock_http_client.expect_get_pay_request_response().returning(|_url| {
            let x: PayRequestResponse = serde_json::from_str("{ \"callback\": \"https://cipherpunk.com/lnurlp/api/v1/lnurl/cb/1\", \"maxSendable\": 100000, \"minSendable\": 100000, \"tag\": \"payRequest\", \"metadata\": \"[[\\\"text/plain\\\", \\\"Start the CoinTrain\\\"]]\" }").unwrap();
            convert_to_async_return_value(Ok(x))
        });
        mock_http_client.expect_get_pay_request_callback_response().returning(|_url| {
            let invoice = "lnbc1u1pjv9qrvsp5e5wwexctzp9yklcrzx448c68q2a7kma55cm67ruajjwfkrswnqvqpp55x6mmz8ch6nahrcuxjsjvs23xkgt8eu748nukq463zhjcjk4s65shp5dd6hc533r655wtyz63jpf6ja08srn6rz6cjhwsjuyckrqwanhjtsxqzjccqpjrzjqw6lfdpjecp4d5t0gxk5khkrzfejjxyxtxg5exqsd95py6rhwwh72rpgrgqq3hcqqgqqqqlgqqqqqqgq9q9qxpqysgq95njz4sz6h7r2qh7txnevcrvg0jdsfpe72cecmjfka8mw5nvm7tydd0j34ps2u9q9h6v5u8h3vxs8jqq5fwehdda6a8qmpn93fm290cquhuc6r";
            let callback_response_json = format!("{{\"pr\":\"{}\",\"routes\":[]}}", invoice).to_string();
            let callback_response = serde_json::from_str(&callback_response_json).unwrap();
            convert_to_async_return_value(Ok(callback_response))
        });
        let lnurl = "LNURL1DP68GURN8GHJ7CMFWP5X2UNSW4HXKTNRDAKJ7CTSDYHHVVF0D3H82UNV9UCSAXQZE2";
        let amount = 1;
        let result = resolve_lnurl_to_invoice(&mock_http_client, lnurl, amount).await;
        match result {
            Err(err) => {
                assert!(err.to_string().contains("Amount must be"));
            }
            _ => panic!("Expected an error, but got Ok"),
        }
    }
    // amount outside [minSendable, maxSendable] triggers the range check.
    #[tokio::test]
    async fn test_lnurl_pay_returns_error_on_amount_greater_than_max_sendable() {
        let mut mock_http_client = MockLnUrlHttpClient::new();
        mock_http_client.expect_get_pay_request_response().returning(|_url| {
            let x: PayRequestResponse = serde_json::from_str("{ \"callback\": \"https://cipherpunk.com/lnurlp/api/v1/lnurl/cb/1\", \"maxSendable\": 100000, \"minSendable\": 100000, \"tag\": \"payRequest\", \"metadata\": \"[[\\\"text/plain\\\", \\\"Start the CoinTrain\\\"]]\" }").unwrap();
            convert_to_async_return_value(Ok(x))
        });
        mock_http_client.expect_get_pay_request_callback_response().returning(|_url| {
            let invoice = "lnbc1u1pjv9qrvsp5e5wwexctzp9yklcrzx448c68q2a7kma55cm67ruajjwfkrswnqvqpp55x6mmz8ch6nahrcuxjsjvs23xkgt8eu748nukq463zhjcjk4s65shp5dd6hc533r655wtyz63jpf6ja08srn6rz6cjhwsjuyckrqwanhjtsxqzjccqpjrzjqw6lfdpjecp4d5t0gxk5khkrzfejjxyxtxg5exqsd95py6rhwwh72rpgrgqq3hcqqgqqqqlgqqqqqqgq9q9qxpqysgq95njz4sz6h7r2qh7txnevcrvg0jdsfpe72cecmjfka8mw5nvm7tydd0j34ps2u9q9h6v5u8h3vxs8jqq5fwehdda6a8qmpn93fm290cquhuc6r";
            let callback_response_json = format!("{{\"pr\":\"{}\",\"routes\":[]}}", invoice).to_string();
            let value = serde_json::from_str(&callback_response_json).unwrap();
            convert_to_async_return_value(Ok(value))
        });
        let lnurl = "LNURL1DP68GURN8GHJ7CMFWP5X2UNSW4HXKTNRDAKJ7CTSDYHHVVF0D3H82UNV9UCSAXQZE2";
        let amount = 1;
        let result = resolve_lnurl_to_invoice(&mock_http_client, lnurl, amount).await;
        match result {
            Err(err) => {
                assert!(err.to_string().contains("Amount must be"));
            }
            _ => panic!("Expected an error, amount specified is greater than maxSendable"),
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/lnurl/withdraw/mod.rs | libs/gl-client/src/lnurl/withdraw/mod.rs | use super::models::WithdrawRequestResponse;
use anyhow::{anyhow, Result};
use log::debug;
use reqwest::Url;
use serde_json::{to_value, Map, Value};
/// Build the LUD-03 withdraw callback URL, attaching the service's `k1`
/// secret and the invoice to be paid (`pr`) as query parameters.
///
/// The parameter was previously misnamed `lnurl_pay_request_response`
/// even though it is a withdraw response; the rename is call-compatible.
pub fn build_withdraw_request_callback_url(
    withdraw_request_response: &WithdrawRequestResponse,
    invoice: String,
) -> Result<String> {
    let mut url = Url::parse(&withdraw_request_response.callback)?;
    url.query_pairs_mut()
        .append_pair("k1", &withdraw_request_response.k1)
        .append_pair("pr", &invoice);
    Ok(url.to_string())
}
/// Re-type `field_name` in `value` from a JSON string to a JSON number
/// (u64), overwriting the old value in place.
///
/// Errors when the field is absent, not a string, or not parseable as u64.
fn convert_value_field_from_str_to_u64(
    value: &mut Map<String, Value>,
    field_name: &str,
) -> Result<()> {
    // Flattened from a doubly-nested match; same errors, same order.
    let field_value = value
        .get(field_name)
        .ok_or_else(|| anyhow!("Failed to find {} in map", field_name))?;
    let field_value_str = field_value
        .as_str()
        .ok_or_else(|| anyhow!("Failed to convert {} into a str", field_name))?;
    let converted_field_value = field_value_str.parse::<u64>()?;
    // Overwrites the old string-typed value; `?` instead of the previous
    // `unwrap()` (serializing a u64 cannot fail, but avoid the panic path).
    value.insert(
        String::from(field_name),
        to_value(converted_field_value)?,
    );
    Ok(())
}
/// Parse a [`WithdrawRequestResponse`] out of the query parameters of an
/// LNURL-withdraw URL. Returns `None` when the URL is malformed or any
/// field is missing or of the wrong type.
pub fn parse_withdraw_request_response_from_url(url: &str) -> Option<WithdrawRequestResponse> {
    // The previous `.unwrap()` panicked on malformed URLs even though
    // this function is fallible by design.
    let url = Url::parse(url).ok()?;
    let query_params: Value = url.query_pairs().clone().collect();
    if let Some(mut query_params) = query_params.as_object().cloned() {
        // Amounts arrive as strings in the query; serde expects u64 fields.
        if convert_value_field_from_str_to_u64(&mut query_params, "minWithdrawable").is_err() {
            debug!("minWithdrawable could not be parsed into a number");
            return None;
        };
        if convert_value_field_from_str_to_u64(&mut query_params, "maxWithdrawable").is_err() {
            debug!("maxWithdrawable could not be parsed into a number");
            return None;
        };
        match serde_json::from_value(Value::Object(query_params)) {
            Ok(w) => {
                return w;
            },
            Err(e) => {
                debug!("{:?}", e);
                return None;
            }
        }
    }
    None
}
#[cfg(test)]
mod test {
    use super::*;
    // The callback URL must carry both the k1 secret and the invoice.
    #[test]
    fn test_build_withdraw_request_callback_url() -> Result<()> {
        let k1 = String::from("unique");
        let invoice = String::from("invoice");
        let built_withdraw_request_callback_url = build_withdraw_request_callback_url(&WithdrawRequestResponse {
            tag: String::from("withdraw"),
            callback: String::from("https://cipherpunk.com/"),
            k1: k1.clone(),
            default_description: String::from(""),
            min_withdrawable: 2,
            max_withdrawable: 300,
        }, invoice.clone());
        let url = Url::parse(&built_withdraw_request_callback_url.unwrap())?;
        let query_pairs = url.query_pairs().collect::<Value>();
        let query_params: &Map<String, Value> = query_pairs.as_object().unwrap();
        assert_eq!(query_params.get("k1").unwrap().as_str().unwrap(), k1);
        assert_eq!(query_params.get("pr").unwrap().as_str().unwrap(), invoice);
        Ok(())
    }
    // All required fields present: parsing succeeds.
    #[test]
    fn test_parse_withdraw_request_response_from_url() {
        let withdraw_request = parse_withdraw_request_response_from_url("https://cipherpunk.com?tag=withdraw&callback=cipherpunk.com&k1=42&minWithdrawable=1&maxWithdrawable=100&defaultDescription=");
        assert!(withdraw_request.is_some());
    }
    // defaultDescription missing: deserialization must fail.
    #[test]
    fn test_parse_withdraw_request_response_from_url_fails_when_field_is_missing() {
        let withdraw_request = parse_withdraw_request_response_from_url("https://cipherpunk.com?tag=withdraw&callback=cipherpunk.com&k1=42&minWithdrawable=1&maxWithdrawable=100");
        assert!(withdraw_request.is_none());
    }
    // Non-numeric minWithdrawable: the str→u64 conversion must fail.
    #[test]
    fn test_parse_withdraw_request_response_from_url_fails_when_min_withdrawable_is_wrong_type() {
        let withdraw_request = parse_withdraw_request_response_from_url("https://cipherpunk.com?tag=withdraw&callback=cipherpunk.com&k1=42&minWithdrawable=one&maxWithdrawable=100&defaultDescription=");
        assert!(withdraw_request.is_none());
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/report.rs | libs/gl-client/src/signer/report.rs | //! Signer reporting facility to debug issues
//!
//! The resolver and policies implemented in the signer may produce
//! false negatives, i.e., they may reject an otherwise valid request
//! based on a missing approval or failing to match up the request
//! with the signed context requests in the resolver.
//!
//! Since issues involving these are hard to debug, given that they
//! run on user devices, we'd like to report any failure to the
//! servers where they are logged and used to fine-tune policies and
//! the resolver. The information in these reports is already known by
//! the server and we are attaching most of it just for easier
//! collation by capturing the full context.
use crate::pb;
/// Delivers signer rejection reports to the scheduler's debug service.
pub struct Reporter {}
impl Reporter {
pub async fn report(r: pb::scheduler::SignerRejection) {
log::warn!("Delivering report {:?}", r);
let tls = crate::tls::TlsConfig::new();
let uri = crate::utils::scheduler_uri();
let channel = tonic::transport::Endpoint::from_shared(uri)
.expect("could not configure client")
.tls_config(tls.inner.clone())
.expect("error configuring client with tls config")
.connect_lazy();
let mut client = pb::scheduler::debug_client::DebugClient::new(channel);
match client.report_signer_rejection(r).await {
Ok(_) => log::info!("rejection reported"),
Err(e) => log::error!("could not report rejection: {}", e),
}
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/approver.rs | libs/gl-client/src/signer/approver.rs | use lightning_signer::prelude::SendSync;
use vls_protocol_signer::approver::Approve;
// An approver that will collect any request it gets and files a
// report that may be relayed to developers to debug policies. It
// defers actual decisions to an `inner` approver, and provides access
// to the captured reports. If this approver is wrapped in a real
// approver, that outer approver will process the requests, and not
// forward to this. Hence only prospective failures are collected.
pub struct ReportingApprover<A: Approve> {
    // Delegate approver that makes the actual accept/reject decision.
    inner: A,
}
impl<A: Approve> ReportingApprover<A> {
    /// Wrap `delegate`, deferring every approval decision to it.
    pub fn new(delegate: A) -> Self {
        Self { inner: delegate }
    }
}
impl<A: Approve> Approve for ReportingApprover<A> {
    // Reaching any of these methods means no outer approver already
    // handled the request; log it as unapproved, then defer to the
    // delegate for the actual decision.
    fn approve_invoice(&self, inv: &lightning_signer::invoice::Invoice) -> bool {
        log::warn!("unapproved invoice: {:?}", inv);
        self.inner.approve_invoice(inv)
    }
    fn approve_keysend(
        &self,
        hash: crate::lightning::ln::PaymentHash,
        amount_msat: u64,
    ) -> bool {
        log::warn!("unapproved keysend {:?} {:?}", hash, amount_msat);
        self.inner.approve_keysend(hash, amount_msat)
    }
    fn approve_onchain(
        &self,
        tx: &lightning_signer::bitcoin::Transaction,
        values_sat: &[lightning_signer::bitcoin::TxOut],
        unknown_indices: &[usize],
    ) -> bool {
        log::warn!(
            "unapproved onchain {:?} {:?} {:?}",
            tx,
            values_sat,
            unknown_indices
        );
        self.inner.approve_onchain(tx, values_sat, unknown_indices)
    }
}
// Marker trait required by the vls approver stack. NOTE(review): the
// wrapper only holds the delegate, so this presumably relies on the
// delegate itself being safe to share — confirm against vls's SendSync
// contract.
impl<A: Approve> SendSync for ReportingApprover<A> {}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/auth.rs | libs/gl-client/src/signer/auth.rs | //! Utilities used to authorize a signature request based on pending RPCs
use std::str::FromStr;
use lightning_signer::invoice::Invoice;
use vls_protocol_signer::approver::Approval;
use crate::signer::model::Request;
use crate::Error;
/// Maps pending RPC requests onto signer `Approval`s, pre-authorizing the
/// signature requests those RPCs imply.
pub trait Authorizer {
    // NOTE(review): `&Vec<Request>` would idiomatically be `&[Request]`,
    // but changing the trait signature would break external impls.
    fn authorize(
        &self,
        requests: &Vec<Request>,
    ) -> Result<Vec<Approval>, Error>;
}
/// Default [`Authorizer`]: derives approvals from pending `Pay` requests.
pub struct GreenlightAuthorizer {}
impl Authorizer for GreenlightAuthorizer {
    /// Derive approvals from the pending RPCs: each pending `Pay` request
    /// contributes an invoice approval for its bolt11; other request
    /// kinds are ignored.
    fn authorize(
        &self,
        requests: &Vec<Request>,
    ) -> Result<Vec<Approval>, Error> {
        let approvals: Vec<_> = requests
            .iter()
            .flat_map(|request| match request {
                Request::Pay(req) => {
                    // TODO error handling: surface parse failures as an
                    // `Error` instead of panicking. The previous
                    // `.expect("")` aborted with an empty, useless message.
                    Some(Approval::Invoice(
                        Invoice::from_str(&req.bolt11)
                            .expect("parsing bolt11 invoice from pending pay request"),
                    ))
                }
                _ => None,
            })
            .collect();
        Ok(approvals)
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/mod.rs | libs/gl-client/src/signer/mod.rs | use crate::credentials::{RuneProvider, TlsConfigProvider};
use crate::pb::scheduler::{scheduler_client::SchedulerClient, NodeInfoRequest, UpgradeRequest};
use crate::pb::scheduler::{
signer_request, signer_response, ApprovePairingRequest, ApprovePairingResponse, SignerResponse,
};
use crate::pb::PendingRequest;
/// The core signer system. It runs in a dedicated thread or using the
/// caller thread, streaming incoming requests, verifying them,
/// signing if ok, and then shipping the response to the node.
use crate::pb::{node_client::NodeClient, Empty, HsmRequest, HsmRequestContext, HsmResponse};
use crate::runes;
use crate::signer::resolve::Resolver;
use crate::tls::TlsConfig;
use crate::{node, node::Client};
use anyhow::{anyhow, Result};
use base64::engine::general_purpose;
use base64::Engine;
use bytes::BufMut;
use http::uri::InvalidUri;
use lightning_signer::bitcoin::hashes::Hash;
use lightning_signer::bitcoin::secp256k1::PublicKey;
use lightning_signer::bitcoin::Network;
use lightning_signer::node::NodeServices;
use lightning_signer::policy::filter::FilterRule;
use lightning_signer::util::crypto_utils;
use log::{debug, error, info, trace, warn};
use ring::signature::{UnparsedPublicKey, ECDSA_P256_SHA256_FIXED};
use runeauth::{Condition, Restriction, Rune, RuneError};
use std::convert::{TryFrom, TryInto};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;
use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};
use tokio_stream::wrappers::ReceiverStream;
use tonic::transport::{Endpoint, Uri};
use tonic::{Code, Request};
use vls_protocol::msgs::{DeBolt, HsmdInitReplyV4};
use vls_protocol::serde_bolt::Octets;
use vls_protocol_signer::approver::{Approve, MemoApprover};
use vls_protocol_signer::handler;
use vls_protocol_signer::handler::Handler;
mod approver;
mod auth;
pub mod model;
mod report;
mod resolve;
// Version string reported in the init log line below.
const VERSION: &str = "v25.05";
const GITHASH: &str = env!("GIT_HASH");
// Version tag embedded in runes minted/verified by this signer.
const RUNE_VERSION: &str = "gl0";
// This is the same derivation key that is used by core lightning itself.
const RUNE_DERIVATION_SECRET: &str = "gl-commando";
#[derive(Clone)]
pub struct Signer {
    // 32-byte seed the VLS handlers are (re)built from.
    secret: [u8; 32],
    // Master rune used to verify request runes; derived from the seed.
    master_rune: Rune,
    // Shared validator/persister/clock services handed to every handler.
    services: NodeServices,
    tls: TlsConfig,
    // Node id (public key bytes) taken from the hsmd init reply.
    id: Vec<u8>,
    /// Cached version of the init response
    init: Vec<u8>,
    network: Network,
    // Signer state shared with the in-memory persister.
    state: Arc<Mutex<crate::persist::State>>,
}
/// Errors surfaced while running the signer loop and talking to the
/// scheduler and node.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("could not connect to scheduler: ")]
    SchedulerConnection(),
    #[error("scheduler returned an error: {0}")]
    Scheduler(tonic::Status),
    #[error("could not connect to node: {0}")]
    NodeConnection(#[from] tonic::transport::Error),
    #[error("connection to node lost: {0}")]
    NodeDisconnect(#[from] tonic::Status),
    #[error("authentication error: {0}")]
    Auth(crate::Error),
    #[error("scheduler returned faulty URI: {0}")]
    InvalidUri(#[from] InvalidUri),
    // Raw request bytes plus the pending-request context it failed to
    // match against.
    #[error("resolver error: request {0:?}, context: {1:?}")]
    Resolver(Vec<u8>, Vec<crate::signer::model::Request>),
    #[error("error asking node to be upgraded: {0}")]
    Upgrade(tonic::Status),
    #[error("protocol error: {0}")]
    Protocol(#[from] vls_protocol::Error),
    #[error("other: {0}")]
    Other(anyhow::Error),
    #[error("could not approve pairing request: {0}")]
    ApprovePairingRequestError(String),
}
impl Signer {
    /// Construct a signer for `network` from a seed, deriving the node id,
    /// the cached hsmd init reply and the master rune used to verify
    /// request runes.
    ///
    /// NOTE(review): panics if `secret` is shorter than 32 bytes — callers
    /// must guarantee the length.
    pub fn new<T>(secret: Vec<u8>, network: Network, creds: T) -> Result<Signer, anyhow::Error>
    where
        T: TlsConfigProvider,
    {
        use lightning_signer::policy::{
            filter::PolicyFilter, simple_validator::SimpleValidatorFactory,
        };
        use lightning_signer::signer::ClockStartingTimeFactory;
        use lightning_signer::util::clock::StandardClock;
        info!("Initializing signer for {VERSION} ({GITHASH}) (VLS)");
        let mut sec: [u8; 32] = [0; 32];
        sec.copy_from_slice(&secret[0..32]);
        // The persister takes care of persisting metadata across
        // restarts
        let persister = Arc::new(crate::persist::MemoryPersister::new());
        // Start from the default policy and selectively downgrade rules
        // from "fail" to "warn" (see the TODOs for why each is relaxed).
        let mut policy =
            lightning_signer::policy::simple_validator::make_default_simple_policy(network);
        policy.filter = PolicyFilter::default();
        policy.filter.merge(PolicyFilter {
            // TODO: Remove once we have fully switched over to zero-fee anchors
            rules: vec![
                FilterRule::new_warn("policy-channel-safe-type-anchors"),
                FilterRule::new_warn("policy-routing-balanced"),
                FilterRule::new_warn("policy-commitment-retry-same"),
            ],
        });
        // TODO Remove this once VLS has implemented the fee budget
        // per payment, rather than the fee budget per HTLC.
        // Ref: https://github.com/Blockstream/greenlight/issues/538
        {
            policy.max_feerate_per_kw = 150_000;
            policy.filter.merge(PolicyFilter {
                rules: vec![
                    FilterRule::new_warn("policy-commitment-fee-range"),
                    FilterRule::new_warn("policy-mutual-fee-range"),
                ],
            });
        }
        policy.filter.merge(PolicyFilter {
            // TODO: Remove once we have implemented zero invoice support
            rules: vec![
                FilterRule::new_warn("policy-routing-balanced"),
                FilterRule::new_warn("policy-htlc-fee-range"),
            ],
        });
        // TODO: Remove once we found the desync issue
        policy.filter.merge(PolicyFilter {
            rules: vec![
                // "policy failure: get_per_commitment_secret: cannot
                // revoke commitment_number 312 when
                // next_holder_commit_num is 313"
                FilterRule::new_warn("policy-revoke-new-commitment-signed"),
            ],
        });
        // Increase the invoices limit. Results in a larger state, but
        // bumping into this is rather annoying.
        policy.max_invoices = 10_000usize;
        // Relaxed max_routing_fee since we no longer have the
        // presplitter which was causing the HTLCs to be smaller.
        policy.max_routing_fee_msat = 1_000_000;
        let validator_factory = Arc::new(SimpleValidatorFactory::new_with_policy(policy));
        let starting_time_factory = ClockStartingTimeFactory::new();
        let clock = Arc::new(StandardClock());
        let services = NodeServices {
            validator_factory,
            starting_time_factory,
            persister: persister.clone(),
            trusted_oracle_pubkeys: vec![],
            clock,
        };
        let mut handler = handler::HandlerBuilder::new(network, 0 as u64, services.clone(), sec)
            .build()
            .map_err(|e| anyhow!("building root_handler: {:?}", e))?;
        // Calling init on the `InitHandler` from above puts it into a
        // state that it can be upgraded into the `RootHandler` that
        // we need for the rest of the run.
        let init = Signer::initmsg(&mut handler)?;
        let init = HsmdInitReplyV4::from_vec(init).unwrap();
        let id = init.node_id.0.to_vec();
        use vls_protocol::msgs::SerBolt;
        let init = init.as_vec();
        // Init master rune. We create the rune seed from the nodes
        // seed by deriving a hardened key tagged with "rune secret".
        let rune_secret = crypto_utils::hkdf_sha256(&sec, RUNE_DERIVATION_SECRET.as_bytes(), &[]);
        let mr = Rune::new_master_rune(&rune_secret, vec![], None, Some(RUNE_VERSION.to_string()))?;
        trace!("Initialized signer for node_id={}", hex::encode(&id));
        Ok(Signer {
            secret: sec,
            master_rune: mr,
            services,
            tls: creds.tls_config(),
            id,
            init,
            network,
            state: persister.state(),
        })
    }
fn init_handler(&self) -> Result<handler::InitHandler, anyhow::Error> {
let h = handler::HandlerBuilder::new(
self.network,
0 as u64,
self.services.clone(),
self.secret,
)
.build()
.map_err(|e| anyhow!("building root_handler: {:?}", e))?;
Ok(h)
}
fn handler(&self) -> Result<handler::RootHandler, anyhow::Error> {
let mut h = self.init_handler()?;
h.handle(Signer::initreq())
.expect("handling the hsmd_init message");
Ok(h.into())
}
fn handler_with_approver(
&self,
approver: Arc<dyn Approve>,
) -> Result<handler::RootHandler, Error> {
let mut h = handler::HandlerBuilder::new(
self.network,
0 as u64,
self.services.clone(),
self.secret,
)
.approver(approver)
.build()
.map_err(|e| crate::signer::Error::Other(anyhow!("Could not create handler: {:?}", e)))?;
h.handle(Signer::initreq())
.expect("handling the hsmd_init message");
Ok(h.into())
}
    /// Create an `init` request that we can pass to the signer.
    fn initreq() -> vls_protocol::msgs::Message {
        vls_protocol::msgs::Message::HsmdInit(vls_protocol::msgs::HsmdInit {
            key_version: vls_protocol::model::Bip32KeyVersion {
                pubkey_version: 0,
                privkey_version: 0,
            },
            chain_params: lightning_signer::bitcoin::BlockHash::all_zeros(),
            encryption_key: None,
            // No development overrides: run with the real seed only.
            dev_privkey: None,
            dev_bip32_seed: None,
            dev_channel_secrets: None,
            dev_channel_secrets_shaseed: None,
            // Wire protocol version range this signer speaks.
            hsm_wire_min_version: 4,
            hsm_wire_max_version: 6,
        })
    }
/// Drive `handler` through the `HsmdInit` request and return the
/// serialized reply, or an empty vector if no reply was produced.
fn initmsg(handler: &mut vls_protocol_signer::handler::InitHandler) -> Result<Vec<u8>, Error> {
    let outcome = handler.handle(Signer::initreq()).unwrap();
    let reply = outcome.1;
    let serialized = reply.map(|msg| msg.as_vec()).unwrap_or_default();
    Ok(serialized)
}
/// Filter out any request that is not signed, such that the
/// remainder is the minimal set to reconcile state changes
/// against.
///
/// Returns an error if a signature failed verification or if the
/// rune verification failed.
fn check_request_auth(
    &self,
    requests: Vec<crate::pb::PendingRequest>,
) -> Vec<Result<crate::pb::PendingRequest, anyhow::Error>> {
    // Requests missing any of the required auth fields are
    // unverifiable anyway, so drop them up front.
    // Todo: partition results to provide more detailed errors.
    requests
        .into_iter()
        .filter(|req| !(req.pubkey.is_empty() || req.signature.is_empty() || req.rune.is_empty()))
        .map(|req| {
            let pubkey = UnparsedPublicKey::new(&ECDSA_P256_SHA256_FIXED, &req.pubkey);
            // The signature covers the payload plus the timestamp
            // (when set), appended exactly as the client does.
            let mut payload = req.request.clone();
            if req.timestamp != 0 {
                payload.put_u64(req.timestamp);
            }
            if let Err(e) = pubkey.verify(&payload, &req.signature) {
                return Err(anyhow!("signature verification failed: {}", e));
            }
            match self.verify_rune(req.clone()) {
                Ok(_) => Ok(req),
                Err(e) => Err(anyhow!("rune verification failed: {}", e)),
            }
        })
        .collect()
}
/// Verifies that the public key of the request and the signers rune version
/// match the corresponding restrictions of the rune.
///
/// Checks, in order: the rune parses, it carries a `pubkey=`
/// restriction, the request URI targets a known service, and
/// finally the rune's restrictions hold for the request context.
fn verify_rune(&self, request: crate::pb::PendingRequest) -> Result<(), anyhow::Error> {
    let rune64 = general_purpose::URL_SAFE.encode(request.rune);
    let rune = Rune::from_base64(&rune64)?;
    // A valid gl-rune must contain a pubkey field as this is bound to the
    // signer. Against the rules of runes we do not accept a rune that has
    // no restriction on a public key.
    if !rune.to_string().contains("pubkey=") {
        return Err(anyhow!("rune is missing pubkey field"));
    }
    // Currently we only use a 0 unique_id and a pubkey field to allow
    // for delegation in the future but we could also set the public
    // key as the unique_id in the future and add a method that allows
    // to create new empty runes.
    let unique_id = rune.get_id();
    // Combine the rune id with this signer's rune version; an
    // id-less rune yields an empty version string.
    let ver_id = match unique_id {
        Some(id) => format!("{}-{}", id, RUNE_VERSION),
        None => String::default(),
    };
    // Check that the request points to `cln.Node`.
    // A URI of the form `/cln.Node/Method` splits into
    // ["", "cln.Node", "Method"]; skip the leading empty segment.
    let mut parts = request.uri.split('/');
    parts.next();
    match parts.next() {
        Some(service) => {
            if service != "cln.Node" && service != "greenlight.Node" {
                debug!("request from unknown service {}.", service);
                return Err(anyhow!("service {} is not valid", service));
            }
        }
        None => {
            debug!("could not extract service from the uri while verifying rune.");
            return Err(anyhow!("can not extract service from uri"));
        }
    };
    // Extract the method from the request uri: eg. `/cln.Node/CreateInvoice`
    // becomes `createinvoice`.
    let method = match parts.next() {
        Some(m) => m.to_lowercase(),
        None => {
            debug!("could not extract method from uri while verifying rune.");
            return Err(anyhow!("can not extract uri form request"));
        }
    };
    // Evaluate the rune's restrictions against the request context:
    // method, hex-encoded requester pubkey, current time and the
    // versioned unique id computed above.
    let ctx = runes::Context {
        method,
        pubkey: hex::encode(request.pubkey),
        time: SystemTime::now(),
        unique_id: ver_id,
    };
    match self.master_rune.check_with_reason(&rune64, ctx) {
        Ok(_) => Ok(()),
        Err(e) => Err(e.into()),
    }
}
/// Given the URI of the running node, connect to it and stream
/// requests from it. The requests are then verified and processed
/// using the `Hsmd`.
///
/// Returns `Ok(())` when the node closes the stream; transport
/// errors surface as `Error::NodeDisconnect`.
pub async fn run_once(&self, node_uri: Uri) -> Result<(), Error> {
    info!("Connecting to node at {}", node_uri);
    // NOTE(review): hosts containing "blckstrm" use the TLS config
    // as-is, everything else overrides the expected domain name to
    // "localhost" — presumably for proxied/tunneled deployments;
    // confirm against the deployment setup.
    let tls_config = if node_uri.host().unwrap_or_default().contains("blckstrm") {
        self.tls.inner.clone()
    } else {
        self.tls.inner.clone().domain_name("localhost")
    };
    // Lazy connection with keepalives so long-idle signer streams
    // are not torn down by intermediaries.
    let c = Endpoint::from_shared(node_uri.to_string())?
        .tls_config(tls_config)?
        .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
        .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
        .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
        .keep_alive_while_idle(true)
        .connect_lazy();
    let mut client = NodeClient::new(c);
    let mut stream = client
        .stream_hsm_requests(Request::new(Empty::default()))
        .await?
        .into_inner();
    debug!("Starting to stream signer requests");
    loop {
        let req = match stream
            .message()
            .await
            .map_err(|e| Error::NodeDisconnect(e))?
        {
            Some(r) => r,
            None => {
                warn!("Signer request stream ended, the node shouldn't do this.");
                return Ok(());
            }
        };
        // Keep enough of the request around to log it if processing
        // fails (the request itself is moved into process_request).
        let request_id = req.request_id;
        let hex_req = hex::encode(&req.raw);
        let signer_state = req.signer_state.clone();
        trace!("Received request {}", hex_req);
        match self.process_request(req).await {
            Ok(response) => {
                trace!("Sending response {}", hex::encode(&response.raw));
                client
                    .respond_hsm_request(response)
                    .await
                    .map_err(|e| Error::NodeDisconnect(e))?;
            }
            Err(e) => {
                // Processing failed: report an empty payload with the
                // error string back to the node instead of crashing
                // the signer loop.
                let response = HsmResponse {
                    raw: vec![],
                    request_id,
                    error: format!("{:?}", e),
                    signer_state: vec![],
                };
                client
                    .respond_hsm_request(response)
                    .await
                    .map_err(|e| Error::NodeDisconnect(e))?;
                warn!(
                    "Ignoring error {} for request {} with state {:?}",
                    e, hex_req, signer_state,
                )
            }
        };
    }
}
/// Check that the signer request `msg` is justified by one of the
/// pending grpc context requests, delegating to the `Resolver`.
fn authenticate_request(
    &self,
    msg: &vls_protocol::msgs::Message,
    reqs: &Vec<model::Request>,
) -> Result<(), Error> {
    log::trace!(
        "Resolving signature request against pending grpc commands: {:?}",
        reqs
    );
    // Quick path out of here: we can't find a resolution for a
    // request, then abort!
    Resolver::try_resolve(msg, reqs)
}
/// Process a single request coming from the node: merge the node's
/// view of the signer state, authenticate the request against the
/// pending grpc context requests, run it through a `RootHandler`
/// and return the serialized response together with the updated
/// signer state.
async fn process_request(&self, req: HsmRequest) -> Result<HsmResponse, Error> {
    debug!("Processing request {:?}", req);
    let diff: crate::persist::State = req.signer_state.clone().into();
    // Merge the incoming state into ours and snapshot the result
    // for logging; the lock is released at the end of this block.
    let prestate = {
        debug!("Updating local signer state with state from node");
        let mut state = self.state.lock().unwrap();
        state.merge(&diff).unwrap();
        trace!("Processing request {}", hex::encode(&req.raw));
        state.clone()
    };
    // The first two bytes represent the message type. Check that
    // it is not a `sign-message` request (type 23).
    if let &[h, l, ..] = req.raw.as_slice() {
        let typ = ((h as u16) << 8) | (l as u16);
        if typ == 23 {
            warn!("Refusing to process sign-message request");
            return Err(Error::Other(anyhow!(
                "Cannot process sign-message requests from node."
            )));
        }
    }
    // Authenticated, decodable context requests; failures to decode
    // are logged and dropped.
    let ctxrequests: Vec<model::Request> = self
        .check_request_auth(req.requests.clone())
        .into_iter()
        .filter_map(|r| r.ok())
        .map(|r| decode_request(r))
        .filter_map(|r| match r {
            Ok(r) => Some(r),
            Err(e) => {
                log::error!("Unable to decode request in context: {}", e);
                None
            }
        })
        .collect::<Vec<model::Request>>();
    let msg = vls_protocol::msgs::from_vec(req.raw.clone()).map_err(|e| Error::Protocol(e))?;
    log::debug!("Handling message {:?}", msg);
    log::trace!("Signer state {}", serde_json::to_string(&prestate).unwrap());
    // Authentication failures are reported upstream; without the
    // "permissive" feature they also abort processing.
    if let Err(e) = self.authenticate_request(&msg, &ctxrequests) {
        report::Reporter::report(crate::pb::scheduler::SignerRejection {
            msg: e.to_string(),
            request: Some(req.clone()),
            git_version: GITHASH.to_string(),
            node_id: self.node_id(),
        })
        .await;
        #[cfg(not(feature = "permissive"))]
        return Err(Error::Resolver(req.raw, ctxrequests));
    };
    // If present, add the close_to_addr to the allowlist
    for parsed_request in ctxrequests.iter() {
        match parsed_request {
            model::Request::GlConfig(gl_config) => {
                let pubkey = PublicKey::from_slice(&self.id);
                match pubkey {
                    Ok(p) => {
                        // Best effort: errors updating the allowlist
                        // are deliberately ignored.
                        let _ = self
                            .services
                            .persister
                            .update_node_allowlist(&p, vec![gl_config.close_to_addr.clone()]);
                    }
                    Err(e) => debug!("Could not parse public key {:?}: {:?}", self.id, e),
                }
            }
            _ => {}
        }
    }
    use auth::Authorizer;
    let auth = auth::GreenlightAuthorizer {};
    let approvals = auth.authorize(&ctxrequests).map_err(|e| Error::Auth(e))?;
    debug!("Current approvals: {:?}", approvals);
    // Default-deny approver unless the "permissive" feature is on;
    // explicit approvals derived from the context are layered on top.
    let approver = Arc::new(MemoApprover::new(approver::ReportingApprover::new(
        #[cfg(feature = "permissive")]
        vls_protocol_signer::approver::PositiveApprover(),
        #[cfg(not(feature = "permissive"))]
        vls_protocol_signer::approver::NegativeApprover(),
    )));
    approver.approve(approvals);
    let root_handler = self.handler_with_approver(approver)?;
    log::trace!("Updating state from context");
    update_state_from_context(&ctxrequests, &root_handler)
        .expect("Updating state from context requests");
    log::trace!("State updated");
    // Match over root and client handler.
    let response = match req.context.clone() {
        Some(HsmRequestContext { dbid: 0, .. }) | None => {
            // This is the main daemon talking to us.
            root_handler.handle(msg)
        }
        Some(c) => {
            // Per-channel sub-daemon: route through a client handler
            // keyed by the peer pubkey and the daemon's dbid.
            let pk: [u8; 33] = c.node_id.try_into().unwrap();
            let pk = vls_protocol::model::PubKey(pk);
            root_handler
                .for_new_client(1 as u64, pk, c.dbid)
                .handle(msg)
        }
    };
    // Handler errors are reported upstream before being returned.
    if let Err(e) = response {
        report::Reporter::report(crate::pb::scheduler::SignerRejection {
            msg: format!("{:?}", e),
            request: Some(req.clone()),
            git_version: GITHASH.to_string(),
            node_id: self.node_id(),
        })
        .await;
        return Err(Error::Other(anyhow!("processing request: {e:?}")));
    }
    let response = response.unwrap();
    // Snapshot the (possibly modified) signer state so the node can
    // persist the changes alongside the response.
    let signer_state: Vec<crate::pb::SignerStateEntry> = {
        debug!("Serializing state changes to report to node");
        let state = self.state.lock().unwrap();
        state.clone().into()
    };
    Ok(HsmResponse {
        raw: response.as_vec(),
        request_id: req.request_id,
        signer_state,
        error: "".to_owned(),
    })
}
/// Return a copy of this signer's node id (as reported in the init
/// reply at construction time).
pub fn node_id(&self) -> Vec<u8> {
    let id: &[u8] = &self.id;
    id.to_vec()
}
/// Return a copy of the serialized init reply captured when the
/// signer was created.
pub fn get_init(&self) -> Vec<u8> {
    Vec::from(self.init.as_slice())
}
/// Retrieve the messages we know `lightningd` will ask when
/// starting. Since we can't be attached during startup, or on
/// background sync runs, we need to stash them at the `scheduler`
/// so we can start without a signer present.
pub fn get_startup_messages(&self) -> Vec<StartupMessage> {
    // The init handshake is the first canned request/response pair.
    let mut init_handler = self.init_handler().unwrap();
    let init = StartupMessage {
        request: Signer::initreq().inner().as_vec(),
        response: init_handler
            .handle(Signer::initreq())
            .unwrap()
            .1
            .map(|a| a.as_vec())
            .unwrap_or_default(),
    };
    let requests = vec![
        // v22.11 introduced an additional startup message, the
        // bolt12 key generation
        vls_protocol::msgs::Message::DeriveSecret(vls_protocol::msgs::DeriveSecret {
            info: Octets("bolt12-invoice-base".as_bytes().to_vec()),
        }),
        // SCB needs a secret derived too
        vls_protocol::msgs::Message::DeriveSecret(vls_protocol::msgs::DeriveSecret {
            info: Octets("scb secret".as_bytes().to_vec()),
        }),
        // Commando needs a secret for its runes
        vls_protocol::msgs::Message::DeriveSecret(vls_protocol::msgs::DeriveSecret {
            info: Octets("commando".as_bytes().to_vec()),
        }),
        // The node alias key
        vls_protocol::msgs::Message::DeriveSecret(vls_protocol::msgs::DeriveSecret {
            info: Octets("node-alias-base".as_bytes().to_vec()),
        }),
        vls_protocol::msgs::Message::DeriveSecret(vls_protocol::msgs::DeriveSecret {
            info: Octets("offer-blinded-path".as_bytes().to_vec()),
        }),
    ];
    // Serialize the requests first, then run each through a fresh
    // handler to compute the canned responses.
    let serialized: Vec<Vec<u8>> = requests.iter().map(|m| m.inner().as_vec()).collect();
    let responses: Vec<Vec<u8>> = requests
        .into_iter()
        .map(|r| self.handler().unwrap().handle(r).unwrap().as_vec())
        .collect();
    let mut msgs: Vec<StartupMessage> = serialized
        .into_iter()
        .zip(responses)
        .map(|r| {
            log::debug!("Storing canned request-response: {:?} -> {:?}", r.0, r.1);
            StartupMessage {
                request: r.0,
                response: r.1,
            }
        })
        .collect();
    // The init exchange must come first, matching lightningd's
    // startup order.
    msgs.insert(0, init);
    msgs
}
/// Extract the BIP32 extended key bytes from the stored init reply.
///
/// Panics if the stored init message cannot be parsed, or is of a
/// variant that does not carry a bip32 key.
pub fn bip32_ext_key(&self) -> Vec<u8> {
    use vls_protocol::msgs::{self, Message};
    let parsed = msgs::from_vec(self.init.clone()).expect("unparseable init message");
    match parsed {
        Message::HsmdInitReplyV2(m) => m.bip32.0.to_vec(),
        Message::HsmdInitReplyV4(m) => m.bip32.0.to_vec(),
        Message::HsmdInit2Reply(m) => m.bip32.0.to_vec(),
        other => panic!("Unknown initmsg {:?}, cannot extract bip32 key", other),
    }
}
/// Compute the bip32 extended key as returned by the legacy (wire
/// version 1-2) `HsmdInit` handshake, by replaying that handshake
/// against a fresh handler.
pub fn legacy_bip32_ext_key(&self) -> Vec<u8> {
    let mut handler = self.init_handler().expect("retrieving the handler");
    // Same init request as `initreq`, but advertising only the
    // legacy wire versions 1-2 so we get the legacy reply format.
    let req = vls_protocol::msgs::Message::HsmdInit(vls_protocol::msgs::HsmdInit {
        key_version: vls_protocol::model::Bip32KeyVersion {
            pubkey_version: 0,
            privkey_version: 0,
        },
        chain_params: lightning_signer::bitcoin::BlockHash::all_zeros(),
        encryption_key: None,
        dev_privkey: None,
        dev_bip32_seed: None,
        dev_channel_secrets: None,
        dev_channel_secrets_shaseed: None,
        hsm_wire_min_version: 1,
        hsm_wire_max_version: 2,
    });
    let initmsg = handler
        .handle(req)
        .expect("handling legacy init message")
        .1
        .map(|a| a.as_vec())
        .unwrap_or_default();
    // Skip the reply header before the bip32 key. NOTE(review):
    // offset 35 presumably covers the 2-byte message type plus the
    // 33-byte node_id — confirm against the wire format; also note
    // this panics if the reply is shorter than 35 bytes.
    initmsg[35..].to_vec()
}
/// Connect to the scheduler given by the environment variable
/// `GL_SCHEDULER_GRPC_URI` (or the default URI) and wait for the
/// node to be scheduled. Once scheduled, connect to the node
/// directly and start streaming and processing requests.
pub async fn run_forever(&self, shutdown: mpsc::Receiver<()>) -> Result<(), anyhow::Error> {
    let scheduler_uri = crate::utils::scheduler_uri();
    debug!("Starting signer run loop");
    // Method-call syntax instead of `Self::run_forever_with_uri(&self, ...)`
    // (the extra borrow was redundant).
    let res = self.run_forever_with_uri(shutdown, scheduler_uri).await;
    debug!("Exited signer run loop");
    res
}
/// Create and, if necessary, upgrade the scheduler
///
/// Builds a lazily-connected client to `scheduler_uri` and calls
/// `maybe_upgrade`, retrying every 3 seconds while the scheduler is
/// unreachable; all other errors abort.
async fn init_scheduler(
    &self,
    scheduler_uri: String,
) -> Result<SchedulerClient<tonic::transport::channel::Channel>> {
    info!("Connecting to scheduler at {scheduler_uri}");
    let channel = Endpoint::from_shared(scheduler_uri.clone())?
        .tls_config(self.tls.inner.clone())?
        .tcp_keepalive(Some(crate::TCP_KEEPALIVE))
        .http2_keep_alive_interval(crate::TCP_KEEPALIVE)
        .keep_alive_timeout(crate::TCP_KEEPALIVE_TIMEOUT)
        .keep_alive_while_idle(true)
        .connect_lazy();
    let mut scheduler = SchedulerClient::new(channel);
    // Upgrade node if necessary.
    // If it fails due to connection error, sleep and retry. Re-throw all other errors.
    loop {
        #[allow(deprecated)]
        let res = scheduler
            .maybe_upgrade(UpgradeRequest {
                initmsg: self.init.clone(),
                signer_version: self.version().to_owned(),
                // Canned startup messages so the node can boot
                // without a live signer attached.
                startupmsgs: self
                    .get_startup_messages()
                    .into_iter()
                    .map(|s| s.into())
                    .collect(),
            })
            .await;
        match res {
            Err(e) => match e.code() {
                Code::Unavailable => {
                    debug!("Cannot connect to scheduler, sleeping and retrying");
                    sleep(Duration::from_secs(3)).await;
                    continue;
                }
                _ => Err(Error::Upgrade(e))?,
            },
            Ok(r) => {
                debug!("Server reports version {}", r.into_inner().old_version)
            }
        }
        break;
    }
    Ok(scheduler)
}
/// The core signer loop. Connects to the signer and keeps the connection alive.
///
/// Used as inner loop for `run_forever_with_uri`.
///
/// Waits (via the scheduler) for the node to be scheduled, then
/// attaches to it with `run_once`; any failure sleeps briefly and
/// retries. This loop never returns `Ok` on its own.
async fn run_forever_inner(
    &self,
    mut scheduler: SchedulerClient<tonic::transport::channel::Channel>,
) -> Result<(), anyhow::Error> {
    loop {
        debug!("Calling scheduler.get_node_info");
        let node_info_res = scheduler
            .get_node_info(NodeInfoRequest {
                node_id: self.id.clone(),
                // This `wait` parameter means that the scheduler will
                // not automatically schedule the node. Rather we are
                // telling the scheduler we want to be told as soon as
                // the node is being scheduled so we can re-attach to
                // that.
                wait: true,
            })
            .await;
        let node_info = match node_info_res.map(|v| v.into_inner()) {
            Ok(v) => {
                debug!("Got node_info from scheduler: {:?}", v);
                v
            }
            Err(e) => {
                trace!("Got an error from the scheduler: {e}. Sleeping before retrying");
                sleep(Duration::from_millis(1000)).await;
                continue;
            }
        };
        // An empty URI means the node is not currently scheduled;
        // back off and ask again.
        if node_info.grpc_uri.is_empty() {
            trace!("Got an empty GRPC URI, node is not scheduled, sleeping and retrying");
            sleep(Duration::from_millis(1000)).await;
            continue;
        }
        // Attach to the node; errors are logged and we go back to
        // polling the scheduler.
        if let Err(e) = self
            .run_once(Uri::from_maybe_shared(node_info.grpc_uri)?)
            .await
        {
            warn!("Error running against node: {e}");
        }
    }
}
/// Run the signer against the scheduler at `scheduler_uri` until
/// either inner loop exits unexpectedly or the `shutdown` channel
/// fires; always returns `Ok(())` once the select resolves.
pub async fn run_forever_with_uri(
    &self,
    mut shutdown: mpsc::Receiver<()>,
    scheduler_uri: String,
) -> Result<(), anyhow::Error> {
    let scheduler = self.init_scheduler(scheduler_uri).await?;
    // Race the node-attached loop, the scheduler-attached loop and
    // the shutdown signal; whichever completes first wins and the
    // other branches are cancelled.
    tokio::select! {
        run_forever_inner_res = self.run_forever_inner(scheduler.clone()) => {
            error!("Inner signer loop exited unexpectedly: {run_forever_inner_res:?}");
        },
        run_forever_scheduler_res = self.run_forever_scheduler(scheduler) => {
            error!("Scheduler signer loop exited unexpectedly: {run_forever_scheduler_res:?}")
        }
        _ = shutdown.recv() => debug!("Received the signal to exit the signer loop")
    };
    info!("Exiting the signer loop");
    Ok(())
}
async fn run_forever_scheduler(
&self,
scheduler: SchedulerClient<tonic::transport::Channel>,
) -> Result<(), anyhow::Error> {
loop {
if let Err(e) = self.run_once_scheduler(scheduler.clone()).await {
warn!("Error running schduler, trying again: {e}");
}
}
}
async fn run_once_scheduler(
&self,
mut scheduler: SchedulerClient<tonic::transport::Channel>,
) -> Result<(), anyhow::Error> {
let (sender, rx) = mpsc::channel(1);
let outbound = ReceiverStream::new(rx);
// let inbound_future = scheduler.signer_requests_stream(outbound);
let mut stream = scheduler
.signer_requests_stream(outbound)
.await?
.into_inner();
trace!("Starting to stream signer requests from scheduler");
loop {
match stream.message().await {
Ok(Some(msg)) => {
let req_id = msg.request_id;
trace!("Processing scheduler request {}", req_id);
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | true |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/resolve.rs | libs/gl-client/src/signer/resolve.rs | //! Resolver utilities to match incoming requests against the request
//! context and find a justifications.
use crate::signer::{model::Request, Error};
use vls_protocol::msgs::Message;
pub struct Resolver {}
impl Resolver {
/// Attempt to find a resolution for a given request. We default
/// to failing, and allowlist individual matches between pending
/// context requests and the signer request being resolved. Where
/// possible we also verify the contents of the request against
/// the contents of the context request. TODOs in here may
/// indicate ways to strengthen the verification.
pub fn try_resolve(req: &Message, reqctx: &Vec<Request>) -> Result<(), Error> {
    log::trace!("Resolving {:?}", req);
    // Some requests do not need a justification. For example we
    // reconnect automatically, so there may not even be a context
    // request pending which would skip the entire stack below, so
    // we do an early pass:
    let accept = match req {
        // Commands that simply have no context to check against
        Message::GetHeartbeat(_) => true,
        Message::Ecdh(_) => true,
        Message::Ping(_) => true,
        Message::Pong(_) => true,
        Message::SignChannelAnnouncement(_) => true,
        Message::SignChannelUpdate(_) => true,
        Message::SignNodeAnnouncement(_) => true,
        Message::CheckPubKey(_) => true,
        // Duplicate verification with VLS, we defer to VLS
        Message::GetChannelBasepoints(_) => true,
        Message::ValidateCommitmentTx(_) => true,
        Message::SignWithdrawal(_) => true,
        Message::SetupChannel(_) => true,
        Message::GetPerCommitmentPoint(_) => true,
        Message::ValidateRevocation(_) => true,
        Message::NewChannel(_) => true,
        Message::SignCommitmentTx(_) => true,
        Message::SignGossipMessage(_) => true,
        Message::SignMutualCloseTx(_) => true,
        Message::SignMutualCloseTx2(_) => true,
        Message::SignRemoteCommitmentTx(_) => true,
        Message::SignRemoteCommitmentTx2(_) => true,
        Message::SignRemoteHtlcTx(_) => true,
        // Resolution of an existing HTLC, we should never not try
        // to grab funds if we can.
        Message::SignPenaltyToUs(_) => true,
        Message::SignAnyPenaltyToUs(_) => true,
        Message::SignAnyDelayedPaymentToUs(_) => true,
        Message::SignAnyLocalHtlcTx(_) => true,
        Message::SignAnyRemoteHtlcToUs(_) => true,
        Message::LockOutpoint(_) => true,
        Message::CheckOutpoint(_) => true,
        Message::SignAnyChannelAnnouncement(_) => true,
        Message::RevokeCommitmentTx(_) => true,
        Message::ForgetChannel(_) => true,
        // Default to rejecting, punting the decision to the next
        // step.
        _ => false,
    };
    // If we found a resolution, then there is no point in trying
    // to match up further.
    if accept {
        log::trace!(
            "Request {:?} resolved with no context request required",
            req
        );
        return Ok(());
    }
    // Second pass: look for a pending context request that
    // justifies this signer request, checking contents where we
    // can.
    for cr in reqctx {
        let accept = match (req, cr) {
            (Message::SignMessage(m1), Request::SignMessage(m2)) => {
                m1.message.0.clone() == m2.message.as_bytes()
            }
            (Message::NewChannel(m1), Request::FundChannel(m2)) => {
                // Different node_id? Reject!
                m1.peer_id.0 == m2.id.as_slice()
                // TODO: Add `close_to` to allowlist for the close
                // later on
            }
            (Message::SignInvoice(_l), Request::LspInvoice(_r)) => {
                // TODO: This could also need some
                // strengthening. See below.
                true
            }
            (Message::SignInvoice(_l), Request::Invoice(_r)) => {
                // TODO: This could be strengthened by parsing the
                // invoice from `l.u5bytes` and verify the
                // description, amount and (maybe) payment_hash
                true
            }
            (Message::PreapproveInvoice(l), Request::Pay(r)) => {
                l.invstring.0 == r.bolt11.as_bytes()
            }
            (Message::PreapproveInvoice(l), Request::PreApproveInvoice(r)) => {
                // Manually calling preapproveinvoice should
                // always be allowed. The bolt11 string have to
                // match.
                l.invstring.0 == r.bolt11.as_bytes()
            }
            (Message::PreapproveInvoice(l), Request::TrampolinePay(r)) => {
                l.invstring.0 == r.bolt11.as_bytes()
            }
            (_, _) => false,
        };
        // Did we find a resolution? If yes we can stop here.
        if accept {
            log::trace!("Request {:?} approved with context request {:?}", req, cr);
            return Ok(());
        }
    }
    // No justification found: surface the serialized request and
    // the context we checked it against.
    let ser = req.inner().as_vec();
    Err(Error::Resolver(ser, reqctx.to_vec()))
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/model/greenlight.rs | libs/gl-client/src/signer/model/greenlight.rs | // Decoding support for the legacy `greenlight.proto` models and
// methods. This will be mostly deprecated as we go.
use super::Request;
pub use crate::pb::*;
use anyhow::anyhow;
use prost::Message;
/// Decode the protobuf payload `p` of a legacy `greenlight.Node`
/// call into the matching `Request` variant, keyed by the gRPC
/// `uri`; unknown URIs are rejected.
pub fn decode_request(uri: &str, p: &[u8]) -> anyhow::Result<Request> {
    let request = match uri {
        "/greenlight.Node/Configure" => Request::GlConfig(crate::pb::GlConfig::decode(p)?),
        "/greenlight.Node/TrampolinePay" => {
            Request::TrampolinePay(crate::pb::TrampolinePayRequest::decode(p)?)
        }
        "/greenlight.Node/LspInvoice" => {
            Request::LspInvoice(crate::pb::LspInvoiceRequest::decode(p)?)
        }
        other => return Err(anyhow!("Unknown URI {}, can't decode payload", other)),
    };
    Ok(request)
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/model/mod.rs | libs/gl-client/src/signer/model/mod.rs | // This file was generated by `gengrpc` from the CLN JSON-Schema.
// Do not edit this file.
//
pub mod cln;
pub mod greenlight;
/// Variants prefixed with `Gl` are deprecated and will eventually be removed.
///
/// One variant per decodable gRPC method; produced by the
/// `decode_request` functions in the `cln` and `greenlight`
/// submodules.
#[derive(Clone, Debug)]
pub enum Request {
    // Legacy greenlight.proto requests
    GlConfig(greenlight::GlConfig),
    LspInvoice(greenlight::LspInvoiceRequest),
    // cln.proto requests
    Getinfo(cln::GetinfoRequest),
    ListPeers(cln::ListpeersRequest),
    ListFunds(cln::ListfundsRequest),
    SendPay(cln::SendpayRequest),
    ListChannels(cln::ListchannelsRequest),
    AddGossip(cln::AddgossipRequest),
    AutoCleanOnce(cln::AutocleanonceRequest),
    CheckMessage(cln::CheckmessageRequest),
    Close(cln::CloseRequest),
    Connect(cln::ConnectRequest),
    CreateInvoice(cln::CreateinvoiceRequest),
    Datastore(cln::DatastoreRequest),
    CreateOnion(cln::CreateonionRequest),
    DelDatastore(cln::DeldatastoreRequest),
    DelExpiredInvoice(cln::DelinvoiceRequest),
    DelInvoice(cln::DelinvoiceRequest),
    Invoice(cln::InvoiceRequest),
    ListDatastore(cln::ListdatastoreRequest),
    ListInvoices(cln::ListinvoicesRequest),
    SendOnion(cln::SendonionRequest),
    ListSendPays(cln::ListsendpaysRequest),
    ListTransactions(cln::ListtransactionsRequest),
    Pay(cln::PayRequest),
    PreApproveInvoice(cln::PreapproveinvoiceRequest),
    ListNodes(cln::ListnodesRequest),
    WaitAnyInvoice(cln::WaitanyinvoiceRequest),
    WaitInvoice(cln::WaitinvoiceRequest),
    WaitSendPay(cln::WaitsendpayRequest),
    NewAddr(cln::NewaddrRequest),
    Withdraw(cln::WithdrawRequest),
    KeySend(cln::KeysendRequest),
    FundPsbt(cln::FundpsbtRequest),
    SendPsbt(cln::SendpsbtRequest),
    SignPsbt(cln::SignpsbtRequest),
    UtxoPsbt(cln::UtxopsbtRequest),
    TxDiscard(cln::TxdiscardRequest),
    TxPrepare(cln::TxprepareRequest),
    TxSend(cln::TxsendRequest),
    Disconnect(cln::DisconnectRequest),
    Feerates(cln::FeeratesRequest),
    FundChannel(cln::FundchannelRequest),
    GetRoute(cln::GetrouteRequest),
    ListForwards(cln::ListforwardsRequest),
    ListPays(cln::ListpaysRequest),
    Ping(cln::PingRequest),
    SetChannel(cln::SetchannelRequest),
    SignMessage(cln::SignmessageRequest),
    FetchInvoice(cln::FetchinvoiceRequest),
    Stop(cln::StopRequest),
    ListClosedChannels(cln::ListclosedchannelsRequest),
    StaticBackup(cln::StaticbackupRequest),
    // Greenlight extension
    TrampolinePay(greenlight::TrampolinePayRequest),
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-client/src/signer/model/cln.rs | libs/gl-client/src/signer/model/cln.rs | //
// This file was generated by `gengrpc` from the CLN JSON-Schema.
// Do not edit this file.
//
use super::Request;
pub use crate::pb::cln::*;
use anyhow::anyhow;
use prost::Message;
/// Decode the protobuf payload `p` of a `cln.Node` call into the
/// matching [`Request`] variant, keyed by the gRPC `uri`; unknown
/// URIs are rejected.
///
/// NOTE(review): this file claims to be generated by `gengrpc` —
/// the typo fix below ("/cln.Nonde/" -> "/cln.Node/") should also
/// be applied to the generator/schema so it survives regeneration.
pub fn decode_request(uri: &str, p: &[u8]) -> anyhow::Result<Request> {
    Ok(match uri {
        "/cln.Node/Getinfo" => Request::Getinfo(GetinfoRequest::decode(p)?),
        "/cln.Node/ListPeers" => Request::ListPeers(ListpeersRequest::decode(p)?),
        "/cln.Node/ListFunds" => Request::ListFunds(ListfundsRequest::decode(p)?),
        "/cln.Node/SendPay" => Request::SendPay(SendpayRequest::decode(p)?),
        "/cln.Node/ListChannels" => Request::ListChannels(ListchannelsRequest::decode(p)?),
        "/cln.Node/AddGossip" => Request::AddGossip(AddgossipRequest::decode(p)?),
        // Bug fix: the route was misspelled "/cln.Nonde/AutoCleanOnce",
        // which can never match a real gRPC path for service cln.Node,
        // so AutoCleanOnce requests always fell through to the
        // "Unknown URI" error below.
        "/cln.Node/AutoCleanOnce" => Request::AutoCleanOnce(AutocleanonceRequest::decode(p)?),
        "/cln.Node/CheckMessage" => Request::CheckMessage(CheckmessageRequest::decode(p)?),
        "/cln.Node/Close" => Request::Close(CloseRequest::decode(p)?),
        "/cln.Node/ConnectPeer" => Request::Connect(ConnectRequest::decode(p)?),
        "/cln.Node/CreateInvoice" => Request::CreateInvoice(CreateinvoiceRequest::decode(p)?),
        "/cln.Node/Datastore" => Request::Datastore(DatastoreRequest::decode(p)?),
        "/cln.Node/CreateOnion" => Request::CreateOnion(CreateonionRequest::decode(p)?),
        "/cln.Node/DelDatastore" => Request::DelDatastore(DeldatastoreRequest::decode(p)?),
        "/cln.Node/DelExpiredInvoice" => Request::DelExpiredInvoice(DelinvoiceRequest::decode(p)?),
        "/cln.Node/DelInvoice" => Request::DelInvoice(DelinvoiceRequest::decode(p)?),
        "/cln.Node/Invoice" => Request::Invoice(InvoiceRequest::decode(p)?),
        "/cln.Node/ListDatastore" => Request::ListDatastore(ListdatastoreRequest::decode(p)?),
        "/cln.Node/ListInvoices" => Request::ListInvoices(ListinvoicesRequest::decode(p)?),
        "/cln.Node/SendOnion" => Request::SendOnion(SendonionRequest::decode(p)?),
        "/cln.Node/ListSendPays" => Request::ListSendPays(ListsendpaysRequest::decode(p)?),
        "/cln.Node/ListTransactions" => {
            Request::ListTransactions(ListtransactionsRequest::decode(p)?)
        }
        "/cln.Node/Pay" => Request::Pay(PayRequest::decode(p)?),
        "/cln.Node/ListNodes" => Request::ListNodes(ListnodesRequest::decode(p)?),
        "/cln.Node/WaitAnyInvoice" => Request::WaitAnyInvoice(WaitanyinvoiceRequest::decode(p)?),
        "/cln.Node/WaitInvoice" => Request::WaitInvoice(WaitinvoiceRequest::decode(p)?),
        "/cln.Node/WaitSendPay" => Request::WaitSendPay(WaitsendpayRequest::decode(p)?),
        "/cln.Node/NewAddr" => Request::NewAddr(NewaddrRequest::decode(p)?),
        "/cln.Node/Withdraw" => Request::Withdraw(WithdrawRequest::decode(p)?),
        "/cln.Node/KeySend" => Request::KeySend(KeysendRequest::decode(p)?),
        "/cln.Node/FundPsbt" => Request::FundPsbt(FundpsbtRequest::decode(p)?),
        "/cln.Node/SendPsbt" => Request::SendPsbt(SendpsbtRequest::decode(p)?),
        "/cln.Node/SignPsbt" => Request::SignPsbt(SignpsbtRequest::decode(p)?),
        "/cln.Node/UtxoPsbt" => Request::UtxoPsbt(UtxopsbtRequest::decode(p)?),
        "/cln.Node/TxDiscard" => Request::TxDiscard(TxdiscardRequest::decode(p)?),
        "/cln.Node/TxPrepare" => Request::TxPrepare(TxprepareRequest::decode(p)?),
        "/cln.Node/TxSend" => Request::TxSend(TxsendRequest::decode(p)?),
        "/cln.Node/Disconnect" => Request::Disconnect(DisconnectRequest::decode(p)?),
        "/cln.Node/Feerates" => Request::Feerates(FeeratesRequest::decode(p)?),
        "/cln.Node/FundChannel" => Request::FundChannel(FundchannelRequest::decode(p)?),
        "/cln.Node/GetRoute" => Request::GetRoute(GetrouteRequest::decode(p)?),
        "/cln.Node/ListForwards" => Request::ListForwards(ListforwardsRequest::decode(p)?),
        "/cln.Node/ListPays" => Request::ListPays(ListpaysRequest::decode(p)?),
        "/cln.Node/Ping" => Request::Ping(PingRequest::decode(p)?),
        "/cln.Node/SetChannel" => Request::SetChannel(SetchannelRequest::decode(p)?),
        "/cln.Node/SignMessage" => Request::SignMessage(SignmessageRequest::decode(p)?),
        "/cln.Node/FetchInvoice" => Request::FetchInvoice(FetchinvoiceRequest::decode(p)?),
        "/cln.Node/Stop" => Request::Stop(StopRequest::decode(p)?),
        "/cln.Node/ListClosedChannels" => {
            Request::ListClosedChannels(ListclosedchannelsRequest::decode(p)?)
        }
        "/cln.Node/StaticBackup" => Request::StaticBackup(StaticbackupRequest::decode(p)?),
        "/cln.Node/PreApproveInvoice" => {
            Request::PreApproveInvoice(PreapproveinvoiceRequest::decode(p)?)
        }
        uri => return Err(anyhow!("Unknown URI {}, can't decode payload", uri)),
    })
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/signer.rs | libs/gl-sdk/src/signer.rs | use crate::{Credentials, Error};
use bip39::Mnemonic;
use std::str::FromStr;
use tracing;
/// uniffi-exported wrapper around the gl-client signer, created
/// from a BIP39 mnemonic phrase.
#[derive(uniffi::Object, Clone)]
pub struct Signer {
    // Seed derived from the mnemonic; kept so `authenticate` can
    // rebuild the inner signer with new credentials.
    seed: Vec<u8>,
    pub(crate) inner: gl_client::signer::Signer,
    // None until `authenticate` is called.
    credentials: Option<Credentials>,
}
#[uniffi::export]
impl Signer {
    /// Create a signer from a BIP39 mnemonic `phrase` (mainnet,
    /// empty passphrase, `Nobody` credentials).
    #[uniffi::constructor()]
    fn new(phrase: String) -> Result<Signer, Error> {
        let phrase = Mnemonic::from_str(phrase.as_str()).map_err(|_e| Error::PhraseCorrupted())?;
        // Needless borrow fixed: pass the passphrase as `""`
        // instead of `&""`.
        let seed = phrase.to_seed_normalized("").to_vec();
        // FIXME: We may need to give the signer real credentials to
        // talk to the node too.
        let credentials = gl_client::credentials::Nobody::new();
        let inner = gl_client::signer::Signer::new(
            seed.clone(),
            gl_client::bitcoin::Network::Bitcoin,
            credentials,
        )
        .map_err(|e| Error::Other(e.to_string()))?;
        let credentials = None;
        Ok(Signer {
            seed,
            inner,
            credentials,
        })
    }

    /// Return a copy of this signer that uses the given
    /// credentials; the original is left untouched.
    fn authenticate(&self, creds: &Credentials) -> Result<Signer, Error> {
        let credentials = Some(creds.clone());
        let inner = gl_client::signer::Signer::new(
            self.seed.clone(),
            gl_client::bitcoin::Network::Bitcoin,
            creds.inner.clone(),
        )
        .map_err(|e| Error::Other(e.to_string()))?;
        Ok(Signer {
            inner,
            credentials,
            ..self.clone()
        })
    }

    /// Spawn the signer loop on a dedicated thread with its own
    /// single-threaded tokio runtime; the returned `Handle` can be
    /// used to stop it.
    fn start(&self) -> Result<Handle, Error> {
        let (tx, rx) = tokio::sync::mpsc::channel(1);
        let inner = self.inner.clone();
        std::thread::spawn(move || {
            let runtime = tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
                .expect("building tokio runtime");
            runtime.block_on(async move {
                if let Err(e) = inner.run_forever(rx).await {
                    tracing::error!("Error running signer in thread: {e}")
                }
            })
        });
        Ok(Handle { chan: tx })
    }

    /// The node_id derived from this signer's seed.
    fn node_id(&self) -> Vec<u8> {
        self.inner.node_id()
    }
}
// Not exported through uniffi, internal logic only.
impl Signer {
    /// Run the signer loop until `signal` fires; panics if the loop
    /// itself returns an error.
    async fn run(&self, signal: tokio::sync::mpsc::Receiver<()>) {
        let outcome = self.inner.run_forever(signal).await;
        outcome.expect("Error running signer loop");
    }
}
/// A handle to interact with a signer loop running and processing
/// requests in the background. Used primarily to stop the loop and
/// exiting the signer.
#[derive(uniffi::Object, Clone)]
pub struct Handle {
    // Sending `()` here tells the background loop to shut down.
    chan: tokio::sync::mpsc::Sender<()>,
}
#[uniffi::export]
impl Handle {
    /// Signal the background signer loop to shut down; panics if
    /// the signal cannot be delivered.
    pub fn stop(&self) {
        let sent = self.chan.try_send(());
        sent.expect("sending shutdown signal");
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/node.rs | libs/gl-sdk/src/node.rs | use crate::{credentials::Credentials, util::exec, Error};
use gl_client::credentials::NodeIdProvider;
use gl_client::node::{Client as GlClient, ClnClient, Node as ClientNode};
use gl_client::pb::cln as clnpb;
use tokio::sync::OnceCell;
/// The `Node` is an RPC stub representing the node running in the
/// cloud. It is the main entrypoint to interact with the node.
#[derive(uniffi::Object, Clone)]
#[allow(unused)]
pub struct Node {
// The underlying gl-client node stub used to schedule RPC clients.
inner: ClientNode,
// Lazily initialized core-lightning RPC client (see get_cln_client).
cln_client: OnceCell<ClnClient>,
// Lazily initialized greenlight-protocol RPC client (see get_gl_client).
gl_client: OnceCell<GlClient>,
}
#[uniffi::export]
impl Node {
    /// Build a `Node` RPC stub from device credentials.
    ///
    /// The node id is recovered from the credentials; the actual RPC
    /// clients are created lazily on first use and cached (see the
    /// internal `get_cln_client` / `get_gl_client` helpers).
    #[uniffi::constructor()]
    pub fn new(credentials: &Credentials) -> Result<Self, Error> {
        let node_id = credentials
            .inner
            .node_id()
            .map_err(|_e| Error::UnparseableCreds())?;
        let inner = ClientNode::new(node_id, credentials.inner.clone())
            .expect("infallible client instantiation");
        Ok(Node {
            inner,
            cln_client: OnceCell::const_new(),
            gl_client: OnceCell::const_new(),
        })
    }
    /// Stop the node if it is currently running.
    pub fn stop(&self) -> Result<(), Error> {
        let mut cln_client = exec(self.get_cln_client())?.clone();
        let req = clnpb::StopRequest {};
        // It's ok, the error here is expected and should just be
        // telling us that we've lost the connection. This is to
        // be expected on shutdown, so we clamp this to success.
        let _ = exec(cln_client.stop(req));
        Ok(())
    }
    /// Receive an off-chain payment.
    ///
    /// This method generates a request for a payment, also called an
    /// invoice, that encodes all the information, including amount
    /// and destination, for a prospective sender to send a lightning
    /// payment. The invoice includes negotiation of an LSPS2 / JIT
    /// channel, meaning that if there is no channel sufficient to
    /// receive the requested funds, the node will negotiate an
    /// opening, and when/if executed the payment will cause a channel
    /// to be created, and the incoming payment to be forwarded.
    fn receive(
        &self,
        label: String,
        description: String,
        amount_msat: Option<u64>,
    ) -> Result<ReceiveResponse, Error> {
        let mut gl_client = exec(self.get_gl_client())?.clone();
        // A missing amount is encoded as 0 on the wire.
        let req = gl_client::pb::LspInvoiceRequest {
            amount_msat: amount_msat.unwrap_or_default(),
            description,
            label,
            lsp_id: "".to_owned(),
            token: "".to_owned(),
        };
        let res = exec(gl_client.lsp_invoice(req))
            .map_err(|s| Error::Rpc(s.to_string()))?
            .into_inner();
        Ok(ReceiveResponse { bolt11: res.bolt11 })
    }
    /// Pay a bolt11 invoice.
    ///
    /// When `amount_msat` is `None` no explicit amount is passed to the
    /// RPC (the invoice's own amount applies); otherwise the given
    /// millisatoshi amount is sent along.
    fn send(&self, invoice: String, amount_msat: Option<u64>) -> Result<SendResponse, Error> {
        let mut cln_client = exec(self.get_cln_client())?.clone();
        let req = clnpb::PayRequest {
            amount_msat: amount_msat.map(|msat| clnpb::Amount { msat }),
            bolt11: invoice,
            description: None,
            exclude: vec![],
            exemptfee: None,
            label: None,
            localinvreqid: None,
            maxdelay: None,
            maxfee: None,
            maxfeepercent: None,
            partial_msat: None,
            retry_for: None,
            riskfactor: None,
        };
        exec(cln_client.pay(req))
            .map_err(|e| Error::Rpc(e.to_string()))
            .map(|r| r.into_inner().into())
    }
    /// Send on-chain funds to `destination`.
    ///
    /// `amount_or_all` is either the literal string `"all"`, or a number
    /// with an optional unit suffix: no suffix or `"sat"` means
    /// satoshis, `"msat"` means millisatoshis.
    fn onchain_send(
        &self,
        destination: String,
        amount_or_all: String,
    ) -> Result<OnchainSendResponse, Error> {
        let mut cln_client = exec(self.get_cln_client())?.clone();
        // Decode what the user intends to do. Either we have `all`,
        // or we have an amount that we can parse. Split the argument
        // into its digit run and the remaining (unit) characters.
        let (num, suffix): (String, String) = amount_or_all.chars().partition(|c| c.is_digit(10));
        let num = if num.is_empty() {
            0
        } else {
            // A number too large for u64 is an argument error, not a
            // panic (the previous `unwrap()` aborted on overflow).
            num.parse::<u64>()
                .map_err(|_| Error::Argument("amount_or_all".to_owned(), amount_or_all.clone()))?
        };
        let satoshi = match (num, suffix.as_ref()) {
            // No value suffix (or explicit `sat`): interpret as
            // satoshis. This is an onchain RPC method, hence the sat
            // denomination by default. The wire format is msat, so
            // scale up by 1000 — checked, since the parse above admits
            // values near u64::MAX.
            (n, "") | (n, "sat") => clnpb::AmountOrAll {
                value: Some(clnpb::amount_or_all::Value::Amount(clnpb::Amount {
                    msat: n.checked_mul(1000).ok_or_else(|| {
                        Error::Argument("amount_or_all".to_owned(), amount_or_all.clone())
                    })?,
                })),
            },
            // Explicit `msat` suffix: already in the wire unit, no
            // scaling. (This arm previously multiplied by 1000 as
            // well, inflating msat-denominated withdrawals 1000x.)
            (n, "msat") => clnpb::AmountOrAll {
                value: Some(clnpb::amount_or_all::Value::Amount(clnpb::Amount {
                    msat: n,
                })),
            },
            // The literal string "all": sweep all available funds.
            (0, "all") => clnpb::AmountOrAll {
                value: Some(clnpb::amount_or_all::Value::All(true)),
            },
            (_, _) => return Err(Error::Argument("amount_or_all".to_owned(), amount_or_all)),
        };
        let req = clnpb::WithdrawRequest {
            destination,
            minconf: None,
            feerate: None,
            satoshi: Some(satoshi),
            utxos: vec![],
        };
        exec(cln_client.withdraw(req))
            .map_err(|e| Error::Rpc(e.to_string()))
            .map(|r| r.into_inner().into())
    }
    /// Generate fresh on-chain deposit addresses for this node (the
    /// RPC is asked for all address types; see `OnchainReceiveResponse`).
    fn onchain_receive(&self) -> Result<OnchainReceiveResponse, Error> {
        let mut cln_client = exec(self.get_cln_client())?.clone();
        let req = clnpb::NewaddrRequest {
            addresstype: Some(clnpb::newaddr_request::NewaddrAddresstype::All.into()),
        };
        let res = exec(cln_client.new_addr(req))
            .map_err(|e| Error::Rpc(e.to_string()))?
            .into_inner();
        Ok(res.into())
    }
}
// Not exported through uniffi
impl Node {
async fn get_gl_client<'a>(&'a self) -> Result<&'a GlClient, Error> {
let inner = self.inner.clone();
self.gl_client
.get_or_try_init(|| async { inner.schedule::<GlClient>().await })
.await
.map_err(|e| Error::Rpc(e.to_string()))
}
async fn get_cln_client<'a>(&'a self) -> Result<&'a ClnClient, Error> {
let inner = self.inner.clone();
self.cln_client
.get_or_try_init(|| async { inner.schedule::<ClnClient>().await })
.await
.map_err(|e| Error::Rpc(e.to_string()))
}
}
#[allow(unused)]
#[derive(uniffi::Object)]
// Result of an on-chain withdrawal, mirroring clnpb::WithdrawResponse.
struct OnchainSendResponse {
// Raw transaction bytes as returned by the withdraw RPC.
tx: Vec<u8>,
// Transaction id bytes.
txid: Vec<u8>,
// PSBT representation of the withdrawal transaction.
psbt: String,
}
impl From<clnpb::WithdrawResponse> for OnchainSendResponse {
    /// Straight field-by-field translation of the RPC response.
    fn from(other: clnpb::WithdrawResponse) -> Self {
        let clnpb::WithdrawResponse { tx, txid, psbt, .. } = other;
        Self { tx, txid, psbt }
    }
}
#[allow(unused)]
#[derive(uniffi::Object)]
// Fresh deposit addresses; empty string when the RPC omitted a kind
// (see the From<clnpb::NewaddrResponse> impl below).
struct OnchainReceiveResponse {
// Bech32-encoded address returned by the newaddr RPC.
bech32: String,
// P2TR address returned by the newaddr RPC.
p2tr: String,
}
impl From<clnpb::NewaddrResponse> for OnchainReceiveResponse {
    // Flatten the optional addresses; absent ones become empty strings.
    fn from(other: clnpb::NewaddrResponse) -> Self {
        let bech32 = other.bech32.unwrap_or_default();
        let p2tr = other.p2tr.unwrap_or_default();
        Self { bech32, p2tr }
    }
}
#[allow(unused)]
#[derive(uniffi::Object)]
// Result of a lightning payment, derived from clnpb::PayResponse.
struct SendResponse {
// Final state of the payment (see `PayStatus`).
status: PayStatus,
// Payment preimage returned by the RPC.
preimage: Vec<u8>,
// Amount of the payment in millisatoshi.
amount_msat: u64,
// Amount actually sent in millisatoshi (presumably includes routing
// fees — confirm against the CLN `pay` documentation).
amount_sent_msat: u64,
// Number of parts the payment was split into.
parts: u32,
}
impl From<clnpb::PayResponse> for SendResponse {
    /// Convert the raw RPC response into the SDK's payment result.
    ///
    /// The amount fields are optional on the wire; the previous
    /// `unwrap()` calls panicked when the server omitted them. Since
    /// `From` cannot report an error, a missing amount now falls back
    /// to 0 msat instead of aborting.
    fn from(other: clnpb::PayResponse) -> Self {
        Self {
            status: other.status.into(),
            preimage: other.payment_preimage,
            amount_msat: other.amount_msat.map_or(0, |a| a.msat),
            amount_sent_msat: other.amount_sent_msat.map_or(0, |a| a.msat),
            parts: other.parts,
        }
    }
}
#[allow(unused)]
#[derive(uniffi::Object)]
// Result of `Node::receive`.
struct ReceiveResponse {
// The bolt11 invoice string produced by the LSP invoice call.
bolt11: String,
}
#[derive(uniffi::Enum)]
// Payment state as exposed through uniffi. The explicit discriminants
// mirror the wire encoding decoded by the `From<i32>` impl below.
// NOTE(review): variant names are SCREAMING_CASE rather than Rust's
// usual CamelCase; kept as-is since the From impls reference them.
enum PayStatus {
// Mirrors clnpb::pay_response::PayStatus::Complete.
COMPLETE = 0,
// Mirrors clnpb::pay_response::PayStatus::Pending.
PENDING = 1,
// Mirrors clnpb::pay_response::PayStatus::Failed.
FAILED = 2,
}
impl From<clnpb::pay_response::PayStatus> for PayStatus {
    // Map the protobuf enum onto the uniffi-exported one, variant by
    // variant, in declaration order.
    fn from(other: clnpb::pay_response::PayStatus) -> Self {
        use clnpb::pay_response::PayStatus as Pb;
        match other {
            Pb::Complete => Self::COMPLETE,
            Pb::Pending => Self::PENDING,
            Pb::Failed => Self::FAILED,
        }
    }
}
impl From<i32> for PayStatus {
fn from(i: i32) -> Self {
match i {
0 => PayStatus::COMPLETE,
1 => PayStatus::PENDING,
2 => PayStatus::FAILED,
o => panic!("Unknown pay_status {}", o),
}
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/lib.rs | libs/gl-sdk/src/lib.rs | uniffi::setup_scaffolding!();
#[derive(uniffi::Error, thiserror::Error, Debug)]
pub enum Error {
/// Registration attempted for a node that already exists.
#[error("There is already a node for node_id={0}, maybe you want to recover?")]
DuplicateNode(String),
/// Recovery or access attempted for an unknown node.
#[error("There is no node with node_id={0}, maybe you need to register first?")]
NoSuchNode(String),
/// Stored credentials could not be decoded.
#[error("The provided credentials could not be parsed, please recover.")]
UnparseableCreds(),
/// A recovery passphrase failed its checksum.
#[error("The passphrase you provided fails the checksum")]
PhraseCorrupted(),
/// An RPC call to the node or scheduler failed; payload is the
/// underlying error text.
#[error("Error calling the rpc: {0}")]
Rpc(String),
/// A caller-supplied argument was rejected; payload is (name, value).
#[error("Invalid argument: {0}={1}")]
Argument(String, String),
/// Catch-all for errors with no dedicated variant.
#[error("Generic error: {0}")]
Other(String),
}
mod credentials;
mod node;
mod scheduler;
mod signer;
mod util;
pub use crate::{
credentials::Credentials,
node::Node,
scheduler::Scheduler,
signer::{Handle, Signer},
};
#[derive(uniffi::Enum, Debug)]
// Network selector exposed through uniffi; translated to
// gl_client::bitcoin::Network by the From impl below.
pub enum Network {
// Bitcoin mainnet.
BITCOIN,
// Local regression-test network.
REGTEST,
}
impl From<Network> for gl_client::bitcoin::Network {
    // Translate the uniffi-exported network selector into the
    // gl_client bitcoin crate's network enum.
    fn from(n: Network) -> gl_client::bitcoin::Network {
        use gl_client::bitcoin::Network as BtcNetwork;
        match n {
            Network::BITCOIN => BtcNetwork::Bitcoin,
            Network::REGTEST => BtcNetwork::Regtest,
        }
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/util.rs | libs/gl-sdk/src/util.rs | use ::tokio::runtime::{Builder, Runtime};
use once_cell::sync::OnceCell;
use std::future::Future;
// Process-wide Tokio runtime, created lazily on first use.
static TOKIO_RUNTIME: OnceCell<Runtime> = OnceCell::new();
/// Return the shared Tokio runtime, creating it on first use.
///
/// Previously declared as `get_runtime<'a>() -> &'a Runtime`, which is
/// just a roundabout spelling of `'static` here: the runtime lives in a
/// `static` cell, so callers may hold the reference indefinitely.
pub(crate) fn get_runtime() -> &'static Runtime {
    TOKIO_RUNTIME.get_or_init(|| {
        Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("Unable to build Tokio runtime")
    })
}
/// Run the future to completion on the shared runtime, blocking the
/// calling thread until it resolves.
///
/// The explicit `Sized` bound was dropped: it is already implied for
/// any by-value type parameter.
pub(crate) fn exec<F, T>(f: F) -> T
where
    F: Future<Output = T> + Send,
    T: Send,
{
    get_runtime().block_on(f)
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/credentials.rs | libs/gl-sdk/src/credentials.rs | use crate::Error;
use gl_client::credentials::Device as DeviceCredentials;
/// `Credentials` is a container for `node_id`, the mTLS client
/// certificate used to authenticate a client against a node, as well
/// as the seed secret if present. If no seed is present in the
/// credentials, then the `Client` will not start a signer in the
/// background.
#[derive(uniffi::Object, Clone)]
pub struct Credentials {
// The wrapped gl-client device credentials; crate-visible so sibling
// modules (node, scheduler, signer) can pass it to gl_client APIs.
pub(crate) inner: DeviceCredentials,
}
#[uniffi::export]
impl Credentials {
/// Parse credentials from their serialized byte representation.
///
/// NOTE(review): `DeviceCredentials::from_bytes` is treated as
/// infallible here (the Result is always `Ok`); confirm it cannot
/// panic on malformed input.
#[uniffi::constructor()]
pub(crate) fn load(raw: Vec<u8>) -> Result<Credentials, Error> {
Ok(Self {
inner: DeviceCredentials::from_bytes(raw),
})
}
/// Serialize the credentials to bytes, suitable for a later `load`.
pub(crate) fn save(&self) -> Result<Vec<u8>, Error> {
Ok(self.inner.to_bytes())
}
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-sdk/src/scheduler.rs | libs/gl-sdk/src/scheduler.rs | use crate::{credentials::Credentials, signer::Signer, util::exec, Error};
#[derive(uniffi::Object, Clone)]
// Front-end to the greenlight scheduler service, used to register and
// recover nodes.
pub struct Scheduler {
// Always `None` currently (see `new`); reserved for authenticated
// scheduler calls.
credentials: Option<Credentials>,
// Network the scheduler operates on.
network: gl_client::bitcoin::Network,
}
#[uniffi::export]
impl Scheduler {
    /// Create a `Scheduler` instance configured with the Greenlight
    /// production service pre-configured.
    #[uniffi::constructor()]
    pub fn new(network: crate::Network) -> Result<Scheduler, Error> {
        // We use the nobody credentials since there is no
        // authenticated method we expose at the moment.
        let creds = None;
        let network: gl_client::bitcoin::Network = network.into();
        Ok(Scheduler {
            credentials: creds,
            network,
        })
    }
    /// Register a new node, proving ownership of the seed via `signer`.
    ///
    /// `code` is an optional invite code. On success the scheduler hands
    /// back device credentials for authenticating future node calls.
    pub fn register(&self, signer: &Signer, code: Option<String>) -> Result<Credentials, Error> {
        exec(async move {
            let inner = gl_client::scheduler::Scheduler::new(
                self.network,
                gl_client::credentials::Nobody::new(),
            )
            .await
            .map_err(|e| Error::Other(e.to_string()))?;
            let res = inner
                .register(&signer.inner, code)
                .await
                // `to_string()` already yields an owned String; the
                // previous extra `.clone()` was redundant.
                .map_err(|e| Error::Other(e.to_string()))?;
            Credentials::load(res.creds).map_err(|_e| Error::UnparseableCreds())
        })
    }
    /// Recover access to an existing node, proving ownership of the
    /// seed via `signer`.
    pub fn recover(&self, signer: &Signer) -> Result<Credentials, Error> {
        exec(async move {
            let inner = gl_client::scheduler::Scheduler::new(
                self.network,
                gl_client::credentials::Nobody::new(),
            )
            .await
            .map_err(|e| Error::Other(e.to_string()))?;
            let res = inner
                .recover(&signer.inner)
                .await
                .map_err(|e| Error::Other(e.to_string()))?;
            Credentials::load(res.creds).map_err(|_e| Error::UnparseableCreds())
        })
    }
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Blockstream/greenlight | https://github.com/Blockstream/greenlight/blob/28e98f8e093100e576415734630cff4ea9f3421e/libs/gl-cli/src/signer.rs | libs/gl-cli/src/signer.rs | use crate::error::{Error, Result};
use crate::util;
use clap::Subcommand;
use core::fmt::Debug;
use gl_client::signer::Signer;
use lightning_signer::bitcoin::Network;
use std::path::Path;
use tokio::{join, signal};
use util::{CREDENTIALS_FILE_NAME, SEED_FILE_NAME};
// Configuration shared by the signer subcommands.
pub struct Config<P: AsRef<Path>> {
// Directory expected to hold the seed and credentials files.
pub data_dir: P,
// Bitcoin network the signer operates on.
pub network: Network,
}
#[derive(Subcommand, Debug)]
// CLI subcommands of the `signer` command group. The `///` doc
// comments double as the clap help text shown to users.
pub enum Command {
/// Starts a signer that connects to greenlight
Run,
/// Prints the version of the signer used
Version,
}
/// Dispatch a parsed `Command` to its handler.
pub async fn command_handler<P: AsRef<Path>>(cmd: Command, config: Config<P>) -> Result<()> {
match cmd {
Command::Run => run_handler(config).await,
Command::Version => version(config).await,
}
}
/// Run the signer until interrupted with Ctrl-C.
///
/// Loads the seed and device credentials from the data directory,
/// starts the signer loop on a background task, and shuts it down
/// cleanly when Ctrl-C is received.
async fn run_handler<P: AsRef<Path>>(config: Config<P>) -> Result<()> {
    // Check if we can find a seed file, if we can not find one, we need
    // to register first. (Replaces the old is_none()/unwrap() pair.)
    let seed_path = config.data_dir.as_ref().join(SEED_FILE_NAME);
    let seed = match util::read_seed(&seed_path) {
        Some(seed) => seed,
        None => {
            println!("Seed not found");
            return Err(Error::SeedNotFoundError(format!(
                "could not read from {}",
                seed_path.display()
            )));
        }
    };
    // The signer authenticates against greenlight with the stored
    // device credentials.
    let creds_path = config.data_dir.as_ref().join(CREDENTIALS_FILE_NAME);
    let creds = match util::read_credentials(&creds_path) {
        Some(c) => c,
        None => {
            return Err(Error::CredentialsNotFoundError(format!(
                "could not read from {}",
                creds_path.display()
            )))
        }
    };
    let signer = Signer::new(seed, config.network, creds.clone())
        .map_err(|e| Error::custom(format!("Failed to create signer: {}", e)))?;
    // Run the signer loop in the background; sending on `tx` asks it
    // to shut down.
    let (tx, rx) = tokio::sync::mpsc::channel(1);
    let handle = tokio::spawn(async move {
        let _ = signer.run_forever(rx).await;
    });
    // Block until Ctrl-C, then signal the signer and wait for it to
    // finish.
    _ = signal::ctrl_c().await.map_err(|e| Error::custom(e))?;
    _ = tx.send(()).await;
    _ = join!(handle);
    Ok(())
}
/// Print the version string reported by the signer library.
///
/// A seed is still required to construct the signer, even though only
/// the version is queried.
async fn version<P: AsRef<Path>>(config: Config<P>) -> Result<()> {
    // Check if we can find a seed file, if we can not find one, we need
    // to register first. (Replaces the old is_none()/unwrap() pair.)
    let seed_path = config.data_dir.as_ref().join(SEED_FILE_NAME);
    let seed = match util::read_seed(&seed_path) {
        Some(seed) => seed,
        None => {
            println!("Seed not found");
            return Err(Error::SeedNotFoundError(format!(
                "could not read from {}",
                seed_path.display()
            )));
        }
    };
    // The version query needs no real identity, so the anonymous
    // "Nobody" credentials suffice; they are consumed directly (the
    // previous `creds.clone()` was redundant).
    let creds = gl_client::credentials::Nobody::new();
    let signer = gl_client::signer::Signer::new(seed, config.network, creds)
        .map_err(|e| Error::custom(format!("Failed to create signer: {}", e)))?;
    println!("{}", signer.version());
    Ok(())
}
| rust | MIT | 28e98f8e093100e576415734630cff4ea9f3421e | 2026-01-04T20:21:42.079439Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.