repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/src/load.rs | src/load.rs | use std::io::{self, Read};
use std::marker::PhantomData;
use std::path::PathBuf;
use bevy_scene::DynamicScene;
use moonshine_util::expect::{expect_deferred, ExpectDeferred};
use moonshine_util::Static;
use serde::de::DeserializeSeed;
use bevy_ecs::entity::EntityHashMap;
use bevy_ecs::prelude::*;
use bevy_ecs::query::QueryFilter;
use bevy_log::prelude::*;
use bevy_scene::{ron, serde::SceneDeserializer, SceneSpawnError};
use moonshine_util::event::{OnSingle, SingleEvent, TriggerSingle};
use thiserror::Error;
use crate::save::Save;
use crate::{MapComponent, SceneMapper};
/// A [`Component`] which marks its [`Entity`] to be despawned prior to load.
///
/// # Usage
/// When saving game state, it is often undesirable to save visual and aesthetic elements of the game.
/// Elements such as transforms, camera settings, scene hierarchy, or UI elements are typically either
/// spawned at game start, or added during initialization of the game data they represent.
///
/// This component may be used on such entities to despawn them prior to loading.
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use moonshine_save::prelude::*;
///
/// #[derive(Bundle)]
/// struct PlayerBundle {
/// player: Player,
/// /* Saved Player Data */
/// save: Save,
/// }
///
/// #[derive(Component, Default, Reflect)]
/// #[reflect(Component)]
/// struct Player;
///
/// #[derive(Component)] // <-- Not serialized!
/// struct PlayerSprite(Entity);
///
/// #[derive(Bundle, Default)]
/// struct PlayerSpriteBundle {
/// /* Player Visuals/Aesthetics */
/// unload: Unload,
/// }
///
/// fn spawn_player_sprite(query: Query<Entity, Added<Player>>, mut commands: Commands) {
/// for entity in &query {
/// let sprite = PlayerSprite(commands.spawn(PlayerSpriteBundle::default()).id());
/// commands.entity(entity).insert(sprite);
/// }
/// }
/// ```
#[derive(Component, Default, Clone)]
pub struct Unload;
/// A trait used to trigger a [`LoadEvent`] via [`Commands`] or [`World`].
pub trait TriggerLoad {
/// Triggers the given [`LoadEvent`].
#[doc(alias = "trigger_single")]
fn trigger_load(self, event: impl LoadEvent);
}
impl TriggerLoad for &mut Commands<'_, '_> {
fn trigger_load(self, event: impl LoadEvent) {
self.trigger_single(event);
}
}
impl TriggerLoad for &mut World {
fn trigger_load(self, event: impl LoadEvent) {
self.trigger_single(event);
}
}
/// A [`QueryFilter`] which determines which entities should be unloaded before the load process begins.
pub type DefaultUnloadFilter = Or<(With<Save>, With<Unload>)>;
/// A [`SingleEvent`] which starts the load process with the given parameters.
///
/// See also:
/// - [`trigger_load`](TriggerLoad::trigger_load)
/// - [`trigger_single`](TriggerSingle::trigger_single)
/// - [`LoadWorld`]
pub trait LoadEvent: SingleEvent {
/// A [`QueryFilter`] used as the initial filter for selecting entities to unload.
type UnloadFilter: QueryFilter;
/// Returns the [`LoadInput`] of the load process.
fn input(&mut self) -> LoadInput;
/// Called once before the load process starts.
///
/// This is useful if you want to modify the world just before loading.
fn before_load(&mut self, _world: &mut World) {}
/// Called once before unloading entities.
///
/// All given entities will be despawned after this call.
/// This is useful if you want to update the world state as a result of unloading these entities.
fn before_unload(&mut self, _world: &mut World, _entities: &[Entity]) {}
/// Called for all entities after they have been loaded.
///
/// This is useful to undo any modifications done before loading.
/// You also have access to [`Loaded`] here for any additional post-processing before [`OnLoad`] is triggered.
fn after_load(&mut self, _world: &mut World, _result: &LoadResult) {}
}
/// A generic [`LoadEvent`] which loads the world from a file or stream.
pub struct LoadWorld<U: QueryFilter = DefaultUnloadFilter> {
/// The input data used to load the world.
pub input: LoadInput,
/// A [`SceneMapper`] used to map components after the load process.
pub mapper: SceneMapper,
#[doc(hidden)]
pub unload: PhantomData<U>,
}
impl<U: QueryFilter> LoadWorld<U> {
/// Creates a new [`LoadWorld`] with the given input and mapper.
pub fn new(input: LoadInput, mapper: SceneMapper) -> Self {
LoadWorld {
input,
mapper,
unload: PhantomData,
}
}
/// Creates a new [`LoadWorld`] which unloads entities matching the given
/// [`QueryFilter`] before the file at given path.
pub fn from_file(path: impl Into<PathBuf>) -> Self {
LoadWorld {
input: LoadInput::File(path.into()),
mapper: SceneMapper::default(),
unload: PhantomData,
}
}
/// Creates a new [`LoadWorld`] which unloads entities matching the given
/// [`QueryFilter`] before loading from the given [`Read`] stream.
pub fn from_stream(stream: impl LoadStream) -> Self {
LoadWorld {
input: LoadInput::Stream(Box::new(stream)),
mapper: SceneMapper::default(),
unload: PhantomData,
}
}
/// Maps the given [`Component`] into another using a [component mapper](MapComponent) after loading.
pub fn map_component<T: Component>(self, m: impl MapComponent<T>) -> Self {
LoadWorld {
mapper: self.mapper.map(m),
..self
}
}
}
impl LoadWorld {
/// Creates a new [`LoadWorld`] event which unloads default entities (with [`Unload`] or [`Save`])
/// before loading the file at the given path.
pub fn default_from_file(path: impl Into<PathBuf>) -> Self {
Self::from_file(path)
}
/// Creates a new [`LoadWorld`] event which unloads default entities (with [`Unload`] or [`Save`])
/// before loading from the given [`Read`] stream.
pub fn default_from_stream(stream: impl LoadStream) -> Self {
Self::from_stream(stream)
}
}
impl<U: QueryFilter> SingleEvent for LoadWorld<U> where U: Static {}
impl<U: QueryFilter> LoadEvent for LoadWorld<U>
where
U: Static,
{
type UnloadFilter = U;
fn input(&mut self) -> LoadInput {
self.input.consume().unwrap()
}
fn before_load(&mut self, world: &mut World) {
world.insert_resource(ExpectDeferred);
}
fn after_load(&mut self, world: &mut World, result: &LoadResult) {
if let Ok(loaded) = result {
for entity in loaded.entities() {
let Ok(entity) = world.get_entity_mut(entity) else {
// Some entities may be invalid during load. See `unsaved.rs` test.
continue;
};
self.mapper.replace(entity);
}
}
expect_deferred(world);
}
}
/// Input of the load process.
pub enum LoadInput {
/// Load from a file at the given path.
File(PathBuf),
/// Load from a [`Read`] stream.
Stream(Box<dyn LoadStream>),
/// Load from a [`DynamicScene`].
///
/// This is useful if you would like to deserialize the scene manually from any data source.
Scene(DynamicScene),
#[doc(hidden)]
Invalid,
}
impl LoadInput {
/// Creates a new [`LoadInput`] which loads from a file at the given path.
pub fn file(path: impl Into<PathBuf>) -> Self {
Self::File(path.into())
}
/// Creates a new [`LoadInput`] which loads from a [`Read`] stream.
pub fn stream<S: LoadStream + 'static>(stream: S) -> Self {
Self::Stream(Box::new(stream))
}
/// Invalidates this [`LoadInput`] and returns it if it was valid.
pub fn consume(&mut self) -> Option<LoadInput> {
let input = std::mem::replace(self, LoadInput::Invalid);
if let LoadInput::Invalid = input {
return None;
}
Some(input)
}
}
/// Alias for a `'static` [`Read`] stream.
pub trait LoadStream: Read
where
Self: Static,
{
}
impl<S: Read> LoadStream for S where S: Static {}
/// An [`Event`] triggered at the end of a successful load process.
///
/// This event contains the loaded entity map.
#[derive(Event)]
pub struct Loaded {
/// The map of all loaded entities and their new entity IDs.
pub entity_map: EntityHashMap<Entity>,
}
impl Loaded {
/// Iterates over all loaded entities.
///
/// Note that not all of these entities may be valid. This would indicate an error with save data.
/// See `unsaved.rs` test for an example of how this may happen.
pub fn entities(&self) -> impl Iterator<Item = Entity> + '_ {
self.entity_map.values().copied()
}
}
#[doc(hidden)]
#[deprecated(since = "0.5.2", note = "use `Loaded` instead")]
pub type OnLoad = Loaded;
/// An error which indicates a failure during the load process.
#[derive(Error, Debug)]
pub enum LoadError {
/// Indicates a failure to access the saved data.
#[error("Failed to read world: {0}")]
Io(io::Error),
/// Indicates a deserialization error.
#[error("Failed to deserialize world: {0}")]
Ron(ron::Error),
/// Indicates a failure to reconstruct the world from the loaded data.
#[error("Failed to spawn scene: {0}")]
Scene(SceneSpawnError),
}
impl From<io::Error> for LoadError {
fn from(e: io::Error) -> Self {
Self::Io(e)
}
}
impl From<ron::de::SpannedError> for LoadError {
fn from(e: ron::de::SpannedError) -> Self {
Self::Ron(e.into())
}
}
impl From<ron::Error> for LoadError {
fn from(e: ron::Error) -> Self {
Self::Ron(e)
}
}
impl From<SceneSpawnError> for LoadError {
fn from(e: SceneSpawnError) -> Self {
Self::Scene(e)
}
}
/// [`Result`] of a [`LoadEvent`].
pub type LoadResult = Result<Loaded, LoadError>;
/// An [`Observer`] which loads the world when a [`LoadWorld`] event is triggered.
pub fn load_on_default_event(event: OnSingle<LoadWorld>, commands: Commands) {
load_on(event, commands);
}
/// An [`Observer`] which loads the world when the given [`LoadEvent`] is triggered.
pub fn load_on<E: LoadEvent>(event: OnSingle<E>, mut commands: Commands) {
commands.queue_handled(LoadCommand(event.consume().unwrap()), |err, ctx| {
error!("load failed: {err:?} ({ctx})");
});
}
fn load_world<E: LoadEvent>(mut event: E, world: &mut World) -> LoadResult {
// Notify
event.before_load(world);
// Deserialize
let scene = match event.input() {
LoadInput::File(path) => {
let bytes = std::fs::read(&path)?;
let mut deserializer = ron::Deserializer::from_bytes(&bytes)?;
let type_registry = &world.resource::<AppTypeRegistry>().read();
let scene_deserializer = SceneDeserializer { type_registry };
scene_deserializer.deserialize(&mut deserializer).unwrap()
}
LoadInput::Stream(mut stream) => {
let mut bytes = Vec::new();
stream.read_to_end(&mut bytes)?;
let mut deserializer = ron::Deserializer::from_bytes(&bytes)?;
let type_registry = &world.resource::<AppTypeRegistry>().read();
let scene_deserializer = SceneDeserializer { type_registry };
scene_deserializer.deserialize(&mut deserializer)?
}
LoadInput::Scene(scene) => scene,
LoadInput::Invalid => {
panic!("LoadInput is invalid");
}
};
// Unload
let entities: Vec<_> = world
.query_filtered::<Entity, E::UnloadFilter>()
.iter(world)
.collect();
event.before_unload(world, &entities);
for entity in entities {
if let Ok(entity) = world.get_entity_mut(entity) {
entity.despawn();
}
}
// Load
let mut entity_map = EntityHashMap::default();
scene.write_to_world(world, &mut entity_map)?;
debug!("loaded {} entities", entity_map.len());
let result = Ok(Loaded { entity_map });
event.after_load(world, &result);
result
}
struct LoadCommand<E>(E);
impl<E: LoadEvent> Command<Result<(), LoadError>> for LoadCommand<E> {
fn apply(self, world: &mut World) -> Result<(), LoadError> {
let loaded = load_world(self.0, world)?;
world.trigger(loaded);
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::fs::*;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use super::*;
pub const DATA: &str = "(
resources: {},
entities: {
4294967293: (
components: {
\"moonshine_save::load::tests::Foo\": (),
},
),
},
)";
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
#[require(Save)]
struct Foo;
fn app() -> App {
let mut app = App::new();
app.add_plugins(MinimalPlugins).register_type::<Foo>();
app
}
#[test]
fn test_load_file() {
#[derive(Resource)]
struct EventTriggered;
pub const PATH: &str = "test_load_file.ron";
write(PATH, DATA).unwrap();
let mut app = app();
app.add_observer(load_on_default_event);
app.add_observer(|_: On<Loaded>, mut commands: Commands| {
commands.insert_resource(EventTriggered);
});
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
commands.trigger_load(LoadWorld::default_from_file(PATH));
});
let world = app.world_mut();
assert!(world.contains_resource::<EventTriggered>());
assert!(world
.query_filtered::<(), With<Foo>>()
.single(world)
.is_ok());
remove_file(PATH).unwrap();
}
#[test]
fn test_load_stream() {
pub const PATH: &str = "test_load_stream.ron";
write(PATH, DATA).unwrap();
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
commands.spawn((Foo, Save));
commands.trigger_load(LoadWorld::default_from_stream(File::open(PATH).unwrap()));
});
let data = read_to_string(PATH).unwrap();
assert!(data.contains("Foo"));
remove_file(PATH).unwrap();
}
#[test]
fn test_load_map_component() {
pub const PATH: &str = "test_load_map_component.ron";
write(PATH, DATA).unwrap();
#[derive(Component)]
struct Bar; // Not serializable
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
commands.trigger_load(LoadWorld::default_from_file(PATH).map_component(|_: &Foo| Bar));
});
let world = app.world_mut();
assert!(world
.query_filtered::<(), With<Bar>>()
.single(world)
.is_ok());
assert!(world.query_filtered::<(), With<Foo>>().iter(world).count() == 0);
remove_file(PATH).unwrap();
}
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/tests/mapper.rs | tests/mapper.rs | use std::fs;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use moonshine_save::prelude::*;
use moonshine_util::prelude::*;
const SAVE_PATH: &str = "test_mapper.ron";
#[derive(Bundle)]
struct FooBundle {
foo: Foo,
bar: Bar,
save: Save,
}
impl FooBundle {
fn new(secret: u32) -> Self {
Self {
foo: Foo(Box::new(secret)),
bar: Bar,
save: Save,
}
}
}
#[derive(Component)]
#[require(Save)]
struct Foo(Box<dyn Secret>); // Not serializable
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
struct SerializedFoo(u32);
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
#[require(Save)]
struct Bar;
trait Secret: Static {
fn secret(&self) -> u32;
}
impl Secret for u32 {
fn secret(&self) -> u32 {
*self
}
}
fn app() -> App {
let mut app = App::new();
app.register_type::<Bar>()
.register_type::<SerializedFoo>()
.add_plugins(MinimalPlugins);
app
}
#[test]
fn main() {
{
let mut app = app();
app.add_observer(save_on_default_event);
let entity = app
.world_mut()
.run_system_once(|mut commands: Commands| {
// Spawn some entities
let entity = commands.spawn(FooBundle::new(42)).id();
commands.trigger_save(
SaveWorld::default_into_file(SAVE_PATH)
.map_component(|Foo(data): &Foo| SerializedFoo(data.secret())),
);
entity
})
.unwrap();
// Check pre-conditions
let world = app.world_mut();
assert_eq!(world.query::<&Foo>().single(world).unwrap().0.secret(), 42);
assert!(world.entity(entity).contains::<Bar>());
assert!(world.entity(entity).contains::<Save>());
assert!(!world.entity(entity).contains::<SerializedFoo>());
// Ensure file was written to disk
assert!(fs::read(SAVE_PATH).is_ok());
}
{
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
// Spawn an entity to offset indices
commands.spawn_empty();
// Load
commands.trigger_load(
LoadWorld::default_from_file(SAVE_PATH)
.map_component(|&SerializedFoo(data): &SerializedFoo| Foo(Box::new(data))),
);
});
let world = app.world_mut();
let entity = world
.query_filtered::<Entity, With<Bar>>()
.single(world)
.unwrap();
assert_eq!(world.query::<&Foo>().single(world).unwrap().0.secret(), 42);
assert!(world.entity(entity).contains::<Bar>());
assert!(world.entity(entity).contains::<Save>());
assert!(!world.entity(entity).contains::<SerializedFoo>());
fs::remove_file(SAVE_PATH).unwrap();
}
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/tests/unsaved.rs | tests/unsaved.rs | use std::fs;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use moonshine_save::prelude::*;
const SAVE_PATH: &str = "test_unsaved.ron";
fn app() -> App {
let mut app = App::new();
app.add_plugins(MinimalPlugins);
app
}
#[test]
fn main() {
{
let mut app = app();
app.add_observer(save_on_default_event);
let entity = app
.world_mut()
.run_system_once(|mut commands: Commands| {
let entity = commands
.spawn(Save)
.with_children(|parent| {
parent.spawn((Name::new("A"), Save));
parent.spawn(Name::new("B")); // !!! DANGER: Unsaved, referenced entity
})
.id();
commands.trigger_save(SaveWorld::default_into_file(SAVE_PATH));
entity
})
.unwrap();
let world = app.world();
let children = world.get::<Children>(entity).unwrap();
assert_eq!(children.iter().count(), 2);
for child in children.iter() {
let parent = world.get::<ChildOf>(child).unwrap().parent();
assert_eq!(parent, entity);
}
}
{
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
// Spawn an entity to offset indices
commands.spawn_empty();
// Load
commands.trigger_load(LoadWorld::default_from_file(SAVE_PATH));
});
let world = app.world_mut();
let (_, children) = world.query::<(Entity, &Children)>().single(world).unwrap();
assert_eq!(children.iter().count(), 2); // !!! DANGER: One of the entities must be broken
let mut found_broken = false;
for child in children.iter() {
found_broken |= world.get::<Name>(child).is_none();
}
assert!(found_broken);
}
fs::remove_file(SAVE_PATH).unwrap();
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/tests/resource.rs | tests/resource.rs | use std::fs;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use moonshine_save::prelude::*;
const SAVE_PATH: &str = "test_resource.ron";
#[derive(Resource, Default, Reflect)]
#[reflect(Resource)]
struct Foo;
fn app() -> App {
let mut app = App::new();
app.register_type::<Foo>().add_plugins(MinimalPlugins);
app
}
#[test]
fn main() {
{
let mut app = app();
app.add_observer(save_on_default_event);
app.insert_resource(Foo);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
commands
.trigger_save(SaveWorld::default_into_file(SAVE_PATH).include_resource::<Foo>());
});
// Check pre-conditions
assert!(app.world().contains_resource::<Foo>());
// Ensure file was written to disk
assert!(fs::read(SAVE_PATH).is_ok());
}
{
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
commands.trigger_load(LoadWorld::default_from_file(SAVE_PATH));
});
assert!(app.world().contains_resource::<Foo>());
fs::remove_file(SAVE_PATH).unwrap();
}
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/tests/hierarchy.rs | tests/hierarchy.rs | use std::fs;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use moonshine_save::prelude::*;
const SAVE_PATH: &str = "test_hierarchy.ron";
fn app() -> App {
let mut app = App::new();
app.add_plugins(MinimalPlugins);
app
}
fn main() {
{
let mut app = app();
app.add_observer(save_on_default_event);
let entity = app
.world_mut()
.run_system_once(|mut commands: Commands| {
let entity = commands
.spawn(Save)
.with_children(|parent| {
parent.spawn(Save);
parent.spawn(Save);
})
.id();
commands.trigger_save(SaveWorld::default_into_file(SAVE_PATH));
entity
})
.unwrap();
let world = app.world();
let children = world.get::<Children>(entity).unwrap();
assert_eq!(children.iter().count(), 2);
for child in children.iter() {
let parent = world.get::<ChildOf>(child).unwrap().0;
assert_eq!(parent, entity);
}
}
{
let data = fs::read_to_string(SAVE_PATH).unwrap();
assert!(data.contains("Parent"));
assert!(data.contains("Children"));
}
{
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
// Spawn an entity to offset indices
commands.spawn_empty();
// Load
commands.trigger_load(LoadWorld::default_from_file(SAVE_PATH));
});
let world = app.world_mut();
let (entity, children) = world.query::<(Entity, &Children)>().single(world).unwrap();
assert_eq!(children.iter().count(), 2);
for child in children.iter() {
let parent = world.get::<ChildOf>(child).unwrap().0;
assert_eq!(parent, entity);
}
}
fs::remove_file(SAVE_PATH).unwrap();
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/tests/basic.rs | tests/basic.rs | use std::fs;
use bevy::prelude::*;
use bevy_ecs::system::RunSystemOnce;
use moonshine_save::prelude::*;
const SAVE_PATH: &str = "test_basic.ron";
#[derive(Bundle)]
struct FooBundle {
foo: Foo,
bar: FooBar,
save: Save,
}
#[derive(Bundle, Default)]
struct BarBundle {
bar: Bar,
save: Save,
}
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
#[require(Save)]
struct Foo(u32);
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
#[require(Save)]
struct Bar;
#[derive(Component, MapEntities, Reflect)]
#[reflect(Component, MapEntities)]
struct FooBar(#[entities] Entity);
impl FromWorld for FooBar {
fn from_world(_: &mut World) -> Self {
Self(Entity::PLACEHOLDER)
}
}
fn app() -> App {
let mut app = App::new();
app.register_type::<Foo>()
.register_type::<FooBar>()
.register_type::<Bar>()
.add_plugins(MinimalPlugins);
app
}
#[test]
fn main() {
{
let mut app = app();
app.add_observer(save_on_default_event);
let bar = app
.world_mut()
.run_system_once(|mut commands: Commands| {
// Spawn some entities
let bar = commands.spawn(BarBundle::default()).id();
commands.spawn(FooBundle {
foo: Foo(42),
bar: FooBar(bar),
save: Save,
});
// Save
commands.trigger_save(SaveWorld::default_into_file(SAVE_PATH));
bar
})
.unwrap();
// Check pre-conditions
let world = app.world_mut();
assert_eq!(world.query::<&Foo>().single(world).unwrap().0, 42);
assert_eq!(world.query::<&FooBar>().single(world).unwrap().0, bar);
assert!(world.entity(bar).contains::<Save>());
// Ensure file was written to disk
assert!(fs::read(SAVE_PATH).is_ok());
}
{
let mut app = app();
app.add_observer(load_on_default_event);
let _ = app.world_mut().run_system_once(|mut commands: Commands| {
// Spawn an entity to offset indices
commands.spawn_empty();
// Load
commands.trigger_load(LoadWorld::default_from_file(SAVE_PATH));
});
let world = app.world_mut();
let bar = world
.query_filtered::<Entity, With<Bar>>()
.single(world)
.unwrap();
assert_eq!(world.query::<&Foo>().single(world).unwrap().0, 42);
assert_eq!(world.query::<&FooBar>().single(world).unwrap().0, bar);
assert!(world.entity(bar).contains::<Save>());
fs::remove_file(SAVE_PATH).unwrap();
}
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
Zeenobit/moonshine_save | https://github.com/Zeenobit/moonshine_save/blob/ca1fdf24a52164d56a38d74598ce975e0bf1ff42/examples/army.rs | examples/army.rs | use bevy::prelude::*;
use bevy_ecs::entity::{EntityMapper, MapEntities};
use moonshine_save::prelude::*;
const SAVE_PATH: &str = "army.ron";
const HELP_TEXT: &str =
"Use the buttons to spawn a new soldier with either a melee or a ranged weapon.
The text displays the army composition by grouping soldiers based on their equipped weapon.
The state of this army can be saved into and loaded from disk.";
fn main() {
let mut app = App::new();
app.add_plugins(DefaultPlugins.set(WindowPlugin {
primary_window: Some(Window {
title: "Army".to_string(),
resolution: (700, 200).into(),
..default()
}),
..default()
}))
// Register game types for de/serialization
.register_type::<Soldier>()
.register_type::<SoldierWeapon>()
.register_type::<Option<Entity>>()
.register_type::<WeaponKind>()
// Add gameplay systems:
.add_systems(Startup, setup)
.add_systems(Update, (update_text, update_buttons))
.add_systems(
Update,
(
add_melee_button_clicked,
add_ranged_button_clicked,
load_button_clicked,
save_button_clicked,
),
)
// Add save/load observers:
.add_observer(save_on_default_event)
.add_observer(load_on_default_event)
.run();
}
/// Represents a soldier entity within the army.
#[derive(Bundle)]
struct SoldierBundle {
// Marker
soldier: Soldier,
// Currently equipped weapon entity
weapon: SoldierWeapon,
// Soldiers should be saved
save: Save,
}
impl SoldierBundle {
fn new(weapon: Entity) -> Self {
Self {
soldier: Soldier,
weapon: SoldierWeapon(Some(weapon)),
save: Save,
}
}
}
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
struct Soldier;
#[derive(Component, Default, Reflect)]
#[reflect(Component, MapEntities)]
struct SoldierWeapon(#[entities] Option<Entity>);
impl MapEntities for SoldierWeapon {
fn map_entities<M: EntityMapper>(&mut self, entity_mapper: &mut M) {
if let Some(weapon) = self.0.as_mut() {
*weapon = entity_mapper.get_mapped(*weapon);
}
}
}
/// Represents a weapon entity which may be equipped by a soldier.
#[derive(Bundle)]
struct WeaponBundle {
// Type of weapon determines whether its owner is ranged or melee
kind: WeaponKind,
// Weapons should be saved
save: Save,
}
impl WeaponBundle {
fn new(kind: WeaponKind) -> Self {
Self { kind, save: Save }
}
}
#[derive(Component, Default, Reflect)]
#[reflect(Component)]
enum WeaponKind {
#[default]
Melee,
Ranged,
}
use WeaponKind::*;
#[derive(Component)]
struct Army;
#[derive(Component)]
struct AddMeleeButton;
#[derive(Component)]
struct AddRangedButton;
#[derive(Component)]
struct SaveButton;
#[derive(Component)]
struct LoadButton;
fn setup(mut commands: Commands) {
// Spawn camera
commands.spawn(Camera2d);
commands
.spawn(Node {
width: Val::Percent(100.0),
flex_direction: FlexDirection::Column,
padding: UiRect::all(Val::Px(20.)),
..default()
})
.with_children(|root| {
// Spawn army text
root.spawn((
Node {
margin: UiRect::bottom(Val::Px(20.)),
..default()
},
Text::new(HELP_TEXT),
TextFont {
font_size: 14.0,
..default()
},
TextColor(Color::WHITE),
));
root.spawn((
Army,
Node {
margin: UiRect::bottom(Val::Px(20.)),
..default()
},
Text::new(""),
TextFont {
font_size: 30.0,
..default()
},
TextColor(Color::WHITE),
));
// Spawn buttons
root.spawn((
Node {
flex_direction: FlexDirection::Row,
..default()
},
children![
button("SPAWN MELEE", AddMeleeButton),
button("SPAWN RANGED", AddRangedButton),
space(Val::Px(20.), Val::Auto),
button("SAVE", SaveButton),
button("LOAD", LoadButton),
],
));
});
}
fn button(value: impl Into<String>, bundle: impl Bundle) -> impl Bundle {
(
bundle,
(
Node {
margin: UiRect::all(Val::Px(5.)),
padding: UiRect::new(Val::Px(10.), Val::Px(10.), Val::Px(5.), Val::Px(5.)),
..default()
},
Button,
),
BackgroundColor(bevy::color::palettes::css::DARK_GRAY.into()),
children![(
Node { ..default() },
Text::new(value.into()),
TextFont {
font_size: 20.,
..default()
},
TextColor(Color::WHITE),
)],
)
}
fn space(width: Val, height: Val) -> impl Bundle {
Node {
width,
height,
..default()
}
}
/// Groups soldiers by the kind of their equipped weapons and displays the results in text.
fn update_text(
soldiers: Query<&SoldierWeapon, With<Soldier>>,
weapon_query: Query<&WeaponKind>,
mut army_query: Query<&mut Text, With<Army>>,
) {
let melee_count = soldiers
.iter()
.filter_map(|SoldierWeapon(entity)| {
entity.and_then(|weapon_entity| weapon_query.get(weapon_entity).ok())
})
.filter(|weapon_kind| matches!(weapon_kind, Melee))
.count();
let ranged_count = soldiers
.iter()
.filter_map(|SoldierWeapon(entity)| {
entity.and_then(|weapon_entity| weapon_query.get(weapon_entity).ok())
})
.filter(|weapon_kind| matches!(weapon_kind, Ranged))
.count();
army_query.single_mut().unwrap().0 =
format!("Soldiers: {melee_count} Melee, {ranged_count} Ranged");
}
const DEFAULT_BUTTON_COLOR: Color = Color::srgb(0.15, 0.15, 0.15);
const HOVERED_BUTTON_COLOR: Color = Color::srgb(0.25, 0.25, 0.25);
const PRESSED_BUTTON_COLOR: Color = Color::srgb(0.35, 0.75, 0.35);
/// Handle color feedback for buttons.
fn update_buttons(
mut interaction_query: Query<(&Interaction, &mut BackgroundColor), Changed<Interaction>>,
) {
for (interaction, mut color) in &mut interaction_query {
match *interaction {
Interaction::Pressed => {
*color = PRESSED_BUTTON_COLOR.into();
}
Interaction::Hovered => {
*color = HOVERED_BUTTON_COLOR.into();
}
Interaction::None => {
*color = DEFAULT_BUTTON_COLOR.into();
}
}
}
}
fn add_ranged_button_clicked(
query: Query<&Interaction, (With<AddRangedButton>, Changed<Interaction>)>,
mut commands: Commands,
) {
if let Ok(Interaction::Pressed) = query.single() {
let weapon = commands.spawn(WeaponBundle::new(Ranged)).id();
commands.spawn(SoldierBundle::new(weapon));
}
}
fn add_melee_button_clicked(
query: Query<&Interaction, (With<AddMeleeButton>, Changed<Interaction>)>,
mut commands: Commands,
) {
if let Ok(Interaction::Pressed) = query.single() {
let weapon = commands.spawn(WeaponBundle::new(Melee)).id();
commands.spawn(SoldierBundle::new(weapon));
}
}
fn save_button_clicked(
query: Query<&Interaction, (With<SaveButton>, Changed<Interaction>)>,
mut commands: Commands,
) {
if let Ok(Interaction::Pressed) = query.single() {
commands.trigger_save(SaveWorld::default_into_file(SAVE_PATH));
}
}
fn load_button_clicked(
query: Query<&Interaction, (With<LoadButton>, Changed<Interaction>)>,
mut commands: Commands,
) {
if let Ok(Interaction::Pressed) = query.single() {
commands.trigger_load(LoadWorld::default_from_file(SAVE_PATH));
}
}
| rust | MIT | ca1fdf24a52164d56a38d74598ce975e0bf1ff42 | 2026-01-04T20:25:00.832058Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/myceliumd/src/main.rs | myceliumd/src/main.rs | use std::io::{self, Read};
use std::net::Ipv4Addr;
use std::path::Path;
use std::sync::Arc;
use std::{
error::Error,
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use std::{fmt::Display, str::FromStr};
use clap::{Args, Parser, Subcommand};
use mycelium::message::TopicConfig;
use serde::{Deserialize, Deserializer};
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[cfg(target_family = "unix")]
use tokio::signal::{self, unix::SignalKind};
use tokio::sync::Mutex;
use tracing::{debug, error, info, warn};
use crypto::PublicKey;
use mycelium::endpoint::Endpoint;
use mycelium::{crypto, Node};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
/// The default port on the underlay to listen on for incoming TCP connections.
const DEFAULT_TCP_LISTEN_PORT: u16 = 9651;
/// The default port on the underlay to listen on for incoming Quic connections.
const DEFAULT_QUIC_LISTEN_PORT: u16 = 9651;
/// The default port to use for IPv6 link local peer discovery (UDP).
const DEFAULT_PEER_DISCOVERY_PORT: u16 = 9650;
/// The default listening address for the HTTP API.
const DEFAULT_HTTP_API_SERVER_ADDRESS: SocketAddr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8989);
/// The default listening address for the JSON-RPC API.
const DEFAULT_JSONRPC_API_SERVER_ADDRESS: SocketAddr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8990);
/// Default name of tun interface
#[cfg(not(target_os = "macos"))]
const TUN_NAME: &str = "mycelium";
/// Default name of tun interface
#[cfg(target_os = "macos")]
const TUN_NAME: &str = "utun0";
/// The logging formats that can be selected.
#[derive(Clone, PartialEq, Eq)]
enum LoggingFormat {
Compact,
Logfmt,
/// Same as Logfmt but with color statically disabled
Plain,
}
impl Display for LoggingFormat {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
LoggingFormat::Compact => "compact",
LoggingFormat::Logfmt => "logfmt",
LoggingFormat::Plain => "plain",
}
)
}
}
impl FromStr for LoggingFormat {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"compact" => LoggingFormat::Compact,
"logfmt" => LoggingFormat::Logfmt,
"plain" => LoggingFormat::Plain,
_ => return Err("invalid logging format"),
})
}
}
#[derive(Parser)]
#[command(version)]
struct Cli {
/// Path to the private key file. This will be created if it does not exist. Default
/// [priv_key.bin].
#[arg(short = 'k', long = "key-file", global = true)]
key_file: Option<PathBuf>,
// Configuration file
#[arg(short = 'c', long = "config-file", global = true)]
config_file: Option<PathBuf>,
/// Enable debug logging. Does nothing if `--silent` is set.
#[arg(short = 'd', long = "debug", default_value_t = false)]
debug: bool,
/// Disable all logs except error logs.
#[arg(long = "silent", default_value_t = false)]
silent: bool,
/// The logging format to use. `logfmt` and `compact` is supported.
#[arg(long = "log-format", default_value_t = LoggingFormat::Compact)]
logging_format: LoggingFormat,
#[clap(flatten)]
node_args: NodeArguments,
#[command(subcommand)]
command: Option<Command>,
}
#[derive(Debug, Subcommand)]
pub enum Command {
/// Inspect a public key provided in hex format, or export the local public key if no key is
/// given.
Inspect {
/// Output in json format.
#[arg(long = "json")]
json: bool,
/// The key to inspect.
key: Option<String>,
},
/// Generate a set of new keys for the system at the default path, or the path provided by the
/// --key-file parameter
GenerateKeys {
/// Force generating new keys, removing any existing key in the process
#[arg(long = "force")]
force: bool,
},
/// Actions on the message subsystem
Message {
#[command(subcommand)]
command: MessageCommand,
},
/// Actions related to peers (list, remove, add)
Peers {
#[command(subcommand)]
command: PeersCommand,
},
/// Actions related to routes (selected, fallback, queried, no route)
Routes {
#[command(subcommand)]
command: RoutesCommand,
},
/// Actions related to the SOCKS5 proxy
Proxy {
#[command(subcommand)]
command: ProxyCommand,
},
}
#[derive(Debug, Subcommand)]
pub enum MessageCommand {
Send {
/// Wait for a reply from the receiver.
#[arg(short = 'w', long = "wait", default_value_t = false)]
wait: bool,
/// An optional timeout to wait for. This does nothing if the `--wait` flag is not set. If
/// `--wait` is set and this flag isn't, wait forever for a reply.
#[arg(long = "timeout")]
timeout: Option<u64>,
/// Optional topic of the message. Receivers can filter on this to only receive messages
/// for a chosen topic.
#[arg(short = 't', long = "topic")]
topic: Option<String>,
/// Optional file to use as message body.
#[arg(long = "msg-path")]
msg_path: Option<PathBuf>,
/// Optional message ID to reply to.
#[arg(long = "reply-to")]
reply_to: Option<String>,
/// Destination of the message, either a hex encoded public key, or an IPv6 address in the
/// 400::/7 range.
destination: String,
/// The message to send. This is required if `--msg_path` is not set
message: Option<String>,
},
Receive {
/// An optional timeout to wait for a message. If this is not set, wait forever.
#[arg(long = "timeout")]
timeout: Option<u64>,
/// Optional topic of the message. Only messages with this topic will be received by this
/// command.
#[arg(short = 't', long = "topic")]
topic: Option<String>,
/// Optional file in which the message body will be saved.
#[arg(long = "msg-path")]
msg_path: Option<PathBuf>,
/// Don't print the metadata
#[arg(long = "raw")]
raw: bool,
},
}
#[derive(Debug, Subcommand)]
pub enum PeersCommand {
/// List the connected peers
List {
/// Print the peers list in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Add peer(s)
Add { peers: Vec<String> },
/// Remove peer(s)
Remove { peers: Vec<String> },
}
#[derive(Debug, Subcommand)]
pub enum RoutesCommand {
/// Print all selected routes
Selected {
/// Print selected routes in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Print all fallback routes
Fallback {
/// Print fallback routes in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Print the currently queried subnets
Queried {
/// Print queried subnets in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Print all subnets which are explicitly marked as not having a route
NoRoute {
/// Print subnets in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
}
#[derive(Debug, Subcommand)]
pub enum ProxyCommand {
/// List known proxies
List {
/// Print in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Connect to a proxy, optionally specifying a remote [IPV6]:PORT
Connect {
/// Optional remote socket address, e.g. [407:...]:1080
#[arg(long = "remote")]
remote: Option<String>,
/// Print in JSON format
#[arg(long = "json", default_value_t = false)]
json: bool,
},
/// Disconnect from the current proxy
Disconnect,
/// Manage background proxy probing
Probe {
#[command(subcommand)]
command: ProxyProbeCommand,
},
}
#[derive(Debug, Subcommand)]
pub enum ProxyProbeCommand {
/// Start background proxy probing
Start,
/// Stop background proxy probing
Stop,
}
#[derive(Debug, Args)]
pub struct NodeArguments {
/// Peers to connect to.
#[arg(long = "peers", num_args = 1..)]
static_peers: Vec<Endpoint>,
/// Port to listen on for tcp connections.
#[arg(short = 't', long = "tcp-listen-port", default_value_t = DEFAULT_TCP_LISTEN_PORT)]
tcp_listen_port: u16,
/// Disable quic protocol for connecting to peers
#[arg(long = "disable-quic", default_value_t = false)]
disable_quic: bool,
/// Port to listen on for quic connections.
#[arg(short = 'q', long = "quic-listen-port", default_value_t = DEFAULT_QUIC_LISTEN_PORT)]
quic_listen_port: u16,
/// Port to use for link local peer discovery. This uses the UDP protocol.
#[arg(long = "peer-discovery-port", default_value_t = DEFAULT_PEER_DISCOVERY_PORT)]
peer_discovery_port: u16,
/// Disable peer discovery.
///
/// If this flag is passed, the automatic link local peer discovery will not be enabled, and
/// peers must be configured manually. If this is disabled on all local peers, communication
/// between them will go over configured external peers.
#[arg(long = "disable-peer-discovery", default_value_t = false)]
disable_peer_discovery: bool,
/// Address of the HTTP API server.
#[arg(long = "api-addr", default_value_t = DEFAULT_HTTP_API_SERVER_ADDRESS)]
api_addr: SocketAddr,
/// Address of the JSON-RPC API server.
#[arg(long = "jsonrpc-addr", default_value_t = DEFAULT_JSONRPC_API_SERVER_ADDRESS)]
jsonrpc_addr: SocketAddr,
/// Run without creating a TUN interface.
///
/// The system will participate in the network as usual, but won't be able to send out L3
/// packets. Inbound L3 traffic will be silently discarded. The message subsystem will still
/// work however.
#[arg(long = "no-tun", default_value_t = false)]
no_tun: bool,
/// Name to use for the TUN interface, if one is created.
///
/// Setting this only matters if a TUN interface is actually created, i.e. if the `--no-tun`
/// flag is **not** set. The name set here must be valid for the current platform, e.g. on OSX,
/// the name must start with `utun` and be followed by digits.
#[arg(long = "tun-name")]
tun_name: Option<String>,
/// The address on which to expose prometheus metrics, if desired.
///
/// Setting this flag will attempt to start an HTTP server on the provided address, to serve
/// prometheus metrics on the /metrics endpoint. If this flag is not set, metrics are also not
/// collected.
#[arg(long = "metrics-api-address")]
metrics_api_address: Option<SocketAddr>,
/// The firewall mark to set on the mycelium sockets.
///
/// This allows to identify packets that contain encapsulated mycelium packets so that
/// different routing policies can be applied to them.
/// This option only has an effect on Linux.
#[arg(long = "firewall-mark")]
firewall_mark: Option<u32>,
/// The amount of worker tasks to spawn to handle updates.
///
/// By default, updates are processed on a single task only. This is sufficient for most use
/// cases. In case you notice that the node can't keep up with the incoming updates (typically
/// because you are running a public node with a lot of connections), this value can be
/// increased to process updates in parallel.
#[arg(long = "update-workers", default_value_t = 1)]
update_workers: usize,
/// The topic configuration.
///
/// A .toml file containing topic configuration. This is a default action in case the topic is
/// not listed, and an explicit whitelist for allowed subnets/ips which are otherwise allowed
/// to use a topic.
#[arg(long = "topic-config")]
topic_config: Option<PathBuf>,
/// The cache directory for the mycelium CDN module
///
/// This directory will be used to cache reconstructed content blocks which were loaded through
/// the CDN functionallity for faster access next time.
#[arg(long = "cdn-cache")]
cdn_cache: Option<PathBuf>,
/// Enable the dns resolver
///
/// When the DNS resolver is enabled, it will bind a UDP socket on port 53. If this fails, the
/// system will not continue starting. All queries sent to this resolver will be forwarded to
/// the system resolvers.
#[arg(long = "enable-dns")]
enable_dns: bool,
}
#[derive(Debug, Deserialize)]
pub struct MergedNodeConfig {
peers: Vec<Endpoint>,
tcp_listen_port: u16,
disable_quic: bool,
quic_listen_port: u16,
peer_discovery_port: u16,
disable_peer_discovery: bool,
api_addr: SocketAddr,
jsonrpc_addr: SocketAddr,
no_tun: bool,
tun_name: String,
metrics_api_address: Option<SocketAddr>,
firewall_mark: Option<u32>,
update_workers: usize,
topic_config: Option<PathBuf>,
cdn_cache: Option<PathBuf>,
enable_dns: bool,
}
#[derive(Debug, Deserialize, Default)]
struct MyceliumConfig {
#[serde(deserialize_with = "deserialize_optional_endpoint_str_from_toml")]
peers: Option<Vec<Endpoint>>,
tcp_listen_port: Option<u16>,
disable_quic: Option<bool>,
quic_listen_port: Option<u16>,
no_tun: Option<bool>,
tun_name: Option<String>,
disable_peer_discovery: Option<bool>,
peer_discovery_port: Option<u16>,
api_addr: Option<SocketAddr>,
jsonrpc_addr: Option<SocketAddr>,
metrics_api_address: Option<SocketAddr>,
firewall_mark: Option<u32>,
update_workers: Option<usize>,
topic_config: Option<PathBuf>,
cdn_cache: Option<PathBuf>,
enable_dns: Option<bool>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let cli = Cli::parse();
// Init default configuration
let mut mycelium_config = MyceliumConfig::default();
// Load configuration file
if let Some(config_file_path) = &cli.config_file {
if Path::new(config_file_path).exists() {
let config = config::Config::builder()
.add_source(config::File::new(
config_file_path.to_str().unwrap(),
config::FileFormat::Toml,
))
.build()?;
mycelium_config = config.try_deserialize()?;
} else {
let error_msg = format!("Config file {config_file_path:?} not found");
return Err(io::Error::new(io::ErrorKind::NotFound, error_msg).into());
}
} else if let Some(mut conf) = dirs::config_dir() {
// Windows: %APPDATA%/ThreeFold Tech/Mycelium/mycelium.conf
#[cfg(target_os = "windows")]
{
conf = conf
.join("ThreeFold Tech")
.join("Mycelium")
.join("mycelium.toml")
};
// Linux: $HOME/.config/mycelium/mycelium.conf
#[allow(clippy::unnecessary_operation)]
#[cfg(target_os = "linux")]
{
conf = conf.join("mycelium").join("mycelium.toml")
};
// MacOS: $HOME/Library/Application Support/ThreeFold Tech/Mycelium/mycelium.conf
#[cfg(target_os = "macos")]
{
conf = conf
.join("ThreeFold Tech")
.join("Mycelium")
.join("mycelium.toml")
};
if conf.exists() {
info!(
conf_dir = conf.to_str().unwrap(),
"Mycelium is starting with configuration file",
);
let config = config::Config::builder()
.add_source(config::File::new(
conf.to_str().unwrap(),
config::FileFormat::Toml,
))
.build()?;
mycelium_config = config.try_deserialize()?;
}
}
let level = if cli.silent {
tracing::Level::ERROR
} else if cli.debug {
tracing::Level::DEBUG
} else {
tracing::Level::INFO
};
tracing_subscriber::registry()
.with(
EnvFilter::builder()
.with_default_directive(level.into())
.from_env()
.expect("invalid RUST_LOG"),
)
.with(
(cli.logging_format == LoggingFormat::Compact)
.then(|| tracing_subscriber::fmt::Layer::new().compact()),
)
.with((cli.logging_format == LoggingFormat::Logfmt).then(tracing_logfmt::layer))
.with((cli.logging_format == LoggingFormat::Plain).then(|| {
tracing_logfmt::builder()
// Explicitly force color off
.with_ansi_color(false)
.layer()
}))
.init();
let key_path = cli.key_file.unwrap_or_else(|| {
let mut key_path = dirs::data_local_dir().unwrap_or_else(|| ".".into());
// Windows: %LOCALAPPDATA%/ThreeFold Tech/Mycelium/priv_key.bin
#[cfg(target_os = "windows")]
{
key_path = key_path.join("ThreeFold Tech").join("Mycelium")
};
// Linux: $HOME/.local/share/mycelium/priv_key.bin
#[allow(clippy::unnecessary_operation)]
#[cfg(target_os = "linux")]
{
key_path = key_path.join("mycelium")
};
// MacOS: $HOME/Library/Application Support/ThreeFold Tech/Mycelium/priv_key.bin
#[cfg(target_os = "macos")]
{
key_path = key_path.join("ThreeFold Tech").join("Mycelium")
};
// If the dir does not exist, create it
if !key_path.exists() {
info!(
data_dir = key_path.to_str().unwrap(),
"Data config dir does not exist, create it"
);
if let Err(err) = std::fs::create_dir_all(&key_path) {
error!(%err, data_dir = key_path.to_str().unwrap(), "Could not create data directory");
std::process::exit(1);
}
}
key_path = key_path.join("priv_key.bin");
if key_path.exists() {
info!(key_path = key_path.to_str().unwrap(), "Using key file",);
}
key_path
});
match cli.command {
None => {
let merged_config = merge_config(cli.node_args, mycelium_config);
let topic_config = merged_config.topic_config.as_ref().and_then(|path| {
let mut content = String::new();
let mut file = std::fs::File::open(path).ok()?;
file.read_to_string(&mut content).ok()?;
toml::from_str::<TopicConfig>(&content).ok()
});
if topic_config.is_some() {
info!(path = ?merged_config.topic_config, "Loaded topic cofig");
}
let node_keys = get_node_keys(&key_path).await?;
let node_secret_key = if let Some((node_secret_key, _)) = node_keys {
node_secret_key
} else {
warn!("Node key file {key_path:?} not found, generating new keys");
let secret_key = crypto::SecretKey::new();
save_key_file(&secret_key, &key_path).await?;
secret_key
};
let _api = if let Some(metrics_api_addr) = merged_config.metrics_api_address {
let metrics = mycelium_metrics::PrometheusExporter::new();
let config = mycelium::Config {
node_key: node_secret_key,
peers: merged_config.peers,
no_tun: merged_config.no_tun,
tcp_listen_port: merged_config.tcp_listen_port,
quic_listen_port: if merged_config.disable_quic {
None
} else {
Some(merged_config.quic_listen_port)
},
peer_discovery_port: if merged_config.disable_peer_discovery {
None
} else {
Some(merged_config.peer_discovery_port)
},
tun_name: merged_config.tun_name,
private_network_config: None,
metrics: metrics.clone(),
firewall_mark: merged_config.firewall_mark,
update_workers: merged_config.update_workers,
topic_config,
cdn_cache: merged_config.cdn_cache,
enable_dns: merged_config.enable_dns,
};
metrics.spawn(metrics_api_addr);
let node = Arc::new(Mutex::new(Node::new(config).await?));
let http_api = mycelium_api::Http::spawn(node.clone(), merged_config.api_addr);
// Initialize the JSON-RPC server
let rpc_api =
mycelium_api::rpc::JsonRpc::spawn(node, merged_config.jsonrpc_addr).await;
(http_api, rpc_api)
} else {
let config = mycelium::Config {
node_key: node_secret_key,
peers: merged_config.peers,
no_tun: merged_config.no_tun,
tcp_listen_port: merged_config.tcp_listen_port,
quic_listen_port: if merged_config.disable_quic {
None
} else {
Some(merged_config.quic_listen_port)
},
peer_discovery_port: if merged_config.disable_peer_discovery {
None
} else {
Some(merged_config.peer_discovery_port)
},
tun_name: merged_config.tun_name,
private_network_config: None,
metrics: mycelium_metrics::NoMetrics,
firewall_mark: merged_config.firewall_mark,
update_workers: merged_config.update_workers,
topic_config,
cdn_cache: merged_config.cdn_cache,
enable_dns: merged_config.enable_dns,
};
let node = Arc::new(Mutex::new(Node::new(config).await?));
let http_api = mycelium_api::Http::spawn(node.clone(), merged_config.api_addr);
// Initialize the JSON-RPC server
let rpc_api =
mycelium_api::rpc::JsonRpc::spawn(node, merged_config.jsonrpc_addr).await;
(http_api, rpc_api)
};
// TODO: put in dedicated file so we can only rely on certain signals on unix platforms
#[cfg(target_family = "unix")]
{
let mut sigint = signal::unix::signal(SignalKind::interrupt())
.expect("Can install SIGINT handler");
let mut sigterm = signal::unix::signal(SignalKind::terminate())
.expect("Can install SIGTERM handler");
tokio::select! {
_ = sigint.recv() => { }
_ = sigterm.recv() => { }
}
}
#[cfg(not(target_family = "unix"))]
{
if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to wait for SIGINT: {e}");
}
}
}
Some(cmd) => match cmd {
Command::Inspect { json, key } => {
let node_keys = get_node_keys(&key_path).await?;
let key = if let Some(key) = key {
PublicKey::try_from(key.as_str())?
} else if let Some((_, node_pub_key)) = node_keys {
node_pub_key
} else {
error!("No key to inspect provided and no key found at {key_path:?}");
return Err(io::Error::new(
io::ErrorKind::NotFound,
"no key to inspect and key file not found",
)
.into());
};
mycelium_cli::inspect(key, json)?;
return Ok(());
}
Command::GenerateKeys { force } => {
let node_keys = get_node_keys(&key_path).await?;
if node_keys.is_none() || force {
info!(?key_path, "Generating new node keys");
let secret_key = crypto::SecretKey::new();
save_key_file(&secret_key, &key_path).await?;
} else {
warn!(?key_path, "Refusing to generate new keys as key file already exists, use `--force` to generate them anyway");
}
}
Command::Message { command } => match command {
MessageCommand::Send {
wait,
timeout,
topic,
msg_path,
reply_to,
destination,
message,
} => {
return mycelium_cli::send_msg(
destination,
message,
wait,
timeout,
reply_to,
topic,
msg_path,
cli.node_args.api_addr,
)
.await
}
MessageCommand::Receive {
timeout,
topic,
msg_path,
raw,
} => {
return mycelium_cli::recv_msg(
timeout,
topic,
msg_path,
raw,
cli.node_args.api_addr,
)
.await
}
},
Command::Peers { command } => match command {
PeersCommand::List { json } => {
return mycelium_cli::list_peers(cli.node_args.api_addr, json).await;
}
PeersCommand::Add { peers } => {
return mycelium_cli::add_peers(cli.node_args.api_addr, peers).await;
}
PeersCommand::Remove { peers } => {
return mycelium_cli::remove_peers(cli.node_args.api_addr, peers).await;
}
},
Command::Routes { command } => match command {
RoutesCommand::Selected { json } => {
return mycelium_cli::list_selected_routes(cli.node_args.api_addr, json).await;
}
RoutesCommand::Fallback { json } => {
return mycelium_cli::list_fallback_routes(cli.node_args.api_addr, json).await;
}
RoutesCommand::Queried { json } => {
return mycelium_cli::list_queried_subnets(cli.node_args.api_addr, json).await;
}
RoutesCommand::NoRoute { json } => {
return mycelium_cli::list_no_route_entries(cli.node_args.api_addr, json).await;
}
},
Command::Proxy { command } => match command {
ProxyCommand::List { json } => {
return mycelium_cli::list_proxies(cli.node_args.api_addr, json).await;
}
ProxyCommand::Connect { remote, json } => {
let remote_parsed = if let Some(r) = remote {
match r.parse::<SocketAddr>() {
Ok(addr) => Some(addr),
Err(e) => {
error!("Invalid --remote value '{r}': {e}");
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("invalid --remote socket address: {e}"),
)
.into());
}
}
} else {
None
};
return mycelium_cli::connect_proxy(
cli.node_args.api_addr,
remote_parsed,
json,
)
.await;
}
ProxyCommand::Disconnect => {
return mycelium_cli::disconnect_proxy(cli.node_args.api_addr).await;
}
ProxyCommand::Probe { command } => match command {
ProxyProbeCommand::Start => {
return mycelium_cli::start_proxy_probe(cli.node_args.api_addr).await;
}
ProxyProbeCommand::Stop => {
return mycelium_cli::stop_proxy_probe(cli.node_args.api_addr).await;
}
},
},
},
}
Ok(())
}
async fn get_node_keys(
key_path: &PathBuf,
) -> Result<Option<(crypto::SecretKey, crypto::PublicKey)>, io::Error> {
if key_path.exists() {
let sk = load_key_file(key_path).await?;
let pk = crypto::PublicKey::from(&sk);
debug!("Loaded key file at {key_path:?}");
Ok(Some((sk, pk)))
} else {
Ok(None)
}
}
async fn load_key_file<T>(path: &Path) -> Result<T, io::Error>
where
T: From<[u8; 32]>,
{
let mut file = File::open(path).await?;
let mut secret_bytes = [0u8; 32];
file.read_exact(&mut secret_bytes).await?;
Ok(T::from(secret_bytes))
}
/// Save a key to a file at the given path. If the file already exists, it will be overwritten.
async fn save_key_file(key: &crypto::SecretKey, path: &Path) -> io::Result<()> {
#[cfg(target_family = "unix")]
{
use tokio::fs::OpenOptions;
let mut file = OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.mode(0o644)
.open(path)
.await?;
file.write_all(key.as_bytes()).await?;
}
#[cfg(not(target_family = "unix"))]
{
let mut file = File::create(path).await?;
file.write_all(key.as_bytes()).await?;
}
Ok(())
}
fn merge_config(cli_args: NodeArguments, file_config: MyceliumConfig) -> MergedNodeConfig {
MergedNodeConfig {
peers: if !cli_args.static_peers.is_empty() {
cli_args.static_peers
} else {
file_config.peers.unwrap_or_default()
},
tcp_listen_port: if cli_args.tcp_listen_port != DEFAULT_TCP_LISTEN_PORT {
cli_args.tcp_listen_port
} else {
file_config
.tcp_listen_port
.unwrap_or(DEFAULT_TCP_LISTEN_PORT)
},
disable_quic: cli_args.disable_quic || file_config.disable_quic.unwrap_or(false),
quic_listen_port: if cli_args.quic_listen_port != DEFAULT_QUIC_LISTEN_PORT {
cli_args.quic_listen_port
} else {
file_config
.quic_listen_port
.unwrap_or(DEFAULT_QUIC_LISTEN_PORT)
},
peer_discovery_port: if cli_args.peer_discovery_port != DEFAULT_PEER_DISCOVERY_PORT {
cli_args.peer_discovery_port
} else {
file_config
.peer_discovery_port
.unwrap_or(DEFAULT_PEER_DISCOVERY_PORT)
},
disable_peer_discovery: cli_args.disable_peer_discovery
|| file_config.disable_peer_discovery.unwrap_or(false),
api_addr: if cli_args.api_addr != DEFAULT_HTTP_API_SERVER_ADDRESS {
cli_args.api_addr
} else {
file_config
.api_addr
.unwrap_or(DEFAULT_HTTP_API_SERVER_ADDRESS)
},
jsonrpc_addr: if cli_args.jsonrpc_addr != DEFAULT_JSONRPC_API_SERVER_ADDRESS {
cli_args.jsonrpc_addr
} else {
file_config
.jsonrpc_addr
.unwrap_or(DEFAULT_JSONRPC_API_SERVER_ADDRESS)
},
no_tun: cli_args.no_tun || file_config.no_tun.unwrap_or(false),
tun_name: if let Some(tun_name_cli) = cli_args.tun_name {
tun_name_cli
} else if let Some(tun_name_config) = file_config.tun_name {
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | true |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/lib.rs | mycelium-cli/src/lib.rs | mod inspect;
#[cfg(feature = "message")]
mod message;
mod peer;
mod proxy;
mod routes;
pub use inspect::inspect;
#[cfg(feature = "message")]
pub use message::{recv_msg, send_msg};
pub use peer::{add_peers, list_peers, remove_peers};
pub use proxy::{
connect_proxy, disconnect_proxy, list_proxies, start_proxy_probe, stop_proxy_probe,
};
pub use routes::{
list_fallback_routes, list_no_route_entries, list_queried_subnets, list_selected_routes,
};
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/message.rs | mycelium-cli/src/message.rs | use std::{
io::Write,
mem,
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use base64::{
alphabet,
engine::{GeneralPurpose, GeneralPurposeConfig},
Engine,
};
use mycelium::{crypto::PublicKey, message::MessageId, subnet::Subnet};
use serde::{Serialize, Serializer};
use tracing::{debug, error};
use mycelium_api::{MessageDestination, MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
enum Payload {
Readable(String),
NotReadable(Vec<u8>),
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CliMessage {
id: MessageId,
src_ip: IpAddr,
src_pk: PublicKey,
dst_ip: IpAddr,
dst_pk: PublicKey,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "serialize_payload")]
topic: Option<Payload>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "serialize_payload")]
payload: Option<Payload>,
}
const B64ENGINE: GeneralPurpose = base64::engine::general_purpose::GeneralPurpose::new(
&alphabet::STANDARD,
GeneralPurposeConfig::new(),
);
fn serialize_payload<S: Serializer>(p: &Option<Payload>, s: S) -> Result<S::Ok, S::Error> {
let base64 = match p {
None => None,
Some(Payload::Readable(data)) => Some(data.clone()),
Some(Payload::NotReadable(data)) => Some(B64ENGINE.encode(data)),
};
<Option<String>>::serialize(&base64, s)
}
/// Encode arbitrary data in standard base64.
pub fn encode_base64(input: &[u8]) -> String {
B64ENGINE.encode(input)
}
/// Send a message to a receiver.
#[allow(clippy::too_many_arguments)]
pub async fn send_msg(
destination: String,
msg: Option<String>,
wait: bool,
timeout: Option<u64>,
reply_to: Option<String>,
topic: Option<String>,
msg_path: Option<PathBuf>,
server_addr: SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
if reply_to.is_some() && wait {
error!("Can't wait on a reply for a reply, either use --reply-to or --wait");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Only one of --reply-to or --wait is allowed",
)
.into());
}
let destination = if destination.len() == 64 {
// Public key in hex format
match PublicKey::try_from(&*destination) {
Err(_) => {
error!("{destination} is not a valid hex encoded public key");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid hex encoded public key",
)
.into());
}
Ok(pk) => MessageDestination::Pk(pk),
}
} else {
match destination.parse() {
Err(e) => {
error!("{destination} is not a valid IPv6 address: {e}");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid IPv6 address",
)
.into());
}
Ok(ip) => {
let global_subnet = Subnet::new(
mycelium::GLOBAL_SUBNET_ADDRESS,
mycelium::GLOBAL_SUBNET_PREFIX_LEN,
)
.unwrap();
if !global_subnet.contains_ip(ip) {
error!("{destination} is not a part of {global_subnet}");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"IPv6 address is not part of the mycelium subnet",
)
.into());
}
MessageDestination::Ip(ip)
}
}
};
// Load msg, files have prio.
let msg = if let Some(path) = msg_path {
match tokio::fs::read(&path).await {
Err(e) => {
error!("Could not read file at {:?}: {e}", path);
return Err(e.into());
}
Ok(data) => data,
}
} else if let Some(msg) = msg {
msg.into_bytes()
} else {
error!("Message is a required argument if `--msg-path` is not provided");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Message is a required argument if `--msg-path` is not provided",
)
.into());
};
let mut url = format!("http://{server_addr}/api/v1/messages");
if let Some(reply_to) = reply_to {
url.push_str(&format!("/reply/{reply_to}"));
}
if wait {
// A year should be sufficient to wait
let reply_timeout = timeout.unwrap_or(60 * 60 * 24 * 365);
url.push_str(&format!("?reply_timeout={reply_timeout}"));
}
match reqwest::Client::new()
.post(url)
.json(&MessageSendInfo {
dst: destination,
topic: topic.map(String::into_bytes),
payload: msg,
})
.send()
.await
{
Err(e) => {
error!("Failed to send request: {e}");
return Err(e.into());
}
Ok(res) => {
if res.status() == STATUSCODE_NO_CONTENT {
return Ok(());
}
match res.json::<PushMessageResponse>().await {
Err(e) => {
error!("Failed to load response body {e}");
return Err(e.into());
}
Ok(resp) => {
match resp {
PushMessageResponse::Id(id) => {
let _ = serde_json::to_writer(std::io::stdout(), &id);
}
PushMessageResponse::Reply(mri) => {
let cm = CliMessage {
id: mri.id,
topic: mri.topic.map(|topic| {
if let Ok(s) = String::from_utf8(topic.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(topic)
}
}),
src_ip: mri.src_ip,
src_pk: mri.src_pk,
dst_ip: mri.dst_ip,
dst_pk: mri.dst_pk,
payload: Some({
if let Ok(s) = String::from_utf8(mri.payload.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(mri.payload)
}
}),
};
let _ = serde_json::to_writer(std::io::stdout(), &cm);
}
}
println!();
}
}
}
}
Ok(())
}
const STATUSCODE_NO_CONTENT: u16 = 204;
pub async fn recv_msg(
timeout: Option<u64>,
topic: Option<String>,
msg_path: Option<PathBuf>,
raw: bool,
server_addr: SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
// One year timeout should be sufficient
let timeout = timeout.unwrap_or(60 * 60 * 24 * 365);
let mut url = format!("http://{server_addr}/api/v1/messages?timeout={timeout}");
if let Some(ref topic) = topic {
if topic.len() > 255 {
error!("{topic} is longer than the maximum allowed topic length of 255");
return Err(
std::io::Error::new(std::io::ErrorKind::InvalidInput, "Topic too long").into(),
);
}
url.push_str(&format!("&topic={}", encode_base64(topic.as_bytes())));
}
let mut cm = match reqwest::get(url).await {
Err(e) => {
error!("Failed to wait for message: {e}");
return Err(e.into());
}
Ok(resp) => {
if resp.status() == STATUSCODE_NO_CONTENT {
debug!("No message ready yet");
return Ok(());
}
debug!("Received message response");
match resp.json::<MessageReceiveInfo>().await {
Err(e) => {
error!("Failed to load response json: {e}");
return Err(e.into());
}
Ok(mri) => CliMessage {
id: mri.id,
topic: mri.topic.map(|topic| {
if let Ok(s) = String::from_utf8(topic.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(topic)
}
}),
src_ip: mri.src_ip,
src_pk: mri.src_pk,
dst_ip: mri.dst_ip,
dst_pk: mri.dst_pk,
payload: Some({
if let Ok(s) = String::from_utf8(mri.payload.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(mri.payload)
}
}),
},
}
}
};
if let Some(ref file_path) = msg_path {
if let Err(e) = tokio::fs::write(
&file_path,
match mem::take(&mut cm.payload).unwrap() {
Payload::Readable(ref s) => s as &dyn AsRef<[u8]>,
Payload::NotReadable(ref v) => v,
},
)
.await
{
error!("Failed to write response payload to file: {e}");
return Err(e.into());
}
}
if raw {
// only print payload if not already written
if msg_path.is_none() {
let _ = std::io::stdout().write_all(match cm.payload.unwrap() {
Payload::Readable(ref s) => s.as_bytes(),
Payload::NotReadable(ref v) => v,
});
println!();
}
} else {
let _ = serde_json::to_writer(std::io::stdout(), &cm);
println!();
}
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/peer.rs | mycelium-cli/src/peer.rs | use mycelium::peer_manager::PeerStats;
use mycelium_api::AddPeer;
use prettytable::{row, Table};
use std::net::SocketAddr;
use tracing::{debug, error};
/// List the peers the current node is connected to
pub async fn list_peers(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
// Make API call
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve peers");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing connected peers");
match resp.json::<Vec<PeerStats>>().await {
Err(e) => {
error!("Failed to load response json: {e}");
return Err(e.into());
}
Ok(peers) => {
if json_print {
// Print peers in JSON format
let json_output = serde_json::to_string_pretty(&peers)?;
println!("{json_output}");
} else {
// Print peers in table format
let mut table = Table::new();
table.add_row(row![
"Protocol",
"Socket",
"Type",
"Connection",
"Rx total",
"Tx total",
"Discovered",
"Last connection"
]);
for peer in peers.iter() {
table.add_row(row![
peer.endpoint.proto(),
peer.endpoint.address(),
peer.pt,
peer.connection_state,
format_bytes(peer.rx_bytes),
format_bytes(peer.tx_bytes),
format_seconds(peer.discovered),
peer.last_connected
.map(format_seconds)
.unwrap_or("Never connected".to_string()),
]);
}
table.printstd();
}
}
}
}
};
Ok(())
}
fn format_bytes(bytes: u64) -> String {
let byte = byte_unit::Byte::from_u64(bytes);
let adjusted_byte = byte.get_appropriate_unit(byte_unit::UnitType::Binary);
format!(
"{:.2} {}",
adjusted_byte.get_value(),
adjusted_byte.get_unit()
)
}
/// Convert an amount of seconds into a human readable string.
fn format_seconds(total_seconds: u64) -> String {
let seconds = total_seconds % 60;
let minutes = (total_seconds / 60) % 60;
let hours = (total_seconds / 3600) % 60;
let days = (total_seconds / 86400) % 60;
if days > 0 {
format!("{days}d {hours}h {minutes}m {seconds}s")
} else if hours > 0 {
format!("{hours}h {minutes}m {seconds}s")
} else if minutes > 0 {
format!("{minutes}m {seconds}s")
} else {
format!("{seconds}s")
}
}
/// Remove peer(s) by (underlay) IP
pub async fn remove_peers(
server_addr: SocketAddr,
peers: Vec<String>,
) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
for peer in peers.iter() {
// encode to pass in URL
let peer_encoded = urlencoding::encode(peer);
let request_url = format!("http://{server_addr}/api/v1/admin/peers/{peer_encoded}");
if let Err(e) = client
.delete(&request_url)
.send()
.await
.and_then(|res| res.error_for_status())
{
error!("Failed to delete peer: {e}");
return Err(e.into());
}
}
Ok(())
}
/// Add peer(s) by (underlay) IP
pub async fn add_peers(
server_addr: SocketAddr,
peers: Vec<String>,
) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
for peer in peers.into_iter() {
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
if let Err(e) = client
.post(&request_url)
.json(&AddPeer { endpoint: peer })
.send()
.await
.and_then(|res| res.error_for_status())
{
error!("Failed to add peer: {e}");
return Err(e.into());
}
}
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/routes.rs | mycelium-cli/src/routes.rs | use mycelium_api::{NoRouteSubnet, QueriedSubnet, Route};
use prettytable::{row, Table};
use std::net::SocketAddr;
use tracing::{debug, error};
pub async fn list_selected_routes(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/selected");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve selected routes");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing selected routes");
if json_print {
// API call returns routes in JSON format by default
let selected_routes = resp.text().await?;
println!("{selected_routes}");
} else {
// Print routes in table format
let routes: Vec<Route> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Next Hop", "Metric", "Seq No"]);
for route in routes.iter() {
table.add_row(row![
&route.subnet,
&route.next_hop,
route.metric,
route.seqno,
]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_fallback_routes(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/fallback");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve fallback routes");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing fallback routes");
if json_print {
// API call returns routes in JSON format by default
let fallback_routes = resp.text().await?;
println!("{fallback_routes}");
} else {
// Print routes in table format
let routes: Vec<Route> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Next Hop", "Metric", "Seq No"]);
for route in routes.iter() {
table.add_row(row![
&route.subnet,
&route.next_hop,
route.metric,
route.seqno,
]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_queried_subnets(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/queried");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve queried subnets");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing queried routes");
if json_print {
// API call returns routes in JSON format by default
let queried_routes = resp.text().await?;
println!("{queried_routes}");
} else {
// Print routes in table format
let queries: Vec<QueriedSubnet> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Query expiration"]);
for query in queries.iter() {
table.add_row(row![query.subnet, query.expiration,]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_no_route_entries(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/no_route");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve subnets with no route entries");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing no route entries");
if json_print {
// API call returns routes in JSON format by default
let nrs = resp.text().await?;
println!("{nrs}");
} else {
// Print routes in table format
let no_routes: Vec<NoRouteSubnet> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Entry expiration"]);
for nrs in no_routes.iter() {
table.add_row(row![nrs.subnet, nrs.expiration,]);
}
table.printstd();
}
}
}
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/proxy.rs | mycelium-cli/src/proxy.rs | use prettytable::{row, Table};
use std::net::{Ipv6Addr, SocketAddr};
use tracing::{debug, error};
#[derive(serde::Serialize)]
struct ConnectProxyRequest {
remote: Option<String>,
}
/// List known valid proxy IPv6 addresses discovered by the node.
pub async fn list_proxies(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/proxy");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve proxies");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing known proxies");
if json_print {
let body = resp.text().await?;
println!("{body}");
} else {
let proxies: Vec<Ipv6Addr> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["IPv6 Address"]);
for ip in proxies {
table.add_row(row![ip]);
}
table.printstd();
}
}
}
Ok(())
}
/// Connect to a proxy. When `remote` is None, the node will auto-select the best known proxy.
pub async fn connect_proxy(
server_addr: SocketAddr,
remote: Option<SocketAddr>,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
let request_url = format!("http://{server_addr}/api/v1/admin/proxy");
let payload = ConnectProxyRequest {
remote: remote.map(|r| r.to_string()),
};
let res = client.post(&request_url).json(&payload).send().await;
match res {
Err(e) => {
error!("Failed to send connect proxy request: {e}");
Err(e.into())
}
Ok(resp) => {
if resp.status().is_success() {
if json_print {
let body = resp.text().await?;
println!("{body}");
} else {
let addr: SocketAddr = resp.json().await?;
println!("{addr}");
}
Ok(())
} else if resp.status().as_u16() == 404 {
error!("No valid proxy available or connection failed");
Err(std::io::Error::new(
std::io::ErrorKind::NotFound,
"No valid proxy available or connection failed",
)
.into())
} else {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
error!("Proxy connect failed, status {status}, body: {body}");
Err(std::io::Error::other(format!("HTTP {status}")).into())
}
}
}
}
/// Disconnect from the current proxy, if any.
pub async fn disconnect_proxy(server_addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
let request_url = format!("http://{server_addr}/api/v1/admin/proxy");
if let Err(e) = client
.delete(&request_url)
.send()
.await
.and_then(|r| r.error_for_status())
{
error!("Failed to disconnect proxy: {e}");
return Err(e.into());
}
Ok(())
}
/// Start background probing for proxies.
pub async fn start_proxy_probe(server_addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/proxy/probe");
if let Err(e) = reqwest::get(&request_url)
.await
.and_then(|r| r.error_for_status())
{
error!("Failed to start proxy probe: {e}");
return Err(e.into());
}
Ok(())
}
/// Stop background probing for proxies.
pub async fn stop_proxy_probe(server_addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
let request_url = format!("http://{server_addr}/api/v1/admin/proxy/probe");
if let Err(e) = client
.delete(&request_url)
.send()
.await
.and_then(|r| r.error_for_status())
{
error!("Failed to stop proxy probe: {e}");
return Err(e.into());
}
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-cli/src/inspect.rs | mycelium-cli/src/inspect.rs | use std::net::IpAddr;
use mycelium::crypto::PublicKey;
use serde::Serialize;
#[derive(Debug, Serialize)]
struct InspectOutput {
#[serde(rename = "publicKey")]
public_key: PublicKey,
address: IpAddr,
}
/// Inspect the given pubkey, or the local key if no pubkey is given
pub fn inspect(pubkey: PublicKey, json: bool) -> Result<(), Box<dyn std::error::Error>> {
let address = pubkey.address().into();
if json {
let out = InspectOutput {
public_key: pubkey,
address,
};
let out_string = serde_json::to_string_pretty(&out)?;
println!("{out_string}");
} else {
println!("Public key: {pubkey}");
println!("Address: {address}");
}
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mobile/src/lib.rs | mobile/src/lib.rs | use std::convert::TryFrom;
use std::io;
use std::net::{IpAddr, SocketAddr};
use tracing::{error, info};
use metrics::Metrics;
use mycelium::endpoint::Endpoint;
use mycelium::{crypto, metrics, Config, Node};
use once_cell::sync::Lazy;
use tokio::sync::{mpsc, Mutex};
use tokio::time::{sleep, timeout, Duration};
const CHANNEL_MSG_OK: &str = "ok";
const CHANNEL_TIMEOUT: u64 = 2;
/// Default Socks5 port.
// TODO: Port should be included in mycelium response
const DEFAULT_SOCKS_PORT: u16 = 1080;
#[cfg(target_os = "android")]
fn setup_logging() {
use tracing::level_filters::LevelFilter;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let targets = Targets::new()
.with_default(LevelFilter::INFO)
.with_target("mycelium::router", LevelFilter::WARN);
tracing_subscriber::registry()
.with(tracing_android::layer("mycelium").expect("failed to setup logger"))
.with(targets)
.init();
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
fn setup_logging() {
use tracing::level_filters::LevelFilter;
use tracing_oslog::OsLogger;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let targets = Targets::new()
.with_default(LevelFilter::INFO)
.with_target("mycelium::router", LevelFilter::WARN);
tracing_subscriber::registry()
.with(OsLogger::new("mycelium", "default"))
.with(targets)
.init();
}
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
static INIT_LOG: Lazy<()> = Lazy::new(|| {
setup_logging();
});
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
fn setup_logging_once() {
// Accessing the Lazy value will ensure setup_logging is called exactly once
let _ = &*INIT_LOG;
}
// Declare the channel globally so we can use it on the start & stop mycelium functions
type CommandChannelType = (Mutex<mpsc::Sender<Cmd>>, Mutex<mpsc::Receiver<Cmd>>);
static COMMAND_CHANNEL: Lazy<CommandChannelType> = Lazy::new(|| {
let (tx_cmd, rx_cmd) = mpsc::channel::<Cmd>(1);
(Mutex::new(tx_cmd), Mutex::new(rx_cmd))
});
type ResponseChannelType = (
Mutex<mpsc::Sender<Response>>,
Mutex<mpsc::Receiver<Response>>,
);
static RESPONSE_CHANNEL: Lazy<ResponseChannelType> = Lazy::new(|| {
let (tx_resp, rx_resp) = mpsc::channel::<Response>(1);
(Mutex::new(tx_resp), Mutex::new(rx_resp))
});
#[tokio::main]
#[allow(unused_variables)] // because tun_fd is only used in android and ios
pub async fn start_mycelium(peers: Vec<String>, tun_fd: i32, priv_key: Vec<u8>, enable_dns: bool) {
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
setup_logging_once();
info!("starting mycelium");
let endpoints: Vec<Endpoint> = peers
.into_iter()
.filter_map(|peer| peer.parse().ok())
.collect();
let secret_key = build_secret_key(priv_key).await.unwrap();
let config = Config {
node_key: secret_key,
peers: endpoints,
no_tun: false,
tcp_listen_port: DEFAULT_TCP_LISTEN_PORT,
quic_listen_port: None,
peer_discovery_port: None, // disable multicast discovery
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
tun_name: "tun0".to_string(),
metrics: NoMetrics,
private_network_config: None,
firewall_mark: None,
#[cfg(any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "mactunfd"),
))]
tun_fd: Some(tun_fd),
update_workers: 1,
cdn_cache: None,
enable_dns,
};
let _node = match Node::new(config).await {
Ok(node) => {
info!("node successfully created");
node
}
Err(err) => {
error!("failed to create mycelium node: {err}");
return;
}
};
let mut rx = COMMAND_CHANNEL.1.lock().await;
loop {
tokio::select! {
_ = tokio::signal::ctrl_c() => {
info!("Received SIGINT, stopping mycelium node");
break;
}
cmd = rx.recv() => {
match cmd.unwrap().cmd {
CmdType::Stop => {
info!("Received stop command, stopping mycelium node");
send_response(vec![CHANNEL_MSG_OK.to_string()]).await;
break;
}
CmdType::Status => {
let mut vec: Vec<String> = Vec::new();
for info in _node.peer_info() {
// Create a JSON object with detailed peer statistics
let peer_json = serde_json::json!({
"protocol": info.endpoint.proto().to_string(),
"address": info.endpoint.address().to_string(),
"peerType": info.pt.to_string(),
"connectionState": info.connection_state.to_string(),
"rxBytes": info.rx_bytes,
"txBytes": info.tx_bytes,
"discoveredSeconds": info.discovered,
"lastConnectedSeconds": info.last_connected
});
vec.push(peer_json.to_string());
}
send_response(vec).await;
}
CmdType::ProxyProbeStart => {
info!("Received proxy probe start command");
_node.start_proxy_scan();
send_response(vec![CHANNEL_MSG_OK.to_string()]).await;
},
CmdType::ProxyProbeStop => {
info!("Received proxy probe stop command");
_node.stop_proxy_scan();
send_response(vec![CHANNEL_MSG_OK.to_string()]).await;
},
CmdType::ProxyList => {
info!("Received proxy list command");
let known_proxies = _node.known_proxies();
send_response(known_proxies.into_iter().map(|ip| SocketAddr::from((IpAddr::from(ip), DEFAULT_SOCKS_PORT)).to_string()).collect()).await;
},
CmdType::ProxyConnect(remote) => {
info!(?remote, "Received proxy connect command");
let res = match _node.connect_proxy(remote).await {
Ok(v) => v.to_string(),
Err(e) => e.to_string(),
};
send_response(vec![res]).await;
},
CmdType::ProxyDisconnect => {
info!("Received proxy disconnect command");
_node.disconnect_proxy();
send_response(vec![CHANNEL_MSG_OK.to_string()]).await;
},
}
}
}
}
info!("mycelium stopped");
}
struct Cmd {
cmd: CmdType,
}
enum CmdType {
Stop,
Status,
ProxyProbeStart,
ProxyProbeStop,
ProxyList,
ProxyConnect(Option<SocketAddr>),
ProxyDisconnect,
}
struct Response {
response: Vec<String>,
}
// stop_mycelium returns string with the status of the command
#[tokio::main]
pub async fn stop_mycelium() -> String {
if let Err(e) = send_command(CmdType::Stop).await {
return e.to_string();
}
match recv_response().await {
Ok(_) => CHANNEL_MSG_OK.to_string(),
Err(e) => e.to_string(),
}
}
// get_peer_status returns vector of string
// first element is always the status of the command (ok or error)
// next elements are the peer status
#[tokio::main]
pub async fn get_peer_status() -> Vec<String> {
if let Err(e) = send_command(CmdType::Status).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn start_proxy_probe() -> Vec<String> {
if let Err(e) = send_command(CmdType::ProxyProbeStart).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn stop_proxy_probe() -> Vec<String> {
if let Err(e) = send_command(CmdType::ProxyProbeStop).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn list_proxies() -> Vec<String> {
if let Err(e) = send_command(CmdType::ProxyList).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
/// Conenct to the given proxy. Remote must either be an empty string, or a valid socket address
/// (e.g. "[400:abcd:efgh::1]:1080").
#[tokio::main]
pub async fn proxy_connect(remote: String) -> Vec<String> {
let remote = if remote.is_empty() {
None
} else {
match remote.parse::<SocketAddr>() {
Ok(s) => Some(s),
Err(e) => {
return vec![e.to_string()];
}
}
};
if let Err(e) = send_command(CmdType::ProxyConnect(remote)).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn proxy_disconnect() -> Vec<String> {
if let Err(e) = send_command(CmdType::ProxyDisconnect).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn get_status() -> Result<String, NodeError> {
Err(NodeError::NodeDead)
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum NodeError {
#[error("err_node_dead")]
NodeDead,
#[error("err_node_timeout")]
NodeTimeout,
}
async fn send_command(cmd_type: CmdType) -> Result<(), NodeError> {
let tx = COMMAND_CHANNEL.0.lock().await;
tokio::select! {
_ = sleep(Duration::from_secs(CHANNEL_TIMEOUT)) => {
Err(NodeError::NodeTimeout)
}
result = tx.send(Cmd { cmd: cmd_type }) => {
match result {
Ok(_) => Ok(()),
Err(_) => Err(NodeError::NodeDead)
}
}
}
}
async fn send_response(resp: Vec<String>) {
let tx = RESPONSE_CHANNEL.0.lock().await;
tokio::select! {
_ = sleep(Duration::from_secs(CHANNEL_TIMEOUT)) => {
error!("send_response timeout");
}
result = tx.send(Response { response: resp }) => {
match result {
Ok(_) => {},
Err(_) =>{error!("send_response failed");},
}
}
}
}
async fn recv_response() -> Result<Vec<String>, NodeError> {
let mut rx = RESPONSE_CHANNEL.1.lock().await;
let duration = Duration::from_secs(CHANNEL_TIMEOUT);
match timeout(duration, rx.recv()).await {
Ok(result) => match result {
Some(resp) => Ok(resp.response),
None => Err(NodeError::NodeDead),
},
Err(_) => Err(NodeError::NodeTimeout),
}
}
#[derive(Clone)]
pub struct NoMetrics;
impl Metrics for NoMetrics {}
/// The default port on the underlay to listen on for incoming TCP connections.
const DEFAULT_TCP_LISTEN_PORT: u16 = 9651;
fn convert_slice_to_array32(slice: &[u8]) -> Result<[u8; 32], std::array::TryFromSliceError> {
<[u8; 32]>::try_from(slice)
}
async fn build_secret_key<T>(bin: Vec<u8>) -> Result<T, io::Error>
where
T: From<[u8; 32]>,
{
Ok(T::from(convert_slice_to_array32(bin.as_slice()).unwrap()))
}
/// generate secret key
/// it is used by android & ios app
pub fn generate_secret_key() -> Vec<u8> {
crypto::SecretKey::new().as_bytes().into()
}
/// generate node_address from secret key
pub fn address_from_secret_key(data: Vec<u8>) -> String {
let data = <[u8; 32]>::try_from(data.as_slice()).unwrap();
let secret_key = crypto::SecretKey::from(data);
crypto::PublicKey::from(&secret_key).address().to_string()
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-metrics/src/lib.rs | mycelium-metrics/src/lib.rs | //! This crate provides implementations of [`the Metrics trait`](mycelium::metrics::Metrics).
//! 2 options are exposed currently: a NOOP implementation which doesn't record anything,
//! and a prometheus exporter which exposes all metrics in a promtheus compatible format.
mod noop;
pub use noop::NoMetrics;
#[cfg(feature = "prometheus")]
mod prometheus;
#[cfg(feature = "prometheus")]
pub use prometheus::PrometheusExporter;
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-metrics/src/noop.rs | mycelium-metrics/src/noop.rs | use mycelium::metrics::Metrics;
#[derive(Clone)]
pub struct NoMetrics;
impl Metrics for NoMetrics {}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-metrics/src/prometheus.rs | mycelium-metrics/src/prometheus.rs | use axum::{routing::get, Router};
use mycelium::metrics::Metrics;
use prometheus::{
opts, register_int_counter, register_int_counter_vec, register_int_gauge, Encoder, IntCounter,
IntCounterVec, IntGauge, TextEncoder,
};
use tracing::{error, info};
use std::net::SocketAddr;
/// A [`Metrics`] implementation which uses prometheus to expose the metrics to the outside world.
#[derive(Clone)]
pub struct PrometheusExporter {
router_processed_tlvs: IntCounterVec,
router_peer_added: IntCounter,
router_peer_removed: IntCounter,
router_peer_died: IntCounter,
router_route_selection_ran: IntCounter,
router_source_key_expired: IntCounter,
router_expired_routes: IntCounterVec,
router_selected_route_expired: IntCounter,
router_triggered_update: IntCounter,
router_route_packet: IntCounterVec,
router_seqno_action: IntCounterVec,
router_tlv_handling_time_spent: IntCounterVec,
router_update_dead_peer: IntCounter,
router_received_tlvs: IntCounter,
router_tlv_source_died: IntCounter,
router_tlv_discarded: IntCounter,
router_propage_selected_peers_time_spent: IntCounter,
router_update_skipped_route_selection: IntCounter,
router_update_denied_by_filter: IntCounter,
router_update_not_interested: IntCounter,
peer_manager_peer_added: IntCounterVec,
peer_manager_known_peers: IntGauge,
peer_manager_connection_attemps: IntCounterVec,
}
impl PrometheusExporter {
/// Create a new [`PrometheusExporter`].
pub fn new() -> Self {
Self {
router_processed_tlvs: register_int_counter_vec!(
opts!(
"mycelium_router_processed_tlvs",
"Amount of processed TLV's from peers, by type of TLV"
), &["tlv_type"]
).expect("Can register int counter vec in default registry"),
router_peer_added: register_int_counter!(
"mycelium_router_peer_added",
"Amount of times a peer was added to the router"
).expect("Can register int counter in default registry"),
router_peer_removed: register_int_counter!(
"mycelium_router_peer_removed",
"Amount of times a peer was removed from the router"
).expect("Can register int counter in default registry"),
router_peer_died: register_int_counter!(
"mycelium_router_peer_died",
"Amount of times the router noticed a peer was dead, or the peer noticed itself and informed the router",
).expect("Can register int counter in default registry"),
router_route_selection_ran: register_int_counter!(
"mycelium_router_route_selections",
"Amount of times a route selection procedure was ran as result of routes expiring or peers being disconnected. Does not include route selection after an update",
).expect("Can register int counte rin default registry"),
router_source_key_expired: register_int_counter!(
"mycelium_router_source_key_expired",
"Amount of source keys expired"
)
.expect("Can register int counter in default registry"),
router_expired_routes: register_int_counter_vec!(
opts!(
"mycelium_router_expired_routes",
"Route expiration events and the action taken on the route",
),
&["action"]
)
.expect("Can register int counter vec in default registry"),
router_selected_route_expired: register_int_counter!(
"mycelium_router_selected_route_expired",
"Amount of times a selected route in the routing table expired"
)
.expect("Can register int counter in default registry"),
router_triggered_update: register_int_counter!(
"mycelium_router_triggered_updates",
"Amount of triggered updates sent"
)
.expect("Can register int counter in default registry"),
router_route_packet: register_int_counter_vec!(
opts!(
"mycelium_router_packets_routed",
"What happened to a routed data packet"
),
&["verdict"],
)
.expect("Can register int counter vec in default registry"),
router_seqno_action: register_int_counter_vec!(
opts!(
"mycelium_router_seqno_handling",
"What happened to a received seqno request",
),
&["action"],
)
.expect("Can register int counter vec in default registry"),
router_tlv_handling_time_spent: register_int_counter_vec!(
opts!(
"mycelium_router_tlv_handling_time",
"Amount of time spent handling incoming TLV packets, in nanoseconds",
),
&["tlv_type"],
)
.expect("Can register an int counter vec in default registry"),
router_update_dead_peer: register_int_counter!(
"mycelium_router_update_dead_peer",
"Amount of updates we tried to send to a peer, where we found the peer to be dead before actually sending"
)
.expect("Can register an int counter in default registry"),
router_received_tlvs: register_int_counter!(
"mycelium_router_received_tlvs",
"Amount of tlv's received by peers",
)
.expect("Can register an int counter in the default registry"),
router_tlv_source_died: register_int_counter!(
"mycelium_router_tlv_source_died",
"Dropped TLV's which have been received, but where the peer has died before they could be processed",
)
.expect("Can register an int counter in default registry"),
router_tlv_discarded: register_int_counter!(
"mycelium_router_tlv_discarded",
"Dropped TLV's which have been received, but where not processed because the router couldn't keep up",
)
.expect("Can register an int counter in default registry"),
router_propage_selected_peers_time_spent: register_int_counter!(
"mycelium_router_propagate_selected_route_time",
"Time spent in the propagate_selected_route task, which periodically announces selected routes to peers. Measurement is in nanoseconds",
)
.expect("Can register an int counter in default registry"),
router_update_skipped_route_selection: register_int_counter!(
"mycelium_router_update_skipped_route_selection",
"Updates which were processed but did not run the route selection step, because the updated route could not be selected anyway",
)
.expect("Can register an int counter in default registry"),
router_update_denied_by_filter: register_int_counter!(
"mycelium_router_update_denied",
"Updates which were received and immediately denied by a configured filter",
)
.expect("Can register an int counter in default registry"),
router_update_not_interested: register_int_counter!(
"mycelium_router_update_not_interested",
"Updates which were allowed by the configured filters, but not of interest as they were either not feasible, or retractions, for an unknown subnet",
)
.expect("Can register an int counter in default registry"),
peer_manager_peer_added: register_int_counter_vec!(
opts!(
"mycelium_peer_manager_peers_added",
"Peers added to the peer manager at runtime, by peer type"
),
&["peer_type"],
)
.expect("Can register int counter vec in default registry"),
peer_manager_known_peers: register_int_gauge!(
"mycelium_peer_manager_known_peers",
"Amount of known peers in the peer manager"
)
.expect("Can register int gauge in default registry"),
peer_manager_connection_attemps: register_int_counter_vec!(
opts!(
"mycelium_peer_manager_connection_attempts",
"Count how many connections the peer manager started to remotes, and finished"
),
&["connection_state"]
)
.expect("Can register int counter vec in the default registry"),
}
}
/// Spawns a HTTP server on the provided [`SocketAddr`], to export the gathered metrics. Metrics
/// are served under the /metrics endpoint.
pub fn spawn(self, listen_addr: SocketAddr) {
info!("Enable system metrics on http://{listen_addr}/metrics");
let app = Router::new().route("/metrics", get(serve_metrics));
tokio::spawn(async move {
let listener = match tokio::net::TcpListener::bind(listen_addr).await {
Ok(listener) => listener,
Err(e) => {
error!("Failed to bind listener for Http metrics server: {e}");
error!("metrics disabled");
return;
}
};
let server = axum::serve(listener, app.into_make_service());
if let Err(e) = server.await {
error!("Http API server error: {e}");
}
});
}
}
/// Expose prometheus formatted metrics
async fn serve_metrics() -> String {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
// Gather the metrics.
let metric_families = prometheus::gather();
// Encode them to send.
encoder
.encode(&metric_families, &mut buffer)
.expect("Can encode metrics");
String::from_utf8(buffer).expect("Metrics are encoded in valid prometheus format")
}
impl Metrics for PrometheusExporter {
#[inline]
fn router_process_hello(&self) {
self.router_processed_tlvs
.with_label_values(&["hello"])
.inc()
}
#[inline]
fn router_process_ihu(&self) {
self.router_processed_tlvs.with_label_values(&["ihu"]).inc()
}
#[inline]
fn router_process_seqno_request(&self) {
self.router_processed_tlvs
.with_label_values(&["seqno_request"])
.inc()
}
#[inline]
fn router_process_route_request(&self, wildcard: bool) {
let label = if wildcard {
"wildcard_route_request"
} else {
"route_request"
};
self.router_processed_tlvs.with_label_values(&[label]).inc()
}
#[inline]
fn router_process_update(&self) {
self.router_processed_tlvs
.with_label_values(&["update"])
.inc()
}
#[inline]
fn router_peer_added(&self) {
self.router_peer_added.inc()
}
#[inline]
fn router_peer_removed(&self) {
self.router_peer_removed.inc()
}
#[inline]
fn router_peer_died(&self) {
self.router_peer_died.inc()
}
#[inline]
fn router_route_selection_ran(&self) {
self.router_route_selection_ran.inc()
}
#[inline]
fn router_source_key_expired(&self) {
self.router_source_key_expired.inc()
}
#[inline]
fn router_route_key_expired(&self, removed: bool) {
let label = if removed { "removed" } else { "retracted" };
self.router_expired_routes.with_label_values(&[label]).inc()
}
#[inline]
fn router_selected_route_expired(&self) {
self.router_selected_route_expired.inc()
}
#[inline]
fn router_triggered_update(&self) {
self.router_triggered_update.inc()
}
#[inline]
fn router_route_packet_local(&self) {
self.router_route_packet.with_label_values(&["local"]).inc()
}
#[inline]
fn router_route_packet_forward(&self) {
self.router_route_packet
.with_label_values(&["forward"])
.inc()
}
#[inline]
fn router_route_packet_ttl_expired(&self) {
self.router_route_packet
.with_label_values(&["ttl_expired"])
.inc()
}
#[inline]
fn router_route_packet_no_route(&self) {
self.router_route_packet
.with_label_values(&["no_route"])
.inc()
}
#[inline]
fn router_seqno_request_reply_local(&self) {
self.router_seqno_action
.with_label_values(&["reply_local"])
.inc()
}
#[inline]
fn router_seqno_request_bump_seqno(&self) {
self.router_seqno_action
.with_label_values(&["bump_seqno"])
.inc()
}
#[inline]
fn router_seqno_request_dropped_ttl(&self) {
self.router_seqno_action
.with_label_values(&["ttl_expired"])
.inc()
}
#[inline]
fn router_seqno_request_forward_feasible(&self) {
self.router_seqno_action
.with_label_values(&["forward_feasible"])
.inc()
}
#[inline]
fn router_seqno_request_forward_unfeasible(&self) {
self.router_seqno_action
.with_label_values(&["forward_unfeasible"])
.inc()
}
#[inline]
fn router_seqno_request_unhandled(&self) {
self.router_seqno_action
.with_label_values(&["unhandled"])
.inc()
}
#[inline]
fn router_time_spent_handling_tlv(&self, duration: std::time::Duration, tlv_type: &str) {
self.router_tlv_handling_time_spent
.with_label_values(&[tlv_type])
.inc_by(duration.as_nanos() as u64)
}
#[inline]
fn router_update_dead_peer(&self) {
self.router_update_dead_peer.inc()
}
#[inline]
fn router_received_tlv(&self) {
self.router_received_tlvs.inc()
}
#[inline]
fn router_tlv_source_died(&self) {
self.router_tlv_source_died.inc()
}
#[inline]
fn router_tlv_discarded(&self) {
self.router_tlv_discarded.inc()
}
#[inline]
fn router_time_spent_periodic_propagating_selected_routes(
&self,
duration: std::time::Duration,
) {
self.router_propage_selected_peers_time_spent
.inc_by(duration.as_nanos() as u64)
}
#[inline]
fn router_update_skipped_route_selection(&self) {
self.router_update_skipped_route_selection.inc()
}
#[inline]
fn router_update_denied_by_filter(&self) {
self.router_update_denied_by_filter.inc()
}
#[inline]
fn router_update_not_interested(&self) {
self.router_update_not_interested.inc()
}
#[inline]
fn peer_manager_peer_added(&self, pt: mycelium::peer_manager::PeerType) {
let label = match pt {
mycelium::peer_manager::PeerType::Static => "static",
mycelium::peer_manager::PeerType::Inbound => "inbound",
mycelium::peer_manager::PeerType::LinkLocalDiscovery => "link_local",
};
self.peer_manager_peer_added
.with_label_values(&[label])
.inc()
}
#[inline]
fn peer_manager_known_peers(&self, amount: usize) {
self.peer_manager_known_peers.set(amount as i64)
}
#[inline]
fn peer_manager_connection_attempted(&self) {
self.peer_manager_connection_attemps
.with_label_values(&["started"])
.inc()
}
#[inline]
fn peer_manager_connection_finished(&self) {
self.peer_manager_connection_attemps
.with_label_values(&["finished"])
.inc()
}
}
impl Default for PrometheusExporter {
fn default() -> Self {
Self::new()
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/api.rs | mycelium-ui/src/api.rs | use mycelium::endpoint::Endpoint;
use mycelium_api::AddPeer;
use std::net::SocketAddr;
use urlencoding::encode;
pub async fn get_peers(
server_addr: SocketAddr,
) -> Result<Vec<mycelium::peer_manager::PeerStats>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium::peer_manager::PeerStats>>().await {
Err(e) => Err(e),
Ok(peers) => Ok(peers),
},
}
}
pub async fn get_selected_routes(
server_addr: SocketAddr,
) -> Result<Vec<mycelium_api::Route>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/selected");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium_api::Route>>().await {
Err(e) => Err(e),
Ok(selected_routes) => Ok(selected_routes),
},
}
}
pub async fn get_fallback_routes(
server_addr: SocketAddr,
) -> Result<Vec<mycelium_api::Route>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/fallback");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium_api::Route>>().await {
Err(e) => Err(e),
Ok(selected_routes) => Ok(selected_routes),
},
}
}
pub async fn get_node_info(server_addr: SocketAddr) -> Result<mycelium_api::Info, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<mycelium_api::Info>().await {
Err(e) => Err(e),
Ok(node_info) => Ok(node_info),
},
}
}
pub async fn remove_peer(
server_addr: SocketAddr,
peer_endpoint: Endpoint,
) -> Result<(), reqwest::Error> {
let full_endpoint = format!(
"{}://{}",
peer_endpoint.proto().to_string().to_lowercase(),
peer_endpoint.address()
);
let encoded_full_endpoint = encode(&full_endpoint);
let request_url = format!(
"http://{}/api/v1/admin/peers/{}",
server_addr, encoded_full_endpoint
);
let client = reqwest::Client::new();
client
.delete(request_url)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub async fn add_peer(
server_addr: SocketAddr,
peer_endpoint: String,
) -> Result<(), reqwest::Error> {
println!("adding peer: {peer_endpoint}");
let client = reqwest::Client::new();
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
client
.post(request_url)
.json(&AddPeer {
endpoint: peer_endpoint,
})
.send()
.await?
.error_for_status()?;
Ok(())
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/main.rs | mycelium-ui/src/main.rs | #![allow(non_snake_case)]
// Disable terminal popup on Windows
#![cfg_attr(feature = "bundle", windows_subsystem = "windows")]
mod api;
mod components;
use components::home::Home;
use components::peers::Peers;
use components::routes::Routes;
use dioxus::prelude::*;
use mycelium::{endpoint::Endpoint, peer_manager::PeerStats};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr, SocketAddr},
};
const _: manganis::Asset = manganis::asset!("assets/styles.css");
const DEFAULT_SERVER_ADDR: SocketAddr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8989);
fn main() {
// Init logger
dioxus_logger::init(tracing::Level::INFO).expect("failed to init logger");
let config = dioxus::desktop::Config::new()
.with_custom_head(r#"<link rel="stylesheet" href="styles.css">"#.to_string());
LaunchBuilder::desktop().with_cfg(config).launch(App);
// dioxus::launch(App);
}
#[component]
fn App() -> Element {
use_context_provider(|| Signal::new(ServerAddress(DEFAULT_SERVER_ADDR)));
use_context_provider(|| Signal::new(ServerConnected(false)));
use_context_provider(|| {
Signal::new(PeerSignalMapping(
HashMap::<Endpoint, Signal<PeerStats>>::new(),
))
});
use_context_provider(|| Signal::new(StopFetchingPeerSignal(false)));
rsx! {
Router::<Route> {
config: || {
RouterConfig::default().on_update(|state| {
use_context::<Signal<StopFetchingPeerSignal>>().write().0 = state.current() != Route::Peers {};
(state.current() == Route::Peers {}).then_some(NavigationTarget::Internal(Route::Peers {}))
})
}
}
}
}
#[derive(Clone, Routable, Debug, PartialEq)]
#[rustfmt::skip]
pub enum Route {
#[layout(components::layout::Layout)]
#[route("/")]
Home {},
#[route("/peers")]
Peers,
#[route("/routes")]
Routes,
#[end_layout]
#[route("/:..route")]
PageNotFound { route: Vec<String> },
}
//
#[derive(Clone, PartialEq)]
struct SearchState {
query: String,
column: String,
}
// This signal is used to stop the loop that keeps fetching information about the peers when
// looking at the peers table, e.g. when the user goes back to Home or Routes page.
#[derive(Clone, PartialEq)]
struct StopFetchingPeerSignal(bool);
#[derive(Clone, PartialEq)]
struct ServerAddress(SocketAddr);
#[derive(Clone, PartialEq)]
struct ServerConnected(bool);
#[derive(Clone, PartialEq)]
struct PeerSignalMapping(HashMap<Endpoint, Signal<PeerStats>>);
pub fn get_sort_indicator(
sort_column: Signal<String>,
sort_direction: Signal<SortDirection>,
column: String,
) -> String {
if *sort_column.read() == column {
match *sort_direction.read() {
SortDirection::Ascending => " ↑".to_string(),
SortDirection::Descending => " ↓".to_string(),
}
} else {
"".to_string()
}
}
#[component]
fn PageNotFound(route: Vec<String>) -> Element {
rsx! {
p { "Page not found"}
}
}
#[derive(Clone)]
pub enum SortDirection {
Ascending,
Descending,
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/components.rs | mycelium-ui/src/components.rs | pub mod home;
pub mod layout;
pub mod peers;
pub mod routes;
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/components/home.rs | mycelium-ui/src/components/home.rs | use crate::api;
use crate::{ServerAddress, ServerConnected};
use dioxus::prelude::*;
use std::net::SocketAddr;
use std::str::FromStr;
#[component]
pub fn Home() -> Element {
let mut server_addr = use_context::<Signal<ServerAddress>>();
let mut new_address = use_signal(|| server_addr.read().0.to_string());
let mut node_info = use_resource(fetch_node_info);
let try_connect = move |_| {
if let Ok(addr) = SocketAddr::from_str(&new_address.read()) {
server_addr.write().0 = addr;
node_info.restart();
}
};
rsx! {
div { class: "home-container",
h2 { "Node information" }
div { class: "server-input",
input {
placeholder: "Server address (e.g. 127.0.0.1:8989)",
value: "{new_address}",
oninput: move |evt| new_address.set(evt.value().clone()),
}
button { onclick: try_connect, "Connect" }
}
{match node_info.read().as_ref() {
Some(Ok(info)) => rsx! {
p {
"Node subnet: ",
span { class: "bold", "{info.node_subnet}" }
}
p {
"Node public key: ",
span { class: "bold", "{info.node_pubkey}" }
}
},
Some(Err(e)) => rsx! {
p { class: "error", "Error: {e}" }
},
None => rsx! {
p { "Enter a server address and click 'Connect' to fetch node information." }
}
}}
}
}
}
async fn fetch_node_info() -> Result<mycelium_api::Info, reqwest::Error> {
let server_addr = use_context::<Signal<ServerAddress>>();
let mut server_connected = use_context::<Signal<ServerConnected>>();
let address = server_addr.read().0;
match api::get_node_info(address).await {
Ok(info) => {
server_connected.write().0 = true;
Ok(info)
}
Err(e) => {
server_connected.write().0 = false;
Err(e)
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/components/layout.rs | mycelium-ui/src/components/layout.rs | use crate::{api, Route, ServerAddress};
use dioxus::prelude::*;
use dioxus_free_icons::{icons::fa_solid_icons::FaChevronLeft, Icon};
#[component]
pub fn Layout() -> Element {
let sidebar_collapsed = use_signal(|| false);
rsx! {
div { class: "app-container",
Header {}
div { class: "content-container",
Sidebar { collapsed: sidebar_collapsed }
main { class: if *sidebar_collapsed.read() { "main-content expanded" } else { "main-content" },
Outlet::<Route> {}
}
}
}
}
}
#[component]
pub fn Header() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_node_info = use_resource(move || api::get_node_info(server_addr.read().0));
rsx! {
header {
h1 { "Mycelium Network Dashboard" }
div { class: "node-info",
{ match &*fetched_node_info.read_unchecked() {
Some(Ok(info)) => rsx! {
span { "Subnet: {info.node_subnet}" }
span { class: "separator", "|" }
span { "Public Key: {info.node_pubkey}" }
},
Some(Err(_)) => rsx! { span { "Error loading node info" } },
None => rsx! { span { "Loading node info..." } },
}}
}
}
}
}
#[component]
pub fn Sidebar(collapsed: Signal<bool>) -> Element {
rsx! {
nav { class: if *collapsed.read() { "sidebar collapsed" } else { "sidebar" },
ul {
li { Link { to: Route::Home {}, "Home" } }
li { Link { to: Route::Peers {}, "Peers" } }
li { Link { to: Route::Routes {}, "Routes" } }
}
}
button { class: if *collapsed.read() { "toggle-sidebar collapsed" } else { "toggle-sidebar" },
onclick: {
let c = *collapsed.read();
move |_| collapsed.set(!c)
},
Icon {
icon: FaChevronLeft,
}
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/components/peers.rs | mycelium-ui/src/components/peers.rs | use crate::get_sort_indicator;
use crate::{api, SearchState, ServerAddress, SortDirection, StopFetchingPeerSignal};
use dioxus::prelude::*;
use dioxus_charts::LineChart;
use human_bytes::human_bytes;
use mycelium::{
endpoint::Endpoint,
peer_manager::{PeerStats, PeerType},
};
use std::{
cmp::Ordering,
collections::{HashMap, HashSet, VecDeque},
str::FromStr,
};
use tracing::{error, info};
const REFRESH_RATE_MS: u64 = 500;
const MAX_DATA_POINTS: usize = 8; // displays last 4 seconds
#[component]
pub fn Peers() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
let error = use_signal(|| None::<String>);
let peer_data = use_signal(HashMap::<Endpoint, PeerStats>::new);
let _ = use_resource(move || async move {
to_owned![server_addr, error, peer_data];
loop {
let stop_fetching_signal = use_context::<Signal<StopFetchingPeerSignal>>().read().0;
if !stop_fetching_signal {
match api::get_peers(server_addr).await {
Ok(fetched_peers) => {
peer_data.with_mut(|data| {
// Collect the endpoint from the fetched peers
let fetched_endpoints: HashSet<Endpoint> =
fetched_peers.iter().map(|peer| peer.endpoint).collect();
// Remove peers that are no longer in the fetched data
data.retain(|endpoint, _| fetched_endpoints.contains(endpoint));
// Insert or update the fetched peers
for peer in fetched_peers {
data.insert(peer.endpoint, peer);
}
});
error.set(None);
}
Err(e) => {
eprintln!("Error fetching peers: {}", e);
error.set(Some(format!(
"An error has occurred while fetching peers: {}",
e
)))
}
}
} else {
break;
}
tokio::time::sleep(tokio::time::Duration::from_millis(REFRESH_RATE_MS)).await;
}
});
rsx! {
if let Some(err) = error.read().as_ref() {
div { class: "error-message", "{err}" }
} else {
PeersTable { peer_data: peer_data }
}
}
}
#[component]
fn PeersTable(peer_data: Signal<HashMap<Endpoint, PeerStats>>) -> Element {
let mut current_page = use_signal(|| 0);
let items_per_page = 20;
let mut sort_column = use_signal(|| "Protocol".to_string());
let mut sort_direction = use_signal(|| SortDirection::Ascending);
let peers_len = peer_data.read().len();
// Pagination
let mut change_page = move |delta: i32| {
let cur_page = *current_page.read() as i32;
current_page.set(
(cur_page + delta)
.max(0)
.min((peers_len - 1) as i32 / items_per_page),
);
};
// Sorting
let mut sort_peers_signal = move |column: String| {
if column == *sort_column.read() {
let new_sort_direction = match *sort_direction.read() {
SortDirection::Ascending => SortDirection::Descending,
SortDirection::Descending => SortDirection::Ascending,
};
sort_direction.set(new_sort_direction);
} else {
sort_column.set(column);
sort_direction.set(SortDirection::Descending);
}
// When sorting, we should jump back to the first page
current_page.set(0);
};
let sorted_peers = use_memo(move || {
let mut peers = peer_data.read().values().cloned().collect::<Vec<_>>();
sort_peers(&mut peers, &sort_column.read(), &sort_direction.read());
peers
});
// Searching
let mut search_state = use_signal(|| SearchState {
query: String::new(),
column: "Protocol".to_string(),
});
let filtered_peers = use_memo(move || {
let query = search_state.read().query.to_lowercase();
let column = &search_state.read().column;
let sorted_peers = sorted_peers.read();
sorted_peers
.iter()
.filter(|peer| match column.as_str() {
"Protocol" => peer
.endpoint
.proto()
.to_string()
.to_lowercase()
.contains(&query),
"Address" => peer
.endpoint
.address()
.ip()
.to_string()
.to_lowercase()
.contains(&query),
"Port" => peer
.endpoint
.address()
.port()
.to_string()
.to_lowercase()
.contains(&query),
"Type" => peer.pt.to_string().to_lowercase().contains(&query),
"Connection State" => peer
.connection_state
.to_string()
.to_lowercase()
.contains(&query),
"Tx bytes" => peer.tx_bytes.to_string().to_lowercase().contains(&query),
"Rx bytes" => peer.rx_bytes.to_string().to_lowercase().contains(&query),
_ => false,
})
.cloned()
.collect::<Vec<PeerStats>>()
});
let peers_len = filtered_peers.read().len();
let start = current_page * items_per_page;
let end = (start + items_per_page).min(peers_len as i32);
let current_peers = filtered_peers.read()[start as usize..end as usize].to_vec();
// Expanding peer to show rx/tx bytes graphs
let mut expanded_rows = use_signal(|| ExpandedRows(HashSet::new()));
let mut toggle_row_expansion = move |peer_endpoint: String| {
expanded_rows.with_mut(|rows| {
if rows.0.contains(&peer_endpoint) {
rows.0.remove(&peer_endpoint);
} else {
rows.0.insert(peer_endpoint);
}
});
};
let toggle_add_peer_input = use_signal(|| true); //TODO: fix UX for adding peer
let mut add_peer_error = use_signal(|| None::<String>);
let add_peer = move |peer_endpoint: String| {
spawn(async move {
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
// Check correct endpoint format and add peer
match Endpoint::from_str(&peer_endpoint) {
Ok(_) => {
if let Err(e) = api::add_peer(server_addr, peer_endpoint.clone()).await {
error!("Error adding peer: {e}");
add_peer_error.set(Some(format!("Error adding peer: {}", e)));
} else {
info!("Succesfully added peer: {peer_endpoint}");
}
}
Err(e) => {
error!("Incorrect peer endpoint: {e}");
add_peer_error.set(Some(format!("Incorrect peer endpoint: {}", e)));
}
}
});
};
let mut new_peer_endpoint = use_signal(|| "".to_string());
rsx! {
div { class: "peers-table",
h2 { "Peers" }
div { class: "search-and-add-container",
div { class: "search-container",
input {
placeholder: "Search...",
value: "{search_state.read().query}",
oninput: move |evt| search_state.write().query.clone_from(&evt.value()),
}
select {
value: "{search_state.read().column}",
onchange: move |evt| search_state.write().column.clone_from(&evt.value()),
option { value: "Protocol", "Protocol" }
option { value: "Address", "Address" }
option { value: "Port", "Port" }
option { value: "Type", "Type" }
option { value: "Connection State", "Connection State" }
option { value: "Tx bytes", "Tx bytes" }
option { value: "Rx bytes", "Rx bytes" }
}
}
div { class: "add-peer-container",
div { class: "add-peer-input-button",
if *toggle_add_peer_input.read() {
div { class: "expanded-add-peer-container",
input {
placeholder: "tcp://ipaddr:port",
oninput: move |evt| new_peer_endpoint.set(evt.value())
}
}
}
button {
onclick: move |_| add_peer(new_peer_endpoint.read().to_string()),
"Add peer"
}
}
if let Some(error) = add_peer_error.read().as_ref() {
div { class: "add-peer-error", "{error}" }
}
}
}
div { class: "table-container",
table {
thead {
tr {
th { class: "protocol-column",
onclick: move |_| sort_peers_signal("Protocol".to_string()),
"Protocol {get_sort_indicator(sort_column, sort_direction, \"Protocol\".to_string())}"
}
th { class: "address-column",
onclick: move |_| sort_peers_signal("Address".to_string()),
"Address {get_sort_indicator(sort_column, sort_direction, \"Address\".to_string())}"
}
th { class: "port-column",
onclick: move |_| sort_peers_signal("Port".to_string()),
"Port {get_sort_indicator(sort_column, sort_direction, \"Port\".to_string())}"
}
th { class: "type-column",
onclick: move |_| sort_peers_signal("Type".to_string()),
"Type {get_sort_indicator(sort_column, sort_direction, \"Type\".to_string())}"
}
th { class: "connection-state-column",
onclick: move |_| sort_peers_signal("Connection State".to_string()),
"Connection State {get_sort_indicator(sort_column, sort_direction, \"Connection State\".to_string())}"
}
th { class: "tx-bytes-column",
onclick: move |_| sort_peers_signal("Tx bytes".to_string()),
"Tx bytes {get_sort_indicator(sort_column, sort_direction, \"Tx bytes\".to_string())}"
}
th { class: "rx-bytes-column",
onclick: move |_| sort_peers_signal("Rx bytes".to_string()),
"Rx bytes {get_sort_indicator(sort_column, sort_direction, \"Rx bytes\".to_string())}"
}
}
}
tbody {
for peer in current_peers.into_iter() {
tr {
onclick: move |_| toggle_row_expansion(peer.endpoint.to_string()),
td { class: "protocol-column", "{peer.endpoint.proto()}" }
td { class: "address-column", "{peer.endpoint.address().ip()}" }
td { class: "port-column", "{peer.endpoint.address().port()}" }
td { class: "type-column", "{peer.pt}" }
td { class: "connection-state-column", "{peer.connection_state}" }
td { class: "tx-bytes-column", "{human_bytes(peer.tx_bytes as f64)}" }
td { class: "rx-bytes-column", "{human_bytes(peer.rx_bytes as f64)}" }
}
{
let peer_expanded = expanded_rows.read().0.contains(&peer.endpoint.to_string());
if peer_expanded {
rsx! {
ExpandedPeerRow {
peer_endpoint: peer.endpoint,
peer_data: peer_data,
on_close: move |_| toggle_row_expansion(peer.endpoint.to_string()),
}
}
} else {
rsx! {}
}
}
}
}
}
}
div { class: "pagination",
button {
disabled: *current_page.read() == 0,
onclick: move |_| change_page(-1),
"Previous"
}
span { "Page {current_page + 1}" }
button {
disabled: (current_page + 1) * items_per_page >= peers_len as i32,
onclick: move |_| change_page(1),
"Next"
}
}
}
}
}
#[derive(Clone)]
struct BandwidthData {
tx_bytes: u64,
rx_bytes: u64,
timestamp: tokio::time::Duration,
}
#[component]
fn ExpandedPeerRow(
peer_endpoint: Endpoint,
peer_data: Signal<HashMap<Endpoint, PeerStats>>,
on_close: EventHandler<()>,
) -> Element {
let bandwidth_data = use_signal(VecDeque::<BandwidthData>::new);
let start_time = use_signal(tokio::time::Instant::now);
use_future(move || {
to_owned![bandwidth_data, start_time, peer_data, peer_endpoint];
async move {
let mut last_tx = 0;
let mut last_rx = 0;
if let Some(peer_stats) = peer_data.read().get(&peer_endpoint) {
last_tx = peer_stats.tx_bytes;
last_rx = peer_stats.rx_bytes;
}
loop {
let current_time = tokio::time::Instant::now();
let elapsed_time = current_time.duration_since(*start_time.read());
if let Some(peer_stats) = peer_data.read().get(&peer_endpoint) {
let tx_rate =
(peer_stats.tx_bytes - last_tx) as f64 / (REFRESH_RATE_MS as f64 / 1000.0);
let rx_rate =
(peer_stats.rx_bytes - last_rx) as f64 / (REFRESH_RATE_MS as f64 / 1000.0);
bandwidth_data.with_mut(|data| {
let new_data = BandwidthData {
tx_bytes: tx_rate as u64,
rx_bytes: rx_rate as u64,
timestamp: elapsed_time,
};
data.push_back(new_data);
if data.len() > MAX_DATA_POINTS {
data.pop_front();
}
});
last_tx = peer_stats.tx_bytes;
last_rx = peer_stats.rx_bytes;
}
tokio::time::sleep(tokio::time::Duration::from_millis(REFRESH_RATE_MS)).await;
}
}
});
let tx_data = use_memo(move || {
bandwidth_data
.read()
.iter()
.map(|d| d.tx_bytes as f32)
.collect::<Vec<f32>>()
});
let rx_data = use_memo(move || {
bandwidth_data
.read()
.iter()
.map(|d| d.rx_bytes as f32)
.collect::<Vec<f32>>()
});
let labels = bandwidth_data
.read()
.iter()
.map(|d| format!("{:.1}", d.timestamp.as_secs_f64()))
.collect::<Vec<String>>();
let remove_peer = move |_| {
spawn(async move {
println!("Removing peer: {}", peer_endpoint);
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
match api::remove_peer(server_addr, peer_endpoint).await {
Ok(_) => on_close.call(()),
Err(e) => eprintln!("Error removing peer: {e}"),
}
});
};
rsx! {
tr { class: "expanded-row",
td { colspan: "7",
div { class: "expanded-content",
div { class: "graph-container",
// Tx chart
div { class: "graph-title", "Tx Bytes/s" }
LineChart {
show_grid: false,
show_dots: false,
padding_top: 80,
padding_left: 100,
padding_right: 80,
padding_bottom: 80,
label_interpolation: (|v| human_bytes(v as f64).to_string()) as fn(f32)-> String,
series: vec![tx_data.read().to_vec()],
labels: labels.clone(),
series_labels: vec!["Tx Bytes/s".into()],
}
}
div { class: "graph-container",
// Rx chart
div { class: "graph-title", "Rx Bytes/s" }
LineChart {
show_grid: false,
show_dots: false,
padding_top: 80,
padding_left: 100,
padding_right: 80,
padding_bottom: 80,
label_interpolation: (|v| human_bytes(v as f64).to_string()) as fn(f32)-> String,
series: vec![rx_data.read().clone()],
labels: labels.clone(),
series_labels: vec!["Rx Bytes/s".into()],
}
}
div { class: "button-container",
button { class: "close-button",
onclick: move |_| on_close.call(()),
"Close"
}
button { class: "remove-button",
onclick: remove_peer,
"Remove peer"
}
}
}
}
}
}
}
#[derive(Clone, PartialEq)]
struct ExpandedRows(HashSet<String>);
fn sort_peers(
peers: &mut [mycelium::peer_manager::PeerStats],
column: &str,
direction: &SortDirection,
) {
peers.sort_by(|a, b| {
let cmp = match column {
"Protocol" => a.endpoint.proto().cmp(&b.endpoint.proto()),
"Address" => a.endpoint.address().ip().cmp(&b.endpoint.address().ip()),
"Port" => a
.endpoint
.address()
.port()
.cmp(&b.endpoint.address().port()),
"Type" => PeerTypeWrapper(a.pt.clone()).cmp(&PeerTypeWrapper(b.pt.clone())),
"Connection State" => a.connection_state.cmp(&b.connection_state),
"Tx bytes" => a.tx_bytes.cmp(&b.tx_bytes),
"Rx bytes" => a.rx_bytes.cmp(&b.rx_bytes),
_ => Ordering::Equal,
};
match direction {
SortDirection::Ascending => cmp,
SortDirection::Descending => cmp.reverse(),
}
});
}
pub struct PeerTypeWrapper(pub mycelium::peer_manager::PeerType);
impl Ord for PeerTypeWrapper {
fn cmp(&self, other: &Self) -> Ordering {
match (&self.0, &other.0) {
(PeerType::Static, PeerType::Static) => Ordering::Equal,
(PeerType::Static, _) => Ordering::Less,
(PeerType::LinkLocalDiscovery, PeerType::Static) => Ordering::Greater,
(PeerType::LinkLocalDiscovery, PeerType::LinkLocalDiscovery) => Ordering::Equal,
(PeerType::LinkLocalDiscovery, PeerType::Inbound) => Ordering::Less,
(PeerType::Inbound, PeerType::Inbound) => Ordering::Equal,
(PeerType::Inbound, _) => Ordering::Greater,
}
}
}
impl PartialOrd for PeerTypeWrapper {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for PeerTypeWrapper {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Eq for PeerTypeWrapper {}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-ui/src/components/routes.rs | mycelium-ui/src/components/routes.rs | use std::cmp::Ordering;
use crate::api;
use crate::{get_sort_indicator, SearchState, ServerAddress, SortDirection};
use dioxus::prelude::*;
#[component]
pub fn Routes() -> Element {
rsx! {
SelectedRoutesTable {}
FallbackRoutesTable {}
}
}
#[component]
pub fn SelectedRoutesTable() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_selected_routes =
use_resource(move || api::get_selected_routes(server_addr.read().0));
match &*fetched_selected_routes.read_unchecked() {
Some(Ok(routes)) => {
rsx! { RoutesTable { routes: routes.clone(), table_name: "Selected"} }
}
Some(Err(e)) => rsx! { div { "An error has occurred while fetching selected routes: {e}" }},
None => rsx! { div { "Loading selected routes..." }},
}
}
#[component]
pub fn FallbackRoutesTable() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_fallback_routes =
use_resource(move || api::get_fallback_routes(server_addr.read().0));
match &*fetched_fallback_routes.read_unchecked() {
Some(Ok(routes)) => {
rsx! { RoutesTable { routes: routes.clone(), table_name: "Fallback"} }
}
Some(Err(e)) => rsx! { div { "An error has occurred while fetching fallback routes: {e}" }},
None => rsx! { div { "Loading fallback routes..." }},
}
}
#[component]
fn RoutesTable(routes: Vec<mycelium_api::Route>, table_name: String) -> Element {
let mut current_page = use_signal(|| 0);
let items_per_page = 10;
let mut sort_column = use_signal(|| "Subnet".to_string());
let mut sort_direction = use_signal(|| SortDirection::Descending);
let routes_len = routes.len();
let mut change_page = move |delta: i32| {
let cur_page = *current_page.read() as i32;
current_page.set(
(cur_page + delta)
.max(0)
.min((routes_len - 1) as i32 / items_per_page as i32) as usize,
);
};
let mut sort_routes_signal = move |column: String| {
if column == *sort_column.read() {
let new_sort_direction = match *sort_direction.read() {
SortDirection::Ascending => SortDirection::Descending,
SortDirection::Descending => SortDirection::Ascending,
};
sort_direction.set(new_sort_direction);
} else {
sort_column.set(column);
sort_direction.set(SortDirection::Ascending);
}
current_page.set(0);
};
let sorted_routes = use_memo(move || {
let mut sorted = routes.clone();
sort_routes(&mut sorted, &sort_column.read(), &sort_direction.read());
sorted
});
let mut search_state = use_signal(|| SearchState {
query: String::new(),
column: "Subnet".to_string(),
});
let filtered_routes = use_memo(move || {
let query = search_state.read().query.to_lowercase();
let column = &search_state.read().column;
sorted_routes
.read()
.iter()
.filter(|route| match column.as_str() {
"Subnet" => route.subnet.to_string().to_lowercase().contains(&query),
"Next-hop" => route.next_hop.to_string().to_lowercase().contains(&query),
"Metric" => route.metric.to_string().to_lowercase().contains(&query),
"Seqno" => route.seqno.to_string().to_lowercase().contains(&query),
_ => false,
})
.cloned()
.collect::<Vec<_>>()
});
let routes_len = filtered_routes.len();
let start = current_page * items_per_page;
let end = (start + items_per_page).min(routes_len);
let current_routes = &filtered_routes.read()[start..end];
rsx! {
div { class: "{table_name.to_lowercase()}-routes",
h2 { "{table_name} Routes" }
div { class: "search-container",
input {
placeholder: "Search...",
value: "{search_state.read().query}",
oninput: move |evt| search_state.write().query.clone_from(&evt.value()),
}
select {
value: "{search_state.read().column}",
onchange: move |evt| search_state.write().column.clone_from(&evt.value()),
option { value: "Subnet", "Subnet" }
option { value: "Next-hop", "Next-hop" }
option { value: "Metric", "Metric" }
option { value: "Seqno", "Seqno" }
}
}
div { class: "table-container",
table {
thead {
tr {
th { class: "subnet-column",
onclick: move |_| sort_routes_signal("Subnet".to_string()),
"Subnet {get_sort_indicator(sort_column, sort_direction, \"Subnet\".to_string())}"
}
th { class: "next-hop-column",
onclick: move |_| sort_routes_signal("Next-hop".to_string()),
"Next-hop {get_sort_indicator(sort_column, sort_direction, \"Next-hop\".to_string())}"
}
th { class: "metric-column",
onclick: move |_| sort_routes_signal("Metric".to_string()),
"Metric {get_sort_indicator(sort_column, sort_direction, \"Metric\".to_string())}"
}
th { class: "seqno_column",
onclick: move |_| sort_routes_signal("Seqno".to_string()),
"Seqno {get_sort_indicator(sort_column, sort_direction, \"Seqno\".to_string())}"
}
}
}
tbody {
for route in current_routes {
tr {
td { class: "subnet-column", "{route.subnet}" }
td { class: "next-hop-column", "{route.next_hop}" }
td { class: "metric-column", "{route.metric}" }
td { class: "seqno-column", "{route.seqno}" }
}
}
}
}
}
div { class: "pagination",
button {
disabled: *current_page.read() == 0,
onclick: move |_| change_page(-1),
"Previous"
}
span { "Page {current_page + 1}" }
button {
disabled: (current_page + 1) * items_per_page >= routes_len,
onclick: move |_| change_page(1),
"Next"
}
}
}
}
}
/// Sort a slice of routes in place on the given column, honoring the sort direction.
///
/// An unrecognized column name compares all entries as equal, leaving their relative
/// order untouched (the sort is stable).
fn sort_routes(routes: &mut [mycelium_api::Route], column: &str, direction: &SortDirection) {
    routes.sort_by(|left, right| {
        let ordering = match column {
            "Subnet" => left.subnet.cmp(&right.subnet),
            "Next-hop" => left.next_hop.cmp(&right.next_hop),
            "Metric" => left.metric.cmp(&right.metric),
            "Seqno" => left.seqno.cmp(&right.seqno),
            _ => Ordering::Equal,
        };
        if let SortDirection::Descending = direction {
            ordering.reverse()
        } else {
            ordering
        }
    });
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table.rs | mycelium/src/routing_table.rs | use std::{
net::{IpAddr, Ipv6Addr},
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use ip_network_table_deps_treebitmap::IpLookupTable;
use iter::{RoutingTableNoRouteIter, RoutingTableQueryIter};
use subnet_entry::SubnetEntry;
use tokio::{select, sync::mpsc, time::Duration};
use tokio_util::sync::CancellationToken;
use tracing::{error, trace};
use crate::{crypto::SharedSecret, peer::Peer, subnet::Subnet};
pub use iter::RoutingTableIter;
pub use iter_mut::RoutingTableIterMut;
pub use no_route::NoRouteSubnet;
pub use queried_subnet::QueriedSubnet;
pub use route_entry::RouteEntry;
pub use route_key::RouteKey;
pub use route_list::RouteList;
mod iter;
mod iter_mut;
mod no_route;
mod queried_subnet;
mod route_entry;
mod route_key;
mod route_list;
mod subnet_entry;
/// How long an explicit "no route" marker is kept in the table before it is purged.
const NO_ROUTE_EXPIRATION: Duration = Duration::from_secs(60);
/// The knowledge the [`RoutingTable`] has about the routes to a [`Subnet`].
pub enum Routes {
    /// Routes for the subnet exist in the table.
    Exist(RouteListReadGuard),
    /// A route request for the subnet is outstanding; no routes are known yet.
    Queried,
    /// A previous route request expired without an answer; the subnet is known to have no route.
    NoRoute,
    /// Nothing is known about the subnet.
    None,
}
impl Routes {
    /// Returns the selected route if one exists.
    pub fn selected(&self) -> Option<&RouteEntry> {
        match self {
            Routes::Exist(routes) => routes.selected(),
            _ => None,
        }
    }

    /// Returns `true` when no usable routes exist, i.e. for every state except
    /// [`Routes::Exist`] (queried, explicit no-route, or unknown subnet).
    pub fn is_none(&self) -> bool {
        match self {
            Routes::Exist(_) => false,
            _ => true,
        }
    }
}
impl From<&SubnetEntry> for Routes {
    /// Map an individual table entry to its public [`Routes`] representation.
    fn from(value: &SubnetEntry) -> Self {
        match value {
            // Take a snapshot of the route list so the caller can read it without locking.
            SubnetEntry::Exists { list } => {
                Routes::Exist(RouteListReadGuard { inner: list.load() })
            }
            SubnetEntry::Queried { .. } => Routes::Queried,
            SubnetEntry::NoRoute { .. } => Routes::NoRoute,
        }
    }
}
impl From<Option<&SubnetEntry>> for Routes {
    /// Convert an optional table lookup result, mapping an absent entry to [`Routes::None`].
    fn from(value: Option<&SubnetEntry>) -> Self {
        match value {
            None => Routes::None,
            Some(entry) => Routes::from(entry),
        }
    }
}
/// The routing table holds a list of route entries for every known subnet.
#[derive(Clone)]
pub struct RoutingTable {
    /// Write half of the left-right table; all mutations are serialized through this mutex.
    writer: Arc<Mutex<left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>>,
    /// Read half of the left-right table; reads do not block writers and vice versa.
    reader: left_right::ReadHandle<RoutingTableInner>,
    /// State shared between all clones of this routing table.
    shared: Arc<RoutingTableShared>,
}
/// State shared by every clone of a [`RoutingTable`]; dropped once the last clone goes away.
struct RoutingTableShared {
    /// Channel used to notify an external observer of expired route entries.
    expired_route_entry_sink: mpsc::Sender<RouteKey>,
    /// Token used to cancel background expiration tasks (cancelled on drop).
    cancel_token: CancellationToken,
}
/// The actual data store exposed through the left-right read/write handles.
#[derive(Default)]
struct RoutingTableInner {
    /// Longest-prefix-match table from IPv6 subnet to its entry.
    table: IpLookupTable<Ipv6Addr, Arc<SubnetEntry>>,
}
/// Hold an exclusive write lock over the routing table. While this item is in scope, no other
/// calls can get a mutable reference to the content of a routing table. Once this guard goes out
/// of scope, changes to the contained RouteList will be applied.
pub struct WriteGuard<'a> {
    /// The table the changes are written back into when this guard is dropped.
    routing_table: &'a RoutingTable,
    /// The subnet entry being modified. Mutations happen in place through the contained
    /// arc-swapped route list.
    value: Arc<SubnetEntry>,
    /// Did the RouteList exist initially?
    exists: bool,
    /// The subnet we are writing to.
    subnet: Subnet,
    /// Sink handed to route mutation callbacks so expired entries can be reported.
    expired_route_entry_sink: mpsc::Sender<RouteKey>,
    /// Token handed to route mutation callbacks so background work can be aborted on shutdown.
    cancellation_token: CancellationToken,
}
impl RoutingTable {
    /// Create a new empty RoutingTable. The passed channel is used to notify an external observer
    /// of route entry expiration events. It is the caller's responsibility to ensure these events
    /// are properly handled.
    ///
    /// # Panics
    ///
    /// This will panic if not executed in the context of a tokio runtime.
    pub fn new(expired_route_entry_sink: mpsc::Sender<RouteKey>) -> Self {
        let (writer, reader) = left_right::new();
        // The write handle is not clonable, so it is shared behind a mutex.
        let writer = Arc::new(Mutex::new(writer));
        let cancel_token = CancellationToken::new();
        let shared = Arc::new(RoutingTableShared {
            expired_route_entry_sink,
            cancel_token,
        });
        RoutingTable {
            writer,
            reader,
            shared,
        }
    }
    /// Get a list of the routes for the most precise [`Subnet`] known which contains the given
    /// [`IpAddr`].
    ///
    /// # Panics
    ///
    /// This will panic if the given address is not an IPv6 address.
    pub fn best_routes(&self, ip: IpAddr) -> Routes {
        let IpAddr::V6(ip) = ip else {
            panic!("Only IPv6 is supported currently");
        };
        self.reader
            .enter()
            .expect("Write handle is saved on the router so it is not dropped yet.")
            .table
            .longest_match(ip)
            .map(|(_, _, rl)| rl.as_ref())
            .into()
    }
    /// Get a list of all routes for the given subnet. Changes to the RoutingTable after this
    /// method returns will not be visible and require this method to be called again to be
    /// observed.
    pub fn routes(&self, subnet: Subnet) -> Routes {
        let subnet_ip = if let IpAddr::V6(ip) = subnet.address() {
            ip
        } else {
            // Only IPv6 subnets are stored, so an IPv4 subnet can never have routes.
            return Routes::None;
        };
        self.reader
            .enter()
            .expect("Write handle is saved on the router so it is not dropped yet.")
            .table
            .exact_match(subnet_ip, subnet.prefix_len().into())
            .map(Arc::as_ref)
            .into()
    }
    /// Gets continued read access to the `RoutingTable`. While the returned
    /// [`guard`](RoutingTableReadGuard) is held, updates to the `RoutingTable` will be blocked.
    pub fn read(&self) -> RoutingTableReadGuard<'_> {
        RoutingTableReadGuard {
            guard: self
                .reader
                .enter()
                .expect("Write handle is saved on RoutingTable, so this is always Some; qed"),
        }
    }
    /// Locks the `RoutingTable` for continued write access. While the returned
    /// [`guard`](RoutingTableWriteGuard) is held, methods trying to mutate the `RoutingTable`, or
    /// get mutable access otherwise, will be blocked. When the [`guard`](`RoutingTableWriteGuard`)
    /// is dropped, all queued changes will be applied.
    pub fn write(&self) -> RoutingTableWriteGuard<'_> {
        RoutingTableWriteGuard {
            write_guard: self.writer.lock().unwrap(),
            read_guard: self
                .reader
                .enter()
                .expect("Write handle is saved on RoutingTable, so this is always Some; qed"),
            expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
            cancel_token: self.shared.cancel_token.clone(),
        }
    }
    /// Get mutable access to the list of routes for the given [`Subnet`].
    ///
    /// Returns [`None`] if the subnet is unknown, or is currently in the queried/no-route state.
    ///
    /// # Panics
    ///
    /// This will panic if the subnet is not an IPv6 subnet.
    pub fn routes_mut(&self, subnet: Subnet) -> Option<WriteGuard<'_>> {
        let subnet_address = if let IpAddr::V6(ip) = subnet.address() {
            ip
        } else {
            panic!("IP v4 addresses are not supported")
        };
        let value = self
            .reader
            .enter()
            .expect("Write handle is saved next to read handle so this is always Some; qed")
            .table
            .exact_match(subnet_address, subnet.prefix_len().into())?
            .clone();
        if matches!(*value, SubnetEntry::Exists { .. }) {
            Some(WriteGuard {
                routing_table: self,
                // The entry exists in the table, so the guard's drop logic only needs to
                // delete it if the route list becomes empty.
                value,
                exists: true,
                subnet,
                expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
                cancellation_token: self.shared.cancel_token.clone(),
            })
        } else {
            None
        }
    }
    /// Adds a new [`Subnet`] to the `RoutingTable`. The returned [`WriteGuard`] can be used to
    /// insert entries. If no entry is inserted before the guard is dropped, the [`Subnet`] won't
    /// be added.
    ///
    /// # Panics
    ///
    /// This will panic if the subnet is not an IPv6 subnet.
    pub fn add_subnet(&self, subnet: Subnet, shared_secret: SharedSecret) -> WriteGuard<'_> {
        if !matches!(subnet.address(), IpAddr::V6(_)) {
            panic!("IP v4 addresses are not supported")
        };
        // Start from a fresh, empty route list; the Drop impl of the guard decides whether it
        // actually gets inserted into the table.
        let value = Arc::new(SubnetEntry::Exists {
            list: Arc::new(RouteList::new(shared_secret)).into(),
        });
        WriteGuard {
            routing_table: self,
            value,
            exists: false,
            subnet,
            expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
            cancellation_token: self.shared.cancel_token.clone(),
        }
    }
    /// Gets the selected route for an IpAddr if one exists.
    ///
    /// # Panics
    ///
    /// This will panic if the IP address is not an IPV6 address.
    pub fn selected_route(&self, address: IpAddr) -> Option<RouteEntry> {
        let IpAddr::V6(ip) = address else {
            panic!("IP v4 addresses are not supported")
        };
        self.reader
            .enter()
            .expect("Write handle is saved on RoutingTable, so this is always Some; qed")
            .table
            .longest_match(ip)
            .and_then(|(_, _, rl)| {
                let SubnetEntry::Exists { list } = &**rl else {
                    return None;
                };
                let rl = list.load();
                // The selected route, if any, is always stored at index 0.
                if rl.is_empty() || !rl[0].selected() {
                    None
                } else {
                    Some(rl[0].clone())
                }
            })
    }
    /// Marks a subnet as queried in the route table.
    ///
    /// This function will not do anything if the subnet contains valid routes.
    ///
    /// # Panics
    ///
    /// This will panic if the subnet is not an IPv6 subnet.
    pub fn mark_queried(&self, subnet: Subnet, query_timeout: tokio::time::Instant) {
        if !matches!(subnet.address(), IpAddr::V6(_)) {
            panic!("IP v4 addresses are not supported")
        };
        // Start a task to expire the queried state if we didn't have any results in time.
        {
            // We only need the write handle in the task
            let writer = self.writer.clone();
            let cancel_token = self.shared.cancel_token.clone();
            tokio::task::spawn(async move {
                select! {
                    _ = cancel_token.cancelled() => {
                        // Future got cancelled, nothing to do
                        return
                    }
                    _ = tokio::time::sleep_until(query_timeout) => {
                        // Timeout fired, mark as no route
                    }
                }
                let expiry = tokio::time::Instant::now() + NO_ROUTE_EXPIRATION;
                // Scope this so the lock for the write_handle goes out of scope when we are done
                // here, as we don't want to hold the write_handle lock while sleeping for the
                // second timeout.
                {
                    let mut write_handle = writer.lock().expect("Can lock writer");
                    // QueryExpired only takes effect if the entry is still in Queried state, so
                    // a successful resolution in the meantime is not clobbered.
                    write_handle.append(RoutingTableOplogEntry::QueryExpired(
                        subnet,
                        Arc::new(SubnetEntry::NoRoute { expiry }),
                    ));
                    write_handle.flush();
                }
                // TODO: Check if we are indeed marked as NoRoute here, if we aren't this can be
                // cancelled now
                select! {
                    _ = cancel_token.cancelled() => {
                        // Future got cancelled, nothing to do
                        return
                    }
                    _ = tokio::time::sleep_until(expiry) => {
                        // Timeout fired, remove no route entry
                    }
                }
                let mut write_handle = writer.lock().expect("Can lock writer");
                write_handle.append(RoutingTableOplogEntry::NoRouteExpired(subnet));
                write_handle.flush();
            });
        }
        let mut write_handle = self.writer.lock().expect("Can lock writer");
        // The Queried oplog entry itself is a no-op if valid routes exist for the subnet.
        write_handle.append(RoutingTableOplogEntry::Queried(
            subnet,
            Arc::new(SubnetEntry::Queried { query_timeout }),
        ));
        write_handle.flush();
    }
}
/// A consistent snapshot of a [`RouteList`], obtained without locking.
pub struct RouteListReadGuard {
    // Snapshot loaded from the arc-swap; remains valid even if the list is swapped afterwards.
    inner: arc_swap::Guard<Arc<RouteList>>,
}
impl Deref for RouteListReadGuard {
    type Target = RouteList;
    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}
/// A write guard over the [`RoutingTable`]. While this guard is held, updates won't be able to
/// complete.
pub struct RoutingTableWriteGuard<'a> {
    /// Exclusive access to the left-right write handle for the lifetime of the guard.
    write_guard: MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
    /// Read access used to drive iteration while mutations are queued on the write handle.
    read_guard: left_right::ReadGuard<'a, RoutingTableInner>,
    /// Sink handed to the mutable iterator so expired entries can be reported.
    expired_route_entry_sink: mpsc::Sender<RouteKey>,
    /// Token handed to the mutable iterator so background work can be aborted on shutdown.
    cancel_token: CancellationToken,
}
impl<'a, 'b> RoutingTableWriteGuard<'a> {
    /// Iterate over all entries in the routing table, with the ability to mutate them.
    pub fn iter_mut(&'b mut self) -> RoutingTableIterMut<'a, 'b> {
        RoutingTableIterMut::new(
            &mut self.write_guard,
            self.read_guard.table.iter(),
            self.expired_route_entry_sink.clone(),
            self.cancel_token.clone(),
        )
    }
}
impl Drop for RoutingTableWriteGuard<'_> {
    fn drop(&mut self) {
        // Make all queued changes visible to readers.
        self.write_guard.publish();
    }
}
/// A read guard over the [`RoutingTable`]. While this guard is held, updates won't be able to
/// complete.
pub struct RoutingTableReadGuard<'a> {
    guard: left_right::ReadGuard<'a, RoutingTableInner>,
}
impl RoutingTableReadGuard<'_> {
    /// Create an iterator over all subnets with existing routes in the routing table.
    pub fn iter(&self) -> RoutingTableIter<'_> {
        RoutingTableIter::new(self.guard.table.iter())
    }
    /// Create an iterator for all queried subnets in the routing table
    pub fn iter_queries(&self) -> RoutingTableQueryIter<'_> {
        RoutingTableQueryIter::new(self.guard.table.iter())
    }
    /// Create an iterator for all subnets which are currently marked as `NoRoute` in the routing
    /// table.
    pub fn iter_no_route(&self) -> RoutingTableNoRouteIter<'_> {
        RoutingTableNoRouteIter::new(self.guard.table.iter())
    }
}
impl WriteGuard<'_> {
    /// Loads the current [`RouteList`].
    ///
    /// # Panics
    ///
    /// Panics if the guarded entry is not in the [`SubnetEntry::Exists`] state.
    #[inline]
    pub fn routes(&self) -> RouteListReadGuard {
        let SubnetEntry::Exists { list } = &*self.value else {
            panic!("Write guard for non-route SubnetEntry")
        };
        RouteListReadGuard { inner: list.load() }
    }
    /// Get mutable access to the [`RouteList`]. This will update the [`RouteList`] in place
    /// without locking the [`RoutingTable`].
    ///
    /// The callback receives an owned copy of the list plus the expiration sink and cancellation
    /// token; its return value is propagated to the caller. If the list is empty after the update
    /// and the entry previously existed in the table, the subnet is removed from the table.
    /// Returns `false` without running the callback when the entry holds no routes.
    // TODO: Proper abstractions
    pub fn update_routes<
        F: FnMut(&mut RouteList, &mpsc::Sender<RouteKey>, &CancellationToken) -> bool,
    >(
        &mut self,
        mut op: F,
    ) -> bool {
        let mut res = false;
        let mut delete = false;
        if let SubnetEntry::Exists { list } = &*self.value {
            // NOTE: rcu may invoke the closure more than once under contention, so `op`
            // should be idempotent.
            list.rcu(|rl| {
                let mut new_val = rl.clone();
                let v = Arc::make_mut(&mut new_val);
                res = op(v, &self.expired_route_entry_sink, &self.cancellation_token);
                delete = v.is_empty();
                new_val
            });
            if delete && self.exists {
                trace!(subnet = %self.subnet, "Deleting subnet which became empty after updating");
                let mut writer = self.routing_table.writer.lock().unwrap();
                writer.append(RoutingTableOplogEntry::Delete(self.subnet));
                writer.publish();
            }
            res
        } else {
            false
        }
    }
    /// Set the [`RouteEntry`] with the given [`neighbour`](Peer) as the selected route.
    ///
    /// Logs an error and leaves the list untouched if no entry for the neighbour exists.
    pub fn set_selected(&mut self, neighbour: &Peer) {
        if let SubnetEntry::Exists { list } = &*self.value {
            list.rcu(|routes| {
                let mut new_routes = routes.clone();
                let routes = Arc::make_mut(&mut new_routes);
                let Some(pos) = routes.iter().position(|re| re.neighbour() == neighbour) else {
                    error!(
                        neighbour = neighbour.connection_identifier(),
                        "Failed to select route entry with given route key, no such entry"
                    );
                    return new_routes;
                };
                // We don't need a check for an empty list here, since we found a selected route there
                // _MUST_ be at least 1 entry.
                // Set the first element to unselected, then select the proper element so this also works
                // in case the existing route is "reselected".
                routes[0].set_selected(false);
                routes[pos].set_selected(true);
                // Invariant: the selected route is always kept at index 0.
                routes.swap(0, pos);
                new_routes
            });
        }
    }
    /// Unconditionally unselects the selected route, if one is present.
    ///
    /// In case no route is selected, this is a no-op.
    pub fn unselect(&mut self) {
        if let SubnetEntry::Exists { list } = &*self.value {
            list.rcu(|v| {
                let mut new_val = v.clone();
                let new_ref = Arc::make_mut(&mut new_val);
                // The selected route, if any, lives at index 0.
                if let Some(e) = new_ref.get_mut(0) {
                    e.set_selected(false);
                }
                new_val
            });
        }
    }
}
impl Drop for WriteGuard<'_> {
    /// Publish the accumulated changes to the routing table: insert a newly populated route
    /// list, delete one that became empty, or do nothing for an in-place update.
    fn drop(&mut self) {
        // FIXME: try to get rid of clones on the Arc here
        if let SubnetEntry::Exists { list } = &*self.value {
            let value = list.load();
            match self.exists {
                // The route list did not exist, and now it is not empty, so an entry was added. We
                // need to add the route list to the routing table.
                false if !value.is_empty() => {
                    trace!(subnet = %self.subnet, "Inserting new route list for subnet");
                    let mut writer = self.routing_table.writer.lock().unwrap();
                    writer.append(RoutingTableOplogEntry::Upsert(
                        self.subnet,
                        Arc::clone(&self.value),
                    ));
                    writer.publish();
                }
                // There was an existing route list which is now empty, so the entry for this subnet
                // needs to be deleted in the routing table.
                true if value.is_empty() => {
                    trace!(subnet = %self.subnet, "Removing route list for subnet");
                    let mut writer = self.routing_table.writer.lock().unwrap();
                    writer.append(RoutingTableOplogEntry::Delete(self.subnet));
                    writer.publish();
                }
                // Nothing to do in these cases. Either no value was inserted in a non existing
                // routelist, or an existing one was updated in place.
                _ => {}
            }
        }
    }
}
/// Operations applied to the left_right copies of the routing table. Each entry is absorbed
/// twice, once per copy, so the contained data must be cheaply clonable (hence the `Arc`s).
enum RoutingTableOplogEntry {
    /// Insert or Update the value for the given subnet.
    Upsert(Subnet, Arc<SubnetEntry>),
    /// Mark a subnet as queried. This is a no-op if the subnet already has routes or an
    /// outstanding query.
    Queried(Subnet, Arc<SubnetEntry>),
    /// Delete the entry for the given subnet.
    Delete(Subnet),
    /// The route request for a subnet expired, if it is still in query state mark it as not
    /// existing
    QueryExpired(Subnet, Arc<SubnetEntry>),
    /// The marker for explicitly not having a route to a subnet has expired
    NoRouteExpired(Subnet),
}
/// Convert an [`IpAddr`] into an [`Ipv6Addr`].
///
/// # Panics
///
/// Panics if the contained address is not an IPv6 address.
fn expect_ipv6(ip: IpAddr) -> Ipv6Addr {
    match ip {
        IpAddr::V6(v6) => v6,
        IpAddr::V4(_) => panic!("Expected ipv6 address"),
    }
}
impl left_right::Absorb<RoutingTableOplogEntry> for RoutingTableInner {
    // Applied to the first copy of the table; must borrow the operation since it will be
    // applied again (by value) in absorb_second. The two bodies must stay in sync.
    fn absorb_first(&mut self, operation: &mut RoutingTableOplogEntry, _other: &Self) {
        match operation {
            RoutingTableOplogEntry::Upsert(subnet, list) => {
                self.table.insert(
                    expect_ipv6(subnet.address()),
                    subnet.prefix_len().into(),
                    Arc::clone(list),
                );
            }
            RoutingTableOplogEntry::Queried(subnet, se) => {
                // Mark a query only if we don't have a valid entry
                let entry = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                    .map(Arc::deref);
                // If we have no route, transition to query, if we have a route or existing query,
                // do nothing
                if matches!(entry, None | Some(SubnetEntry::NoRoute { .. })) {
                    self.table.insert(
                        expect_ipv6(subnet.address()),
                        subnet.prefix_len().into(),
                        Arc::clone(se),
                    );
                }
            }
            RoutingTableOplogEntry::Delete(subnet) => {
                self.table
                    .remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
            }
            RoutingTableOplogEntry::QueryExpired(subnet, nre) => {
                // Only replace the entry if it is still in the Queried state; a query which
                // resolved to actual routes in the meantime is left alone.
                if let Some(entry) = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                {
                    if let SubnetEntry::Queried { .. } = &**entry {
                        self.table.insert(
                            expect_ipv6(subnet.address()),
                            subnet.prefix_len().into(),
                            Arc::clone(nre),
                        );
                    }
                }
            }
            RoutingTableOplogEntry::NoRouteExpired(subnet) => {
                // Only remove the entry if it is still the NoRoute marker.
                if let Some(entry) = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                {
                    if let SubnetEntry::NoRoute { .. } = &**entry {
                        self.table
                            .remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
                    }
                }
            }
        }
    }
    // Bring a fresh copy up to date by copying every entry from the other (current) copy.
    fn sync_with(&mut self, first: &Self) {
        for (k, ss, v) in first.table.iter() {
            self.table.insert(k, ss, v.clone());
        }
    }
    // Same logic as absorb_first, but the operation is consumed so the Arcs can be moved
    // instead of cloned.
    fn absorb_second(&mut self, operation: RoutingTableOplogEntry, _: &Self) {
        match operation {
            RoutingTableOplogEntry::Upsert(subnet, list) => {
                self.table.insert(
                    expect_ipv6(subnet.address()),
                    subnet.prefix_len().into(),
                    list,
                );
            }
            RoutingTableOplogEntry::Queried(subnet, se) => {
                // Mark a query only if we don't have a valid entry
                let entry = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                    .map(Arc::deref);
                // If we have no route, transition to query, if we have a route or existing query,
                // do nothing
                if matches!(entry, None | Some(SubnetEntry::NoRoute { .. })) {
                    self.table.insert(
                        expect_ipv6(subnet.address()),
                        subnet.prefix_len().into(),
                        se,
                    );
                }
            }
            RoutingTableOplogEntry::Delete(subnet) => {
                self.table
                    .remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
            }
            RoutingTableOplogEntry::QueryExpired(subnet, nre) => {
                if let Some(entry) = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                {
                    if let SubnetEntry::Queried { .. } = &**entry {
                        self.table.insert(
                            expect_ipv6(subnet.address()),
                            subnet.prefix_len().into(),
                            nre,
                        );
                    }
                }
            }
            RoutingTableOplogEntry::NoRouteExpired(subnet) => {
                if let Some(entry) = self
                    .table
                    .exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
                {
                    if let SubnetEntry::NoRoute { .. } = &**entry {
                        self.table
                            .remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
                    }
                }
            }
        }
    }
}
impl Drop for RoutingTableShared {
    fn drop(&mut self) {
        // Last clone of the routing table is gone; abort all background expiration tasks.
        self.cancel_token.cancel();
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/subnet.rs | mycelium/src/subnet.rs | //! A dedicated subnet module.
//!
//! The standard library only exposes [`IpAddr`], and types related to
//! specific IPv4 and IPv6 addresses. It does not however, expose dedicated types to represent
//! appropriate subnets.
//!
//! This code is not meant to fully support subnets, but rather only the subset as needed by the
//! main application code. As such, this implementation is optimized for the specific use case, and
//! might not be optimal for other uses.
use core::fmt;
use std::{
hash::Hash,
net::{IpAddr, Ipv6Addr},
str::FromStr,
};
use ipnet::IpNet;
/// Representation of a subnet. A subnet can be either IPv4 or IPv6.
#[derive(Debug, Clone, Copy, Eq, PartialOrd, Ord)]
pub struct Subnet {
    // Wrapped ipnet type which does the heavy lifting; equality and hashing are customized
    // below to ignore the host bits.
    inner: IpNet,
}
/// An error returned when creating a new [`Subnet`] with an invalid prefix length.
///
/// For IPv4, the max prefix length is 32, and for IPv6 it is 128.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PrefixLenError;
impl Subnet {
    /// Create a new `Subnet` from the given [`IpAddr`] and prefix length.
    ///
    /// # Errors
    ///
    /// Returns [`PrefixLenError`] if the prefix length is invalid for the address family.
    pub fn new(addr: IpAddr, prefix_len: u8) -> Result<Subnet, PrefixLenError> {
        Ok(Self {
            inner: IpNet::new(addr, prefix_len).map_err(|_| PrefixLenError)?,
        })
    }
    /// Returns the size of the prefix in bits.
    pub fn prefix_len(&self) -> u8 {
        self.inner.prefix_len()
    }
    /// Returns the address in this subnet.
    ///
    /// The returned address is a full IP address, used to construct this `Subnet`.
    ///
    /// # Examples
    ///
    /// ```
    /// use mycelium::subnet::Subnet;
    /// use std::net::Ipv6Addr;
    ///
    /// let address = Ipv6Addr::new(12,34,56,78,90,0xab,0xcd,0xef).into();
    /// let subnet = Subnet::new(address, 64).unwrap();
    ///
    /// assert_eq!(subnet.address(), address);
    /// ```
    pub fn address(&self) -> IpAddr {
        self.inner.addr()
    }
    /// Checks if this `Subnet` contains the provided `Subnet`, i.e. all addresses of the provided
    /// `Subnet` are also part of this `Subnet`
    ///
    /// # Examples
    ///
    /// ```
    /// use mycelium::subnet::Subnet;
    /// use std::net::Ipv4Addr;
    ///
    /// let global = Subnet::new(Ipv4Addr::new(0,0,0,0).into(), 0).expect("Defined a valid subnet");
    /// let local = Subnet::new(Ipv4Addr::new(10,0,0,0).into(), 8).expect("Defined a valid subnet");
    ///
    /// assert!(global.contains_subnet(&local));
    /// assert!(!local.contains_subnet(&global));
    /// ```
    pub fn contains_subnet(&self, other: &Self) -> bool {
        self.inner.contains(&other.inner)
    }
    /// Checks if this `Subnet` contains the provided [`IpAddr`].
    ///
    /// # Examples
    ///
    /// ```
    /// use mycelium::subnet::Subnet;
    /// use std::net::{Ipv4Addr,Ipv6Addr};
    ///
    /// let ip_1 = Ipv6Addr::new(12,34,56,78,90,0xab,0xcd,0xef).into();
    /// let ip_2 = Ipv6Addr::new(90,0xab,0xcd,0xef,12,34,56,78).into();
    /// let ip_3 = Ipv4Addr::new(10,1,2,3).into();
    /// let subnet = Subnet::new(Ipv6Addr::new(12,34,5,6,7,8,9,0).into(), 32).unwrap();
    ///
    /// assert!(subnet.contains_ip(ip_1));
    /// assert!(!subnet.contains_ip(ip_2));
    /// assert!(!subnet.contains_ip(ip_3));
    /// ```
    pub fn contains_ip(&self, ip: IpAddr) -> bool {
        self.inner.contains(&ip)
    }
    /// Returns the network part of the `Subnet`. All non prefix bits are set to 0.
    ///
    /// # Examples
    ///
    /// ```
    /// use mycelium::subnet::Subnet;
    /// use std::net::{IpAddr, Ipv4Addr,Ipv6Addr};
    ///
    /// let subnet_1 = Subnet::new(Ipv6Addr::new(12,34,56,78,90,0xab,0xcd,0xef).into(),
    ///     32).unwrap();
    /// let subnet_2 = Subnet::new(Ipv4Addr::new(10,1,2,3).into(), 8).unwrap();
    ///
    /// assert_eq!(subnet_1.network(), IpAddr::V6(Ipv6Addr::new(12,34,0,0,0,0,0,0)));
    /// assert_eq!(subnet_2.network(), IpAddr::V4(Ipv4Addr::new(10,0,0,0)));
    /// ```
    pub fn network(&self) -> IpAddr {
        self.inner.network()
    }
    /// Returns the broadcast address for the subnet.
    ///
    /// # Examples
    ///
    /// ```
    /// use mycelium::subnet::Subnet;
    /// use std::net::{IpAddr, Ipv4Addr,Ipv6Addr};
    ///
    /// let subnet_1 = Subnet::new(Ipv6Addr::new(12,34,56,78,90,0xab,0xcd,0xef).into(),
    ///     32).unwrap();
    /// let subnet_2 = Subnet::new(Ipv4Addr::new(10,1,2,3).into(), 8).unwrap();
    ///
    /// assert_eq!(subnet_1.broadcast_addr(),
    ///     IpAddr::V6(Ipv6Addr::new(12,34,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff)));
    /// assert_eq!(subnet_2.broadcast_addr(), IpAddr::V4(Ipv4Addr::new(10,255,255,255)));
    /// ```
    pub fn broadcast_addr(&self) -> IpAddr {
        self.inner.broadcast()
    }
    /// Returns the netmask of the subnet as an [`IpAddr`].
    pub fn mask(&self) -> IpAddr {
        self.inner.netmask()
    }
}
impl From<Ipv6Addr> for Subnet {
fn from(value: Ipv6Addr) -> Self {
Self::new(value.into(), 128).expect("128 is a valid subnet size for an IPv6 address; qed")
}
}
#[derive(Debug, Clone)]
/// An error indicating a malformed subnet
pub struct SubnetParseError {
    // Private field so the error can only be constructed inside this module.
    _private: (),
}
impl SubnetParseError {
    /// Create a new SubnetParseError
    fn new() -> Self {
        Self { _private: () }
    }
}
impl core::fmt::Display for SubnetParseError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.pad("malformed subnet")
    }
}
impl std::error::Error for SubnetParseError {}
impl FromStr for Subnet {
    type Err = SubnetParseError;

    /// Parse either full CIDR notation (`addr/len`) or a bare IP address, which is treated
    /// as a host subnet (/32 for IPv4, /128 for IPv6).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // First attempt: full CIDR notation.
        match s.parse::<ipnet::IpNet>() {
            Ok(ipnet) => Ok(Subnet::new(ipnet.addr(), ipnet.prefix_len())
                .expect("Parsed subnet size is valid")),
            // Second attempt: a bare IP address, converted to a /32 or /128 subnet.
            Err(_) => match s.parse::<std::net::IpAddr>() {
                Ok(ip) => {
                    let prefix_len = if ip.is_ipv4() { 32 } else { 128 };
                    Ok(Subnet::new(ip, prefix_len).expect("Static subnet sizes are valid"))
                }
                Err(_) => Err(SubnetParseError::new()),
            },
        }
    }
}
impl fmt::Display for Subnet {
    /// Render the subnet in `addr/prefix_len` notation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!("{}", self.inner))
    }
}
impl PartialEq for Subnet {
    fn eq(&self, other: &Self) -> bool {
        // Quick check, subnets of different sizes are never equal.
        if self.prefix_len() != other.prefix_len() {
            return false;
        }
        // Full check: compare network addresses, which ignores the host bits.
        self.network() == other.network()
    }
}
impl Hash for Subnet {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // First write the subnet size
        state.write_u8(self.prefix_len());
        // Then write the IP of the network. This sets the non prefix bits to 0, so hash values
        // will be equal according to the PartialEq rules (equal subnets hash equally).
        self.network().hash(state)
    }
}
impl fmt::Display for PrefixLenError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("Invalid prefix length for this address")
    }
}
// Marker impl so the error can be boxed/propagated as a `dyn Error`.
impl std::error::Error for PrefixLenError {}
#[cfg(test)]
mod tests {
    use std::net::{Ipv4Addr, Ipv6Addr};
    use super::Subnet;
    // Equality must depend only on (prefix_len, masked network address): any host bits are
    // ignored, while a differing prefix length always makes subnets unequal.
    #[test]
    fn test_subnet_equality() {
        // IPv6 cases.
        let subnet_1 =
            Subnet::new(Ipv6Addr::new(12, 23, 34, 45, 56, 67, 78, 89).into(), 64).unwrap();
        let subnet_2 =
            Subnet::new(Ipv6Addr::new(12, 23, 34, 45, 67, 78, 89, 90).into(), 64).unwrap();
        let subnet_3 =
            Subnet::new(Ipv6Addr::new(12, 23, 34, 40, 67, 78, 89, 90).into(), 64).unwrap();
        let subnet_4 = Subnet::new(Ipv6Addr::new(12, 23, 34, 45, 0, 0, 0, 0).into(), 64).unwrap();
        let subnet_5 = Subnet::new(
            Ipv6Addr::new(12, 23, 34, 45, 0xffff, 0xffff, 0xffff, 0xffff).into(),
            64,
        )
        .unwrap();
        let subnet_6 =
            Subnet::new(Ipv6Addr::new(12, 23, 34, 45, 56, 67, 78, 89).into(), 63).unwrap();
        assert_eq!(subnet_1, subnet_2);
        assert_ne!(subnet_1, subnet_3);
        assert_eq!(subnet_1, subnet_4);
        assert_eq!(subnet_1, subnet_5);
        assert_ne!(subnet_1, subnet_6);
        // IPv4 cases, mirroring the IPv6 ones.
        let subnet_1 = Subnet::new(Ipv4Addr::new(10, 1, 2, 3).into(), 24).unwrap();
        let subnet_2 = Subnet::new(Ipv4Addr::new(10, 1, 2, 102).into(), 24).unwrap();
        let subnet_3 = Subnet::new(Ipv4Addr::new(10, 1, 4, 3).into(), 24).unwrap();
        let subnet_4 = Subnet::new(Ipv4Addr::new(10, 1, 2, 0).into(), 24).unwrap();
        let subnet_5 = Subnet::new(Ipv4Addr::new(10, 1, 2, 255).into(), 24).unwrap();
        let subnet_6 = Subnet::new(Ipv4Addr::new(10, 1, 2, 3).into(), 16).unwrap();
        assert_eq!(subnet_1, subnet_2);
        assert_ne!(subnet_1, subnet_3);
        assert_eq!(subnet_1, subnet_4);
        assert_eq!(subnet_1, subnet_5);
        assert_ne!(subnet_1, subnet_6);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/packet.rs | mycelium/src/packet.rs | use bytes::{Buf, BufMut, BytesMut};
pub use control::ControlPacket;
pub use data::DataPacket;
use tokio_util::codec::{Decoder, Encoder};
mod control;
mod data;
/// Current version of the protocol being used.
const PROTOCOL_VERSION: u8 = 1;
/// The size of a `Packet` header on the wire, in bytes.
///
/// Layout: [protocol version, packet type, reserved, reserved].
const PACKET_HEADER_SIZE: usize = 4;
/// A single protocol packet: either user data or a control message.
#[derive(Debug, Clone)]
pub enum Packet {
    DataPacket(DataPacket),
    ControlPacket(ControlPacket),
}
/// Wire discriminant for the packet kind; stored in the second byte of the packet header.
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum PacketType {
    DataPacket = 0,
    ControlPacket = 1,
}
/// Codec which decodes and encodes both data and control packets, dispatching on the packet
/// type found in the shared packet header.
pub struct Codec {
    /// Packet type of an already consumed header, kept while the body is still incomplete.
    packet_type: Option<PacketType>,
    /// Codec for data packet bodies.
    data_packet_codec: data::Codec,
    /// Codec for control packet bodies.
    control_packet_codec: control::Codec,
}
impl Codec {
    /// Create a new [`Codec`] with no partially decoded packet in progress.
    pub fn new() -> Self {
        Codec {
            // No header parsed yet.
            packet_type: None,
            data_packet_codec: data::Codec::new(),
            control_packet_codec: control::Codec::new(),
        }
    }
}
impl Decoder for Codec {
    type Item = Packet;
    type Error = std::io::Error;

    /// Decode a single [`Packet`] from the buffer.
    ///
    /// The decoder is stateful: once a header has been read, the detected packet type is
    /// remembered in `self.packet_type` so a partially received body can be resumed on the
    /// next call without re-reading the header.
    ///
    /// # Errors
    ///
    /// Returns an [`InvalidData`](std::io::ErrorKind::InvalidData) error for an unknown
    /// protocol version or packet type byte, and propagates errors from the body codecs.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Determine the packet_type, either from a previously parsed header or by parsing it
        // from the buffer.
        let packet_type = if let Some(packet_type) = self.packet_type {
            packet_type
        } else {
            // Check we can read the full header. Note: `<`, not `<=` — a buffer holding
            // exactly PACKET_HEADER_SIZE bytes already contains a complete header.
            if src.remaining() < PACKET_HEADER_SIZE {
                return Ok(None);
            }
            let mut header = [0; PACKET_HEADER_SIZE];
            header.copy_from_slice(&src[..PACKET_HEADER_SIZE]);
            src.advance(PACKET_HEADER_SIZE);
            // For now it's a hard error to not follow the 1 defined protocol version
            if header[0] != PROTOCOL_VERSION {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "Unknown protocol version",
                ));
            };
            let packet_type_byte = header[1];
            let packet_type = match packet_type_byte {
                0 => PacketType::DataPacket,
                1 => PacketType::ControlPacket,
                _ => {
                    return Err(std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "Invalid packet type",
                    ));
                }
            };
            self.packet_type = Some(packet_type);
            packet_type
        };
        // Decode the body based on the determined packet_type. The stored type is reset only
        // once a complete packet was produced, so a partial body resumes correctly.
        match packet_type {
            PacketType::DataPacket => match self.data_packet_codec.decode(src) {
                Ok(Some(p)) => {
                    self.packet_type = None; // Reset state
                    Ok(Some(Packet::DataPacket(p)))
                }
                Ok(None) => Ok(None),
                Err(e) => Err(e),
            },
            PacketType::ControlPacket => match self.control_packet_codec.decode(src) {
                Ok(Some(p)) => {
                    self.packet_type = None; // Reset state
                    Ok(Some(Packet::ControlPacket(p)))
                }
                Ok(None) => Ok(None),
                Err(e) => Err(e),
            },
        }
    }
}
impl Encoder<Packet> for Codec {
    type Error = std::io::Error;
    /// Encode a [`Packet`] by writing the 4 byte header (version, packet type, two reserved
    /// zero bytes) followed by the type specific body.
    fn encode(&mut self, item: Packet, dst: &mut BytesMut) -> Result<(), Self::Error> {
        match item {
            Packet::DataPacket(datapacket) => {
                // Packet type 0 = data packet.
                dst.put_slice(&[PROTOCOL_VERSION, 0, 0, 0]);
                self.data_packet_codec.encode(datapacket, dst)
            }
            Packet::ControlPacket(controlpacket) => {
                // Packet type 1 = control packet.
                dst.put_slice(&[PROTOCOL_VERSION, 1, 0, 0]);
                self.control_packet_codec.encode(controlpacket, dst)
            }
        }
    }
}
impl Default for Codec {
fn default() -> Self {
Self::new()
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/router.rs | mycelium/src/router.rs | use crate::{
babel::{self, Hello, Ihu, RouteRequest, SeqNoRequest, Update},
crypto::{PacketBuffer, PublicKey, SecretKey, SharedSecret},
filters::RouteUpdateFilter,
metric::Metric,
metrics::Metrics,
packet::{ControlPacket, DataPacket},
peer::Peer,
router_id::RouterId,
routing_table::{
NoRouteSubnet, QueriedSubnet, RouteEntry, RouteKey, RouteList, Routes, RoutingTable,
},
rr_cache::RouteRequestCache,
seqno_cache::{SeqnoCache, SeqnoRequestCacheKey},
sequence_number::SeqNo,
source_table::{FeasibilityDistance, SourceKey, SourceTable},
subnet::Subnet,
};
use etherparse::{
icmpv6::{DestUnreachableCode, TimeExceededCode},
Icmpv6Type,
};
use std::{
error::Error,
hash::{Hash, Hasher},
net::IpAddr,
sync::{Arc, RwLock},
time::{Duration, Instant},
};
use tokio::sync::mpsc::{self, Receiver, Sender, UnboundedReceiver, UnboundedSender};
use tracing::{debug, error, info, trace, warn};
/// Time between HELLO messages, in seconds
const HELLO_INTERVAL: u64 = 20;
/// Interval advertised in IHU packets
const IHU_INTERVAL: Duration = Duration::from_secs(HELLO_INTERVAL * 3);
/// Max time used in UPDATE packets. For local (static) routes this is the timeout they are
/// advertised with.
const UPDATE_INTERVAL: Duration = Duration::from_secs(HELLO_INTERVAL * 3 * 5);
/// Time between selected route announcements to peers.
const ROUTE_PROPAGATION_INTERVAL: Duration = UPDATE_INTERVAL;
/// Amount of seconds that can elapse before we consider a [`Peer`] as dead from the routers POV.
/// Since IHU's are sent in response to HELLO packets, this MUST be greater than the
/// [`HELLO_INTERVAL`].
///
/// We allow missing 1 hello, + some latency, so 2 HELLO's + 3 seconds for latency.
const DEAD_PEER_THRESHOLD: Duration = Duration::from_secs(HELLO_INTERVAL * 2 + 3);
/// The duration between checks for dead peers in the router. This check only looks for peers where
/// time since the last IHU exceeds DEAD_PEER_THRESHOLD.
const DEAD_PEER_CHECK_INTERVAL: Duration = Duration::from_secs(10);
/// Amount of time to wait between consecutive seqno bumps of the local router seqno.
const SEQNO_BUMP_TIMEOUT: Duration = Duration::from_secs(4);
/// Metric change of more than 10 is considered a large change.
const BIG_METRIC_CHANGE_TRESHOLD: Metric = Metric::new(10);
/// The amount a metric of a route needs to improve before we will consider switching to it.
const SIGNIFICANT_METRIC_IMPROVEMENT: Metric = Metric::new(10);
/// Hold retracted routes briefly (currently 6 seconds) before purging them from the
/// [`RoutingTable`].
// NOTE(review): an earlier comment claimed 1 minute while the code uses 6 seconds — confirm
// which value is intended.
const RETRACTED_ROUTE_HOLD_TIME: Duration = Duration::from_secs(6);
/// The interval specified in updates if the update won't be repeated.
const INTERVAL_NOT_REPEATING: Duration = Duration::from_millis(6);
/// The maximum generation of a [`RouteRequest`] we are still willing to transmit.
const MAX_RR_GENERATION: u8 = 16;
/// Give a route query 5 seconds to resolve, this should be plenty generous.
const ROUTE_QUERY_TIMEOUT: Duration = Duration::from_secs(5);
/// Amount of time to wait before checking if a queried route resolved.
// TODO: Remove once proper feedback is in place
const QUERY_CHECK_DURATION: Duration = Duration::from_millis(100);
/// The threshold for route expiry under which we want to send a route request for a subnet if it is used.
const ROUTE_ALMOST_EXPIRED_TRESHOLD: tokio::time::Duration = Duration::from_secs(15);
/// Core routing state machine: maintains the routing table, source table and peer list, and
/// exchanges babel-style control packets with peers.
pub struct Router<M> {
    routing_table: RoutingTable,
    /// All live peers known to the router.
    peer_interfaces: Arc<RwLock<Vec<Peer>>>,
    source_table: Arc<RwLock<SourceTable>>,
    // Router SeqNo and last time it was bumped
    router_seqno: Arc<RwLock<(SeqNo, Instant)>>,
    /// Subnets this router advertises itself.
    static_routes: Vec<Subnet>,
    router_id: RouterId,
    node_keypair: (SecretKey, PublicKey),
    /// Sender half handed to peers so they can forward data packets to the router.
    router_data_tx: Sender<DataPacket>,
    /// Sender half handed to peers so they can forward control packets to the router.
    router_control_tx: UnboundedSender<(ControlPacket, Peer)>,
    /// Data packets destined for the local node are forwarded on this channel.
    node_tun: UnboundedSender<DataPacket>,
    node_tun_subnet: Subnet,
    /// Filters applied to incoming route updates before acceptance.
    update_filters: Arc<Vec<Box<dyn RouteUpdateFilter + Send + Sync>>>,
    /// Channel injected into peers, so they can notify the router if they exit.
    dead_peer_sink: mpsc::Sender<Peer>,
    /// Channel to notify the router of expired SourceKey's.
    expired_source_key_sink: mpsc::Sender<SourceKey>,
    seqno_cache: SeqnoCache,
    rr_cache: RouteRequestCache,
    /// Amount of worker tasks used to process incoming update TLV's.
    update_workers: usize,
    metrics: M,
}
impl<M> Router<M>
where
M: Metrics + Clone + Send + 'static,
{
/// Create a new `Router`.
///
/// This also spawns all background tasks which drive the router: periodic HELLO sending,
/// control and data packet handling, static route propagation, dead peer detection, and
/// expiration handling for source keys and route keys.
///
/// # Panics
///
/// If update_workers is not in the range of [1..=255], this will panic.
pub fn new(
    update_workers: usize,
    node_tun: UnboundedSender<DataPacket>,
    node_tun_subnet: Subnet,
    static_routes: Vec<Subnet>,
    node_keypair: (SecretKey, PublicKey),
    update_filters: Vec<Box<dyn RouteUpdateFilter + Send + Sync>>,
    metrics: M,
) -> Result<Self, Box<dyn Error>> {
    // We could use a NonZeroU8 here, but for now just handle this manually as this might get
    // changed in the future.
    // NOTE: the range is inclusive of 255 to match the documented bound and the panic
    // message below; the previous exclusive range (1..255) incorrectly rejected 255.
    if !(1..=255).contains(&update_workers) {
        panic!("update workers must be at least 1 and at most 255");
    }
    // Tx is passed onto each new peer instance. This enables peers to send control packets to the router.
    let (router_control_tx, router_control_rx) = mpsc::unbounded_channel();
    // Tx is passed onto each new peer instance. This enables peers to send data packets to the router.
    let (router_data_tx, router_data_rx) = mpsc::channel::<DataPacket>(1000);
    let (expired_source_key_sink, expired_source_key_stream) = mpsc::channel(1);
    let (expired_route_entry_sink, expired_route_entry_stream) = mpsc::channel(1);
    let (dead_peer_sink, dead_peer_stream) = mpsc::channel(100);
    let routing_table = RoutingTable::new(expired_route_entry_sink);
    // The router id is derived from the node's public key.
    let router_id = RouterId::new(node_keypair.1);
    let seqno_cache = SeqnoCache::new();
    let rr_cache = RouteRequestCache::new(ROUTE_ALMOST_EXPIRED_TRESHOLD);
    let router = Router {
        routing_table,
        peer_interfaces: Arc::new(RwLock::new(Vec::new())),
        source_table: Arc::new(RwLock::new(SourceTable::new())),
        router_seqno: Arc::new(RwLock::new((SeqNo::new(), Instant::now()))),
        static_routes,
        router_id,
        node_keypair,
        router_data_tx,
        router_control_tx,
        node_tun,
        node_tun_subnet,
        dead_peer_sink,
        expired_source_key_sink,
        seqno_cache,
        rr_cache,
        update_filters: Arc::new(update_filters),
        update_workers,
        metrics,
    };
    // Spawn the background tasks driving the router. Each task gets its own (cheap) clone.
    tokio::spawn(Router::start_periodic_hello_sender(router.clone()));
    tokio::spawn(Router::handle_incoming_control_packet(
        router.clone(),
        router_control_rx,
    ));
    tokio::spawn(Router::handle_incoming_data_packet(
        router.clone(),
        router_data_rx,
    ));
    tokio::spawn(Router::propagate_static_routes(router.clone()));
    tokio::spawn(Router::check_for_dead_peers(router.clone()));
    tokio::spawn(Router::process_expired_source_keys(
        router.clone(),
        expired_source_key_stream,
    ));
    tokio::spawn(Router::process_expired_route_keys(
        router.clone(),
        expired_route_entry_stream,
    ));
    tokio::spawn(Router::process_dead_peers(router.clone(), dead_peer_stream));
    Ok(router)
}
/// Get a clone of the sender half used to deliver [`ControlPacket`]s from peers to the router.
pub fn router_control_tx(&self) -> UnboundedSender<(ControlPacket, Peer)> {
    self.router_control_tx.clone()
}
/// Get a clone of the sender half used to deliver [`DataPacket`]s from peers to the router.
pub fn router_data_tx(&self) -> Sender<DataPacket> {
    self.router_data_tx.clone()
}
/// The [`Subnet`] associated with the local node.
pub fn node_tun_subnet(&self) -> Subnet {
    self.node_tun_subnet
}
/// Get a clone of the sender half used to forward [`DataPacket`]s to the local node.
pub fn node_tun(&self) -> UnboundedSender<DataPacket> {
    self.node_tun.clone()
}
/// Get all peer interfaces known on the router.
///
/// Note: this clones the entire peer list under the read lock.
pub fn peer_interfaces(&self) -> Vec<Peer> {
    self.peer_interfaces.read().unwrap().clone()
}
/// Add a peer interface to the router.
///
/// The peer's hello/IHU receive timers are reset so a freshly added peer is not
/// immediately considered dead.
pub fn add_peer_interface(&self, peer: Peer) {
    debug!("Adding peer {} to router", peer.connection_identifier());
    self.peer_interfaces.write().unwrap().push(peer.clone());
    self.metrics.router_peer_added();
    // Make sure to set the timers to current values in case lock acquisition takes time. Otherwise these
    // might immediately cause timeout timers to fire.
    peer.set_time_last_received_hello(tokio::time::Instant::now());
    peer.set_time_last_received_ihu(tokio::time::Instant::now());
}
/// Get the public key used by the router.
///
/// This is the public half of the node keypair the router was constructed with.
pub fn node_public_key(&self) -> PublicKey {
    self.node_keypair.1
}
/// Get the [`RouterId`] of the `Router`.
///
/// The id is derived from the node's public key (see [`Router::new`]).
pub fn router_id(&self) -> RouterId {
    self.router_id
}
/// Get the [`PublicKey`] for an [`IpAddr`] if a route exists to the IP.
///
/// The key is taken from the router id of the first best route's source; any other routing
/// table state (queried, no route, unknown) yields `None`.
pub fn get_pubkey(&self, ip: IpAddr) -> Option<PublicKey> {
    match self.routing_table.best_routes(ip) {
        Routes::Exist(routes) if !routes.is_empty() => {
            Some(routes[0].source().router_id().to_pubkey())
        }
        _ => None,
    }
}
/// Gets the cached [`SharedSecret`] for the remote.
///
/// If no route is known yet, a route request for the /64 subnet of `dest` is sent, and the
/// routing table is polled until a result appears, up to roughly
/// 50 * [`QUERY_CHECK_DURATION`]. Returns `None` on timeout or when no route exists.
pub fn get_shared_secret_from_dest(&self, dest: IpAddr) -> Option<SharedSecret> {
    // TODO: Make properly async
    for _ in 0..50 {
        match self.routing_table.best_routes(dest) {
            Routes::Exist(routes) => return Some(routes.shared_secret().clone()),
            // A query is already in flight; keep polling.
            Routes::Queried => {}
            Routes::NoRoute => return None,
            Routes::None => {
                // NOTE: we request the full /64 subnet
                self.send_route_request(
                    Subnet::new(dest, 64)
                        .expect("64 is a valid subnet size for an IPv6 address; qed"),
                );
            }
        }
        // Blocking sleep; block_in_place keeps the async runtime worker usable meanwhile.
        tokio::task::block_in_place(|| std::thread::sleep(QUERY_CHECK_DURATION));
    }
    None
}
/// Get a [`SharedSecret`] for a remote, if a selected route exists to the remote.
///
/// If only fallback routes exist, recovery traffic (a seqno request) is sent and `None` is
/// returned immediately; if the subnet is unknown, a route request is sent and the table is
/// polled, up to roughly 50 * [`QUERY_CHECK_DURATION`].
// TODO: Naming
pub fn get_shared_secret_if_selected(&self, dest: IpAddr) -> Option<SharedSecret> {
    // TODO: Make properly async
    for _ in 0..50 {
        match self.routing_table.best_routes(dest) {
            Routes::Exist(routes) => {
                if routes.selected().is_some() {
                    return Some(routes.shared_secret().clone());
                } else {
                    // Optimistically try to fetch a new route for the subnet for next time.
                    // TODO: this can likely be handled better, but that relies on continuously
                    // querying routes in use, and handling unfeasible routes
                    if let Some(route_entry) = routes.iter().next() {
                        // We have a fallback route, use the source key from that to do a seqno
                        // request. Since the next hop might be dead, just do a broadcast. This
                        // might be blocked by the seqno request cache.
                        self.send_seqno_request(route_entry.source(), None, None);
                    } else {
                        // We don't have any routes, so send a route request. this might fail due
                        // to the source table.
                        self.send_route_request(
                            Subnet::new(dest, 64)
                                .expect("64 is a valid subnet size for an IPv6 address; qed"),
                        );
                    }
                    return None;
                }
            }
            // A query is already in flight; keep polling.
            Routes::Queried => {}
            Routes::NoRoute => {
                return None;
            }
            Routes::None => {
                // NOTE: we request the full /64 subnet
                self.send_route_request(
                    Subnet::new(dest, 64)
                        .expect("64 is a valid subnet size for an IPv6 address; qed"),
                );
            }
        }
        tokio::task::block_in_place(|| std::thread::sleep(QUERY_CHECK_DURATION));
    }
    None
}
/// Gets the cached [`SharedSecret`] based on the associated [`PublicKey`] of the remote.
///
/// Derives the remote's overlay address from the key and delegates to
/// [`Self::get_shared_secret_from_dest`].
#[inline]
pub fn get_shared_secret_by_pubkey(&self, dest: &PublicKey) -> Option<SharedSecret> {
    self.get_shared_secret_from_dest(dest.address().into())
}
/// Get a reference to this `Router`s' dead peer sink.
///
/// Peers notify the router on this channel when they detect they have exited.
pub fn dead_peer_sink(&self) -> &mpsc::Sender<Peer> {
    &self.dead_peer_sink
}
/// Remove a peer from the Router.
///
/// Marks the peer as dead, drops it from the peer list, and records one removal metric per
/// dropped entry. A single call should remove at most one entry; removing more is logged
/// loudly since it points at duplicate peer registrations.
fn remove_peer_interface(&self, peer: &Peer) {
    debug!(
        "Removing peer {} from the router",
        peer.connection_identifier()
    );
    peer.died();
    let mut peers = self.peer_interfaces.write().unwrap();
    let count_before = peers.len();
    peers.retain(|p| p != peer);
    let removed = count_before - peers.len();
    (0..removed).for_each(|_| self.metrics.router_peer_removed());
    if removed > 1 {
        warn!(
            "REMOVED {removed} peers from peer list while called with {}",
            peer.connection_identifier()
        );
    }
}
/// Get a list of all selected route entries.
///
/// Each subnet contributes at most one (cloned) entry: its currently selected route, if any.
pub fn load_selected_routes(&self) -> Vec<RouteEntry> {
    self.routing_table
        .read()
        .iter()
        .filter_map(|(_, rl)| rl.selected().cloned())
        .collect()
}
/// Get a list of all fallback route entries.
///
/// A fallback route is any route in the table which is not the selected route for its
/// subnet.
pub fn load_fallback_routes(&self) -> Vec<RouteEntry> {
    self.routing_table
        .read()
        .iter()
        .flat_map(|(_, rl)| {
            // Filter before cloning so selected entries are never copied just to be
            // discarded; the intermediate Vec detaches the borrow of the route list.
            rl.iter()
                .filter(|re| !re.selected())
                .cloned()
                .collect::<Vec<_>>()
        })
        .collect()
}
/// Get a list of all [`queried subnets`](QueriedSubnet).
pub fn load_queried_subnets(&self) -> Vec<QueriedSubnet> {
    // Snapshot taken while holding the routing table read access.
    self.routing_table.read().iter_queries().collect()
}
/// Get a list of all [`subnets with no route`](NoRouteSubnet) in the routing table.
pub fn load_no_route_entries(&self) -> Vec<NoRouteSubnet> {
    self.routing_table.read().iter_no_route().collect()
}
/// Task which periodically checks for dead peers in the Router.
///
/// A peer is considered dead when no IHU was received from it for more than
/// [`DEAD_PEER_THRESHOLD`].
async fn check_for_dead_peers(self) {
    loop {
        // Check for dead peers on a fixed interval (DEAD_PEER_CHECK_INTERVAL).
        tokio::time::sleep(DEAD_PEER_CHECK_INTERVAL).await;
        trace!("Checking for dead peers");
        let dead_peers = {
            // a peer is assumed dead when the peer's last sent ihu exceeds a threshold
            let mut dead_peers = Vec::new();
            for peer in self.peer_interfaces.read().unwrap().iter() {
                // check if the peer's last_received_ihu is greater than the threshold
                if peer.time_last_received_ihu().elapsed() > DEAD_PEER_THRESHOLD {
                    // peer is dead
                    info!("Peer {} is dead", peer.connection_identifier());
                    // Notify peer it's dead in case it's not aware of that yet.
                    peer.died();
                    dead_peers.push(peer.clone());
                }
            }
            dead_peers
        };
        self.handle_dead_peer(&dead_peers);
    }
}
/// Remove a dead peer from the router.
///
/// Routes advertised by a dead peer are retracted (if selected) or removed outright (if
/// fallback), after which route selection reruns for every affected subnet.
pub fn handle_dead_peer(&self, dead_peers: &[Peer]) {
    for dead_peer in dead_peers {
        self.metrics.router_peer_died();
        debug!(
            "Cleaning up peer {} which is reportedly dead",
            dead_peer.connection_identifier()
        );
        self.remove_peer_interface(dead_peer);
    }
    // Scope for routing table write access.
    let subnets_to_select = {
        let mut rt_write = self.routing_table.write();
        let mut rt_write = rt_write.iter_mut();
        let mut subnets_to_select = Vec::new();
        while let Some((subnet, mut rl)) = rt_write.next() {
            rl.update_routes(|routes, eres, ct| {
                for dead_peer in dead_peers {
                    let Some(mut re) = routes.iter_mut().find(|re| re.neighbour() == dead_peer)
                    else {
                        continue;
                    };
                    if re.selected() {
                        subnets_to_select.push(subnet);
                        // Don't clear selected flag yet, running route selection does that for us.
                        re.set_metric(Metric::infinite());
                        re.set_expires(
                            tokio::time::Instant::now() + RETRACTED_ROUTE_HOLD_TIME,
                            eres.clone(),
                            ct.clone(),
                        );
                    } else {
                        // Fallback routes can simply be dropped.
                        routes.remove(dead_peer);
                    }
                }
            });
        }
        subnets_to_select
    };
    // And run required route selection
    for subnet in subnets_to_select {
        self.route_selection(subnet);
    }
}
/// Run route selection for a given subnet.
///
/// This will cause a triggered update if needed. If the newly computed best route is the
/// one already installed (or equally retracted), nothing is announced.
fn route_selection(&self, subnet: Subnet) {
    self.metrics.router_route_selection_ran();
    debug!("Running route selection for {subnet}");
    let Some(mut routes) = self.routing_table.routes_mut(subnet) else {
        // Subnet not known
        return;
    };
    // If there is no selected route there is nothing to do here. We keep expired routes in the
    // table for a while so updates of those should already have propagated to peers.
    let route_list = routes.routes();
    // If we have a new selected route we must have at least 1 item in the route list so
    // accessing the 0th list element here is fine.
    if let Some(new_selected) = self.find_best_route(&route_list).cloned() {
        if new_selected.neighbour() == route_list[0].neighbour() && route_list[0].selected() {
            debug!(
                "New selected route for {subnet} is the same as the route already installed"
            );
            return;
        }
        // A retracted route replacing an equally retracted selected route also changes
        // nothing worth announcing.
        if new_selected.metric().is_infinite()
            && route_list[0].metric().is_infinite()
            && route_list[0].selected()
        {
            debug!("New selected route for {subnet} is retracted, like the previously selected route");
            return;
        }
        routes.set_selected(new_selected.neighbour());
    } else if !route_list.is_empty() && route_list[0].selected() {
        // This means we went from a selected route to a non-selected route. Unselect route and
        // trigger update.
        // At this point we also send a seqno request to all peers which advertised this route
        // to us, to try and get an updated entry. This uses the source key of the unselected
        // entry.
        self.send_seqno_request(route_list[0].source(), None, None);
        routes.unselect();
    }
    // Release write access to the routing table before announcing the change.
    drop(routes);
    self.trigger_update(subnet, None);
}
/// Remove expired source keys from the router state.
///
/// Runs until the expiration channel closes.
async fn process_expired_source_keys(
    self,
    mut expired_source_key_stream: mpsc::Receiver<SourceKey>,
) {
    while let Some(sk) = expired_source_key_stream.recv().await {
        debug!("Removing expired source entry {sk}");
        self.source_table.write().unwrap().remove(&sk);
        self.metrics.router_source_key_expired();
    }
    warn!("Expired source key processing halted");
}
/// Remove expired route keys from the router state.
///
/// Selected routes are retracted (metric set to infinity, held for
/// [`RETRACTED_ROUTE_HOLD_TIME`]) and route selection reruns; fallback routes are removed
/// outright without further action.
async fn process_expired_route_keys(
    self,
    mut expired_route_key_stream: mpsc::Receiver<RouteKey>,
) {
    while let Some(rk) = expired_route_key_stream.recv().await {
        let subnet = rk.subnet();
        debug!(route.subnet = %subnet, "Got expiration event for route");
        // Scope mutable access to routes.
        // TODO: Is this really needed?
        {
            // Load current key
            let Some(mut routes) = self.routing_table.routes_mut(rk.subnet()) else {
                // Subnet no longer known. This means an expiration timer fired while the entry
                // itself is gone already.
                warn!(%subnet, "Route key expired for unknown subnet");
                continue;
            };
            // `route_selection` is true iff the expired entry was the selected route, in
            // which case a new selection round is required below.
            let route_selection =
                routes.update_routes(|routes, eres, ct| {
                    let Some(mut entry) = routes
                        .iter_mut()
                        .find(|re| re.neighbour() == rk.neighbour())
                    else {
                        return false;
                    };
                    self.metrics.router_route_key_expired(!entry.selected());
                    if entry.selected() {
                        debug!(%subnet, peer = rk.neighbour().connection_identifier(), "Selected route expired, increasing metric to infinity");
                        entry.set_metric(Metric::infinite());
                        entry.set_expires(tokio::time::Instant::now() + RETRACTED_ROUTE_HOLD_TIME, eres.clone(), ct.clone());
                    } else {
                        debug!(%subnet, peer = rk.neighbour().connection_identifier(), "Unselected route expired, removing fallback route");
                        routes.remove(rk.neighbour());
                        // Removing a fallback route does not require any further changes. It does
                        // not affect the selected route and by extension does not require a
                        // triggered update.
                        return false;
                    }
                    true
                });
            if !route_selection {
                continue;
            }
            // Re run route selection if this was the selected route. We should do this before
            // publishing to potentially select a new route, however a time based expiration of a
            // selected route generally means no other routes are viable anyway, so the short lived
            // black hole this could create is not really a concern.
            self.metrics.router_selected_route_expired();
            // Only inject selected route if we are simply retracting it, otherwise it is
            // actually already removed.
            debug!("Rerun route selection after expiration event");
            if let Some(r) = self.find_best_route(&routes.routes()).cloned() {
                routes.set_selected(r.neighbour());
            } else {
                debug!("Route selection did not find a viable route, unselect existing routes");
                routes.unselect();
            }
        }
        // TODO: Is this _always_ needed?
        self.trigger_update(subnet, None);
    }
    warn!("Expired route key processing halted");
}
/// Process notifications about peers who are dead. This allows peers who can self-diagnose
/// connection states to notify us, and allow for more efficient cleanup.
async fn process_dead_peers(self, mut dead_peer_stream: mpsc::Receiver<Peer>) {
    // Batch up to 100 notifications per cleanup pass to amortize routing table access.
    let mut tx_buf = Vec::with_capacity(100);
    loop {
        let received = dead_peer_stream.recv_many(&mut tx_buf, 100).await;
        if received == 0 {
            // Channel closed and fully drained.
            break;
        }
        self.handle_dead_peer(&tx_buf[..received]);
        tx_buf.clear();
    }
    warn!("Processing of dead peers halted");
}
/// Task which ingests and processes control packets. This spawns another background task for
/// every TLV type, and forwards the inbound packets to the proper background task.
///
/// Hello/IHU/route-request queues are unbounded; update and seqno-request queues are bounded
/// and overflow is dropped (counted as discarded), except directly-connected updates which
/// are always delivered.
async fn handle_incoming_control_packet(
    self,
    mut router_control_rx: UnboundedReceiver<(ControlPacket, Peer)>,
) {
    let (hello_tx, hello_rx) = mpsc::unbounded_channel();
    let (ihu_tx, ihu_rx) = mpsc::unbounded_channel();
    let (update_tx, update_rx) = mpsc::channel(1_000_000);
    let (rr_tx, rr_rx) = mpsc::unbounded_channel();
    let (sn_tx, sn_rx) = mpsc::channel(100_000);
    tokio::spawn(self.clone().hello_processor(hello_rx));
    tokio::spawn(self.clone().ihu_processor(ihu_rx));
    tokio::spawn(self.clone().update_processor(update_rx));
    tokio::spawn(self.clone().route_request_processor(rr_rx));
    tokio::spawn(self.clone().seqno_request_processor(sn_rx));
    while let Some((control_packet, source_peer)) = router_control_rx.recv().await {
        // First update metrics with the remaining outstanding TLV's
        self.metrics.router_received_tlv();
        trace!(
            "Received control packet from {}",
            source_peer.connection_identifier()
        );
        // Route packet to proper work queue.
        match control_packet {
            babel::Tlv::Hello(hello) => {
                if hello_tx.send((hello, source_peer)).is_err() {
                    break;
                };
            }
            babel::Tlv::Ihu(ihu) => {
                if ihu_tx.send((ihu, source_peer)).is_err() {
                    break;
                };
            }
            babel::Tlv::Update(update) => {
                if let Err(e) = update_tx.try_send((update, source_peer)) {
                    match e {
                        mpsc::error::TrySendError::Closed(_) => {
                            self.metrics.router_tlv_discarded();
                            break;
                        }
                        mpsc::error::TrySendError::Full(update) => {
                            // If the metric is directly connected (0), always process the
                            // update.
                            if update.0.metric().is_direct() {
                                // Await queue capacity; a send error here means the
                                // channel closed.
                                if update_tx.send(update).await.is_err() {
                                    self.metrics.router_tlv_discarded();
                                    break;
                                }
                            } else {
                                self.metrics.router_tlv_discarded();
                            }
                        }
                    }
                };
            }
            babel::Tlv::RouteRequest(route_request) => {
                if rr_tx.send((route_request, source_peer)).is_err() {
                    break;
                };
            }
            babel::Tlv::SeqNoRequest(seqno_request) => {
                if let Err(e) = sn_tx.try_send((seqno_request, source_peer)) {
                    match e {
                        mpsc::error::TrySendError::Closed(_) => {
                            self.metrics.router_tlv_discarded();
                            break;
                        }
                        mpsc::error::TrySendError::Full(_) => {
                            // Seqno requests are best effort; drop on overflow.
                            self.metrics.router_tlv_discarded();
                        }
                    }
                };
            }
        }
    }
}
/// Background task to process hello TLV's.
///
/// TLV's whose sender died while the packet was queued are dropped without processing.
async fn hello_processor(self, mut hello_rx: UnboundedReceiver<(Hello, Peer)>) {
    while let Some((hello, source_peer)) = hello_rx.recv().await {
        let start = std::time::Instant::now();
        if !source_peer.alive() {
            trace!("Dropping Hello TLV since sender is dead.");
            self.metrics.router_tlv_source_died();
            continue;
        }
        self.handle_incoming_hello(hello, source_peer);
        self.metrics
            .router_time_spent_handling_tlv(start.elapsed(), "hello");
    }
}
/// Background task to process IHU TLV's.
///
/// TLV's whose sender died while the packet was queued are dropped without processing.
async fn ihu_processor(self, mut ihu_rx: UnboundedReceiver<(Ihu, Peer)>) {
    while let Some((ihu, source_peer)) = ihu_rx.recv().await {
        let start = std::time::Instant::now();
        if !source_peer.alive() {
            trace!("Dropping IHU TLV since sender is dead.");
            self.metrics.router_tlv_source_died();
            continue;
        }
        self.handle_incoming_ihu(ihu, source_peer);
        self.metrics
            .router_time_spent_handling_tlv(start.elapsed(), "ihu");
    }
}
/// Background task to process Update TLV's.
///
/// Updates are sharded over `update_workers` blocking worker tasks, keyed by a hash of the
/// subnet's network address, so updates for the same subnet always land on the same worker
/// and are therefore handled in order.
async fn update_processor(self, mut update_rx: Receiver<(Update, Peer)>) {
    let mut senders = Vec::with_capacity(self.update_workers);
    for _ in 0..self.update_workers {
        let router = self.clone();
        let (tx, mut rx) = mpsc::channel::<(_, Peer)>(1_000_000);
        // Update handling is CPU bound, so run each worker on the blocking pool.
        tokio::task::spawn_blocking(move || {
            while let Some((update, source_peer)) = rx.blocking_recv() {
                let start = std::time::Instant::now();
                if !source_peer.alive() {
                    trace!("Dropping Update TLV since sender is dead.");
                    router.metrics.router_tlv_source_died();
                    continue;
                }
                router.handle_incoming_update(update, source_peer);
                router
                    .metrics
                    .router_time_spent_handling_tlv(start.elapsed(), "update");
            }
            warn!("Update processor task exited");
        });
        senders.push(tx);
    }
    while let Some(item) = update_rx.recv().await {
        // Shard on the subnet network address to preserve per-subnet ordering.
        let mut hasher = ahash::AHasher::default();
        item.0.subnet().network().hash(&mut hasher);
        let slot = hasher.finish() as usize % self.update_workers;
        if let Err(e) = senders[slot].try_send(item) {
            match e {
                mpsc::error::TrySendError::Closed(_) => {
                    self.metrics.router_tlv_discarded();
                    break;
                }
                mpsc::error::TrySendError::Full(update) => {
                    // If the metric is directly connected (0), always process the
                    // update.
                    if update.0.metric().is_direct() {
                        // Await queue capacity; a send error means the worker channel
                        // closed.
                        if senders[slot].send(update).await.is_err() {
                            self.metrics.router_tlv_discarded();
                            break;
                        }
                    } else {
                        self.metrics.router_tlv_discarded();
                    }
                }
            }
        };
    }
    warn!("Update processor coordinator exited");
}
/// Background task to process Route Request TLV's.
///
/// TLV's whose sender died while the packet was queued are dropped without processing.
async fn route_request_processor(self, mut rr_rx: UnboundedReceiver<(RouteRequest, Peer)>) {
    while let Some((rr, source_peer)) = rr_rx.recv().await {
        let start = std::time::Instant::now();
        if !source_peer.alive() {
            trace!("Dropping Route request TLV since sender is dead.");
            self.metrics.router_tlv_source_died();
            continue;
        }
        self.handle_incoming_route_request(rr, source_peer);
        self.metrics
            .router_time_spent_handling_tlv(start.elapsed(), "route_request");
    }
}
/// Background task to process Seqno Request TLV's.
async fn seqno_request_processor(self, mut sn_rx: Receiver<(SeqNoRequest, Peer)>) {
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | true |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/lib.rs | mycelium/src/lib.rs | use std::future::Future;
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::path::PathBuf;
#[cfg(feature = "message")]
use std::time::Duration;
use crate::cdn::Cdn;
use crate::proxy::{ConnectionError, Proxy};
use crate::tun::TunConfig;
use bytes::BytesMut;
use data::DataPlane;
use endpoint::Endpoint;
#[cfg(feature = "message")]
use message::TopicConfig;
#[cfg(feature = "message")]
use message::{
MessageId, MessageInfo, MessagePushResponse, MessageStack, PushMessageError, ReceivedMessage,
};
use metrics::Metrics;
use peer_manager::{PeerExists, PeerNotFound, PeerStats, PrivateNetworkKey};
use routing_table::{NoRouteSubnet, QueriedSubnet, RouteEntry};
use subnet::Subnet;
use tokio::net::TcpListener;
use tracing::{error, info, warn};
mod babel;
pub mod cdn;
mod connection;
pub mod crypto;
pub mod data;
mod dns;
pub mod endpoint;
pub mod filters;
mod interval;
#[cfg(feature = "message")]
pub mod message;
mod metric;
pub mod metrics;
pub mod packet;
mod peer;
pub mod peer_manager;
mod proxy;
pub mod router;
mod router_id;
mod routing_table;
mod rr_cache;
mod seqno_cache;
mod sequence_number;
mod source_table;
pub mod subnet;
pub mod task;
mod tun;
/// The prefix of the global subnet used.
///
/// Together with [`GLOBAL_SUBNET_PREFIX_LEN`] this describes the overlay network range.
pub const GLOBAL_SUBNET_ADDRESS: IpAddr = IpAddr::V6(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 0));
/// The prefix length of the global subnet used.
pub const GLOBAL_SUBNET_PREFIX_LEN: u8 = 7;
/// Config for a mycelium [`Node`].
pub struct Config<M> {
    /// The secret key of the node.
    pub node_key: crypto::SecretKey,
    /// Statically configured peers.
    pub peers: Vec<Endpoint>,
    /// Tun interface should be disabled.
    pub no_tun: bool,
    /// Listen port for TCP connections.
    pub tcp_listen_port: u16,
    /// Listen port for Quic connections.
    pub quic_listen_port: Option<u16>,
    /// Udp port for peer discovery.
    pub peer_discovery_port: Option<u16>,
    /// Name for the TUN device.
    #[cfg(any(
        target_os = "linux",
        all(target_os = "macos", not(feature = "mactunfd")),
        target_os = "windows"
    ))]
    pub tun_name: String,
    /// Configuration for a private network, if run in that mode. To enable private networking,
    /// this must be a name + a PSK.
    pub private_network_config: Option<(String, PrivateNetworkKey)>,
    /// Implementation of the `Metrics` trait, used to expose information about the system
    /// internals.
    pub metrics: M,
    /// Mark that's set on all packets that we send on the underlying network
    pub firewall_mark: Option<u32>,
    // tun_fd is android, iOS, macos on appstore specific option
    // We can't create TUN device from the Rust code in android, iOS, and macos on appstore.
    // So, we create the TUN device on Kotlin(android) or Swift(iOS, macos) then pass
    // the TUN's file descriptor to mycelium.
    #[cfg(any(
        target_os = "android",
        target_os = "ios",
        all(target_os = "macos", feature = "mactunfd"),
    ))]
    pub tun_fd: Option<i32>,
    /// The amount of worker tasks spawned to process updates. Up to this amount of updates can be
    /// processed in parallel. Because processing an update is a CPU bound task, it is pointless to
    /// set this to a value which is higher than the amount of logical CPU cores available to the
    /// system.
    pub update_workers: usize,
    // Path handed to the CDN component when set; enables the CDN alongside the node.
    pub cdn_cache: Option<PathBuf>,
    /// Enable dns resolver. This binds to port 53
    pub enable_dns: bool,
    /// Configuration for message topics, if this is not set the default config will be used.
    #[cfg(feature = "message")]
    pub topic_config: Option<TopicConfig>,
}
/// The Node is the main structure in mycelium. It governs the entire data flow.
pub struct Node<M> {
    router: router::Router<M>,
    peer_manager: peer_manager::PeerManager<M>,
    // Kept alive for the lifetime of the node; not accessed after construction.
    _dns: Option<dns::Resolver>,
    _cdn: Option<Cdn>,
    proxy: Proxy<M>,
    #[cfg(feature = "message")]
    message_stack: message::MessageStack<M>,
}
/// General info about a node, as returned by [`Node::info`].
pub struct NodeInfo {
    /// The overlay subnet in use by the node.
    pub node_subnet: Subnet,
    /// The public key of the node
    pub node_pubkey: crypto::PublicKey,
}
impl<M> Node<M>
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    /// Setup a new `Node` with the provided [`Config`].
    ///
    /// This wires up the full stack: router, peer manager, data plane (TUN-backed or a
    /// dummy one when `no_tun` is set), optional DNS resolver, optional CDN and the socks5
    /// proxy handle.
    pub async fn new(config: Config<M>) -> Result<Self, Box<dyn std::error::Error>> {
        // If a private network is configured, validate network name
        if let Some((net_name, _)) = &config.private_network_config {
            if net_name.len() < 2 || net_name.len() > 64 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidInput,
                    "network name must be between 2 and 64 characters",
                )
                .into());
            }
        }
        let node_pub_key = crypto::PublicKey::from(&config.node_key);
        let node_addr = node_pub_key.address();
        let (tun_tx, tun_rx) = tokio::sync::mpsc::unbounded_channel();
        let node_subnet = Subnet::new(
            // Truncate last 64 bits of address.
            // TODO: find a better way to do this.
            Subnet::new(node_addr.into(), 64)
                .expect("64 is a valid IPv6 prefix size; qed")
                .network(),
            64,
        )
        .expect("64 is a valid IPv6 prefix size; qed");
        // Creating a new Router instance
        let router = match router::Router::new(
            config.update_workers,
            tun_tx,
            node_subnet,
            vec![node_subnet],
            (config.node_key, node_pub_key),
            vec![
                Box::new(filters::AllowedSubnet::new(
                    Subnet::new(GLOBAL_SUBNET_ADDRESS, GLOBAL_SUBNET_PREFIX_LEN)
                        .expect("Global subnet is properly defined; qed"),
                )),
                Box::new(filters::MaxSubnetSize::<64>),
                Box::new(filters::RouterIdOwnsSubnet),
            ],
            config.metrics.clone(),
        ) {
            Ok(router) => {
                info!(
                    "Router created. Pubkey: {:x}",
                    BytesMut::from(&router.node_public_key().as_bytes()[..])
                );
                router
            }
            Err(e) => {
                error!("Error creating router: {e}");
                panic!("Error creating router: {e}");
            }
        };
        // Creating a new PeerManager instance
        let pm = peer_manager::PeerManager::new(
            router.clone(),
            config.peers,
            config.tcp_listen_port,
            config.quic_listen_port,
            config.peer_discovery_port.unwrap_or_default(),
            config.peer_discovery_port.is_none(),
            config.private_network_config,
            config.metrics,
            config.firewall_mark,
        )?;
        info!("Started peer manager");
        #[cfg(feature = "message")]
        let (tx, rx) = tokio::sync::mpsc::channel(100);
        #[cfg(feature = "message")]
        let msg_receiver = tokio_stream::wrappers::ReceiverStream::new(rx);
        #[cfg(feature = "message")]
        let msg_sender = tokio_util::sync::PollSender::new(tx);
        #[cfg(not(feature = "message"))]
        let msg_sender = futures::sink::drain();
        let _data_plane = if config.no_tun {
            warn!("Starting data plane without TUN interface, L3 functionality disabled");
            DataPlane::new(
                router.clone(),
                // No tun so create a dummy stream for L3 packets which never yields
                tokio_stream::pending(),
                // Similarly, create a sink which just discards every packet we would receive
                futures::sink::drain(),
                msg_sender,
                tun_rx,
            )
        } else {
            #[cfg(not(any(
                target_os = "linux",
                target_os = "macos",
                target_os = "windows",
                target_os = "android",
                target_os = "ios"
            )))]
            {
                panic!("On this platform, you can only run with --no-tun");
            }
            #[cfg(any(
                target_os = "linux",
                target_os = "macos",
                target_os = "windows",
                target_os = "android",
                target_os = "ios"
            ))]
            {
                #[cfg(any(
                    target_os = "linux",
                    all(target_os = "macos", not(feature = "mactunfd")),
                    target_os = "windows"
                ))]
                let tun_config = TunConfig {
                    name: config.tun_name.clone(),
                    node_subnet: Subnet::new(node_addr.into(), 64)
                        .expect("64 is a valid subnet size for IPv6; qed"),
                    route_subnet: Subnet::new(GLOBAL_SUBNET_ADDRESS, GLOBAL_SUBNET_PREFIX_LEN)
                        .expect("Static configured TUN route is valid; qed"),
                };
                #[cfg(any(
                    target_os = "android",
                    target_os = "ios",
                    all(target_os = "macos", feature = "mactunfd"),
                ))]
                let tun_config = TunConfig {
                    tun_fd: config.tun_fd.unwrap(),
                };
                let (rxhalf, txhalf) = tun::new(tun_config).await?;
                info!("Node overlay IP: {node_addr}");
                DataPlane::new(router.clone(), rxhalf, txhalf, msg_sender, tun_rx)
            }
        };
        let dns = if config.enable_dns {
            Some(dns::Resolver::new().await)
        } else {
            None
        };
        let cdn = config.cdn_cache.map(Cdn::new);
        if let Some(ref cdn) = cdn {
            // NOTE(review): binds the privileged port 80 on localhost; this fails without
            // elevated permissions — confirm this is intentional.
            let listener = TcpListener::bind("localhost:80").await?;
            cdn.start(listener)?;
        }
        let proxy = Proxy::new(router.clone());
        #[cfg(feature = "message")]
        let ms = MessageStack::new(_data_plane, msg_receiver, config.topic_config);
        Ok(Node {
            router,
            peer_manager: pm,
            _dns: dns,
            _cdn: cdn,
            proxy,
            #[cfg(feature = "message")]
            message_stack: ms,
        })
    }
    /// Get information about the running `Node`
    pub fn info(&self) -> NodeInfo {
        NodeInfo {
            node_subnet: self.router.node_tun_subnet(),
            node_pubkey: self.router.node_public_key(),
        }
    }
    /// Get information about the current peers in the `Node`
    pub fn peer_info(&self) -> Vec<PeerStats> {
        self.peer_manager.peers()
    }
    /// Add a new peer to the system identified by an [`Endpoint`].
    pub fn add_peer(&self, endpoint: Endpoint) -> Result<(), PeerExists> {
        self.peer_manager.add_peer(endpoint)
    }
    /// Remove an existing peer identified by an [`Endpoint`] from the system.
    pub fn remove_peer(&self, endpoint: Endpoint) -> Result<(), PeerNotFound> {
        self.peer_manager.delete_peer(&endpoint)
    }
    /// List all selected [`routes`](RouteEntry) in the system.
    pub fn selected_routes(&self) -> Vec<RouteEntry> {
        self.router.load_selected_routes()
    }
    /// List all fallback [`routes`](RouteEntry) in the system.
    pub fn fallback_routes(&self) -> Vec<RouteEntry> {
        self.router.load_fallback_routes()
    }
    /// List all [`queried subnets`](QueriedSubnet) in the system.
    pub fn queried_subnets(&self) -> Vec<QueriedSubnet> {
        self.router.load_queried_subnets()
    }
    /// List all [`subnets with no route`](NoRouteSubnet) in the system.
    pub fn no_route_entries(&self) -> Vec<NoRouteSubnet> {
        self.router.load_no_route_entries()
    }
    /// Get public key from the IP of `Node`
    pub fn get_pubkey_from_ip(&self, ip: IpAddr) -> Option<crypto::PublicKey> {
        self.router.get_pubkey(ip)
    }
}
/// Socks5 proxy related functionality, delegating to the internal [`Proxy`].
impl<M> Node<M>
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    /// Starts probing for Socks5 proxies on the network.
    pub fn start_proxy_scan(&self) {
        self.proxy.start_probing()
    }
    /// Stops any ongoing Socks5 proxy probes on the network.
    pub fn stop_proxy_scan(&self) {
        self.proxy.stop_probing()
    }
    /// Connect to a remote Socks5 proxy. If [no remote is given](Option::None), the system will
    /// try to select a known proxy with the lowest latency.
    pub fn connect_proxy(
        &self,
        remote: Option<SocketAddr>,
    ) -> impl Future<Output = Result<SocketAddr, ConnectionError>> + Send {
        // Clone the proxy handle so the returned future does not borrow `self`.
        let proxy = self.proxy.clone();
        async move { proxy.connect(remote).await }
    }
    /// Disconnect from a remote Socks5 proxy, stopping all proxied connections as well.
    pub fn disconnect_proxy(&self) {
        self.proxy.disconnect()
    }
    /// Get a list of all proxies discovered by the system.
    pub fn known_proxies(&self) -> Vec<Ipv6Addr> {
        self.proxy.known_proxies()
    }
}
#[cfg(feature = "message")]
impl<M> Node<M>
where
    M: Metrics + Clone + Send + 'static,
{
    /// Wait for a message to arrive in the message stack.
    ///
    /// If the optional `topic` is provided, only messages which have exactly the same value in
    /// `topic` will be returned. The `pop` argument decides if the message is removed from the
    /// internal queue or not. If `pop` is `false`, the same message will be returned on the next
    /// call (with the same topic).
    ///
    /// This method returns a future which will wait indefinitely until a message is received. It
    /// is generally a good idea to put a limit on how long to wait by wrapping this in a [`tokio::time::timeout`].
    pub fn get_message(
        &self,
        pop: bool,
        topic: Option<Vec<u8>>,
    ) -> impl Future<Output = ReceivedMessage> + '_ {
        // First reborrow only the message stack from self, then manually construct a future. This
        // avoids a lifetime issue on the router, which is not sync. If a regular 'async' fn would
        // be used here, we can't specify that at this point sadly.
        let ms = &self.message_stack;
        async move { ms.message(pop, topic).await }
    }
    /// Push a new message to the message stack.
    ///
    /// The system will attempt to transmit the message for `try_duration`. A message is considered
    /// transmitted when the receiver has indicated it completely received the message. If
    /// `subscribe_reply` is `true`, the second return value will be [`Option::Some`], with a
    /// watcher which will resolve if a reply for this exact message comes in. Since this relies on
    /// the receiver actually sending a reply, there is no guarantee that this will eventually
    /// resolve.
    pub fn push_message(
        &self,
        dst: IpAddr,
        data: Vec<u8>,
        topic: Option<Vec<u8>>,
        try_duration: Duration,
        subscribe_reply: bool,
    ) -> Result<MessagePushResponse, PushMessageError> {
        self.message_stack.new_message(
            dst,
            data,
            // A missing topic is transmitted as the empty topic.
            topic.unwrap_or_default(),
            try_duration,
            subscribe_reply,
        )
    }
    /// Get the status of a message sent previously.
    ///
    /// Returns [`Option::None`] if no message is found with the given id. Message info is only
    /// retained for a limited time after a message has been received, or after the message has
    /// been aborted due to a timeout.
    pub fn message_status(&self, id: MessageId) -> Option<MessageInfo> {
        self.message_stack.message_info(id)
    }
    /// Send a reply to a previously received message.
    pub fn reply_message(
        &self,
        id: MessageId,
        dst: IpAddr,
        data: Vec<u8>,
        try_duration: Duration,
    ) -> MessageId {
        self.message_stack
            .reply_message(id, dst, data, try_duration)
    }
    /// Get a list of all configured topics.
    pub fn topics(&self) -> Vec<Vec<u8>> {
        self.message_stack.topics()
    }
    /// Get the whitelisted sources for a topic, if the topic has a whitelist configured.
    pub fn topic_allowed_sources(&self, topic: &Vec<u8>) -> Option<Vec<Subnet>> {
        self.message_stack.topic_allowed_sources(topic)
    }
    /// Sets the default topic action to accept or reject. This decides how topics which don't have
    /// an explicit whitelist get handled.
    pub fn accept_unconfigured_topic(&self, accept: bool) {
        self.message_stack.set_default_topic_action(accept)
    }
    /// Whether a topic without explicit configuration is accepted (`true`) or rejected (`false`).
    pub fn unconfigure_topic_action(&self) -> bool {
        self.message_stack.get_default_topic_action()
    }
    /// Add a topic to the whitelist without any configured allowed sources.
    pub fn add_topic_whitelist(&self, topic: Vec<u8>) {
        self.message_stack.add_topic_whitelist(topic)
    }
    /// Remove a topic from the whitelist. Future messages will follow the default action.
    pub fn remove_topic_whitelist(&self, topic: Vec<u8>) {
        self.message_stack.remove_topic_whitelist(topic)
    }
    /// Add a new whitelisted source for a topic. This creates the topic if it does not exist yet.
    pub fn add_topic_whitelist_src(&self, topic: Vec<u8>, src: Subnet) {
        self.message_stack.add_topic_whitelist_src(topic, src)
    }
    /// Remove a whitelisted source for a topic.
    pub fn remove_topic_whitelist_src(&self, topic: Vec<u8>, src: Subnet) {
        self.message_stack.remove_topic_whitelist_src(topic, src)
    }
    /// Set the forward socket for a topic. Creates the topic if it doesn't exist.
    pub fn set_topic_forward_socket(&self, topic: Vec<u8>, socket_path: std::path::PathBuf) {
        self.message_stack
            .set_topic_forward_socket(topic, Some(socket_path))
    }
    /// Get the forward socket for a topic, if any.
    pub fn get_topic_forward_socket(&self, topic: &Vec<u8>) -> Option<std::path::PathBuf> {
        self.message_stack.get_topic_forward_socket(topic)
    }
    /// Removes the forward socket for the topic, if one exists.
    pub fn delete_topic_forward_socket(&self, topic: Vec<u8>) {
        self.message_stack.set_topic_forward_socket(topic, None)
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/sequence_number.rs | mycelium/src/sequence_number.rs | //! Dedicated logic for
//! [sequence numbers](https://datatracker.ietf.org/doc/html/rfc8966#name-solving-starvation-sequenci).
use core::fmt;
use core::ops::{Add, AddAssign};
/// Cutoff used when ordering two [`SeqNo`] values, [as defined in
/// the babel rfc](https://datatracker.ietf.org/doc/html/rfc8966#section-3.2.1).
const SEQNO_COMPARE_TRESHOLD: u16 = 32_768;

/// A sequence number on a route.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SeqNo(u16);

impl SeqNo {
    /// Create a new `SeqNo` with the default value.
    pub fn new() -> Self {
        Self::default()
    }

    /// "Less than" comparison on the circular sequence number space, as defined in
    /// [the babel rfc](https://datatracker.ietf.org/doc/html/rfc8966#section-3.2.1).
    /// We deliberately don't implement [`PartialOrd`], since the relation is not transitive
    /// on a wrapping space, which that trait's contract requires.
    ///
    /// Quirk: two values exactly 32_768 apart compare as neither smaller nor greater,
    /// whichever way the arguments are ordered.
    pub fn lt(&self, other: &Self) -> bool {
        self.0 != other.0 && other.0.wrapping_sub(self.0) < SEQNO_COMPARE_TRESHOLD
    }

    /// "Greater than" comparison on the circular sequence number space; see [`SeqNo::lt`]
    /// for why [`PartialOrd`] is not implemented and for the equidistant-value quirk.
    pub fn gt(&self, other: &Self) -> bool {
        self.0 != other.0 && other.0.wrapping_sub(self.0) > SEQNO_COMPARE_TRESHOLD
    }
}

impl fmt::Display for SeqNo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<u16> for SeqNo {
    fn from(value: u16) -> Self {
        Self(value)
    }
}

impl From<SeqNo> for u16 {
    fn from(value: SeqNo) -> Self {
        value.0
    }
}

impl Add<u16> for SeqNo {
    type Output = Self;

    // Addition wraps around, matching the circular sequence number space.
    fn add(self, rhs: u16) -> Self::Output {
        Self(self.0.wrapping_add(rhs))
    }
}

impl AddAssign<u16> for SeqNo {
    fn add_assign(&mut self, rhs: u16) {
        self.0 = self.0.wrapping_add(rhs);
    }
}
#[cfg(test)]
mod tests {
    use super::SeqNo;
    // Equal raw values are equal, regardless of magnitude.
    #[test]
    fn cmp_eq_seqno() {
        let s1 = SeqNo::from(1);
        let s2 = SeqNo::from(1);
        assert_eq!(s1, s2);
        let s1 = SeqNo::from(10_000);
        let s2 = SeqNo::from(10_000);
        assert_eq!(s1, s2);
    }
    // Differences below the comparison threshold order the "natural" way.
    #[test]
    fn cmp_small_seqno_increase() {
        let s1 = SeqNo::from(1);
        let s2 = SeqNo::from(2);
        assert!(s1.lt(&s2));
        assert!(!s2.lt(&s1));
        assert!(s2.gt(&s1));
        assert!(!s1.gt(&s2));
        let s1 = SeqNo::from(3);
        let s2 = SeqNo::from(30_000);
        assert!(s1.lt(&s2));
        assert!(!s2.lt(&s1));
        assert!(s2.gt(&s1));
        assert!(!s1.gt(&s2));
    }
    // Differences at or beyond the threshold: ordering flips (wrap-around), and values
    // exactly 32_768 apart compare as neither smaller nor greater in either direction.
    #[test]
    fn cmp_big_seqno_increase() {
        let s1 = SeqNo::from(0);
        let s2 = SeqNo::from(32_767);
        assert!(s1.lt(&s2));
        assert!(!s2.lt(&s1));
        assert!(s2.gt(&s1));
        assert!(!s1.gt(&s2));
        // Test equality quirk at cutoff point.
        let s1 = SeqNo::from(0);
        let s2 = SeqNo::from(32_768);
        assert!(!s1.lt(&s2));
        assert!(!s2.lt(&s1));
        assert!(!s2.gt(&s1));
        assert!(!s1.gt(&s2));
        let s1 = SeqNo::from(0);
        let s2 = SeqNo::from(32_769);
        assert!(!s1.lt(&s2));
        assert!(s2.lt(&s1));
        assert!(!s2.gt(&s1));
        assert!(s1.gt(&s2));
        let s1 = SeqNo::from(6);
        let s2 = SeqNo::from(60_000);
        assert!(!s1.lt(&s2));
        assert!(s2.lt(&s1));
        assert!(!s2.gt(&s1));
        assert!(s1.gt(&s2));
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/filters.rs | mycelium/src/filters.rs | use crate::{babel, subnet::Subnet};
/// This trait is used to filter incoming updates from peers. Only updates which pass all
/// configured filters on the local [`Router`](crate::router::Router) will actually be forwarded
/// to the [`Router`](crate::router::Router) for processing.
pub trait RouteUpdateFilter {
    /// Judge an incoming update. Returns `true` if the update may be processed.
    fn allow(&self, update: &babel::Update) -> bool;
}
/// Limit the subnet size of subnets announced in updates to be at most `N` bits. Note that "at
/// most `N` bits of address space" means the prefix length needs to be **AT LEAST** `N`.
pub struct MaxSubnetSize<const N: u8>;
impl<const N: u8> RouteUpdateFilter for MaxSubnetSize<N> {
    fn allow(&self, update: &babel::Update) -> bool {
        update.subnet().prefix_len() >= N
    }
}
/// Limit the subnet announced to be included in the given subnet.
pub struct AllowedSubnet {
    // The only subnet updates may fall in.
    subnet: Subnet,
}
impl AllowedSubnet {
    /// Create a new `AllowedSubnet` filter, which only allows updates who's `Subnet` is contained
    /// in the given `Subnet`.
    pub fn new(subnet: Subnet) -> Self {
        Self { subnet }
    }
}
impl RouteUpdateFilter for AllowedSubnet {
    fn allow(&self, update: &babel::Update) -> bool {
        self.subnet.contains_subnet(&update.subnet())
    }
}
/// Limit the announced subnets to those which contain the derived IP from the `RouterId`.
///
/// Since retractions can be sent by any node to indicate they don't have a route for the subnet,
/// these are also allowed.
pub struct RouterIdOwnsSubnet;
impl RouteUpdateFilter for RouterIdOwnsSubnet {
    fn allow(&self, update: &babel::Update) -> bool {
        // An infinite metric is a retraction, which is always allowed.
        update.metric().is_infinite()
            || update
                .subnet()
                .contains_ip(update.router_id().to_pubkey().address().into())
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/dns.rs | mycelium/src/dns.rs | //! Built in dns resolver which intercepts dns lookups for defined TLD's and redirects them to a
//! different upstream server.
use hickory_server::authority::MessageResponseBuilder;
use hickory_server::proto::op::Header;
use hickory_server::proto::rr::{LowerName, Record};
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use hickory_server::ServerFuture;
use tokio::net::UdpSocket;
use tracing::{debug, error, info};
/// A running DNS resolver; dropping it shuts the embedded server down.
pub struct Resolver {
    // The actual DNS server, driven by hickory.
    server: ServerFuture<Handler>,
}
/// Request handler for the embedded DNS server.
pub struct Handler {
    // Zones which should be intercepted rather than forwarded upstream.
    // Currently unused (interception is not implemented yet).
    _intercepted_zones: Vec<LowerName>,
    // Upstream recursive resolver (system configuration).
    resolver: hickory_resolver::TokioResolver,
}
impl Resolver {
    /// Create a new resolver instance.
    ///
    /// # Panics
    ///
    /// Panics if the tokio resolver builder cannot be created, or if UDP port 53 cannot be
    /// bound on all interfaces (e.g. insufficient privileges or the port is already in use).
    pub async fn new() -> Self {
        let resolver = hickory_resolver::TokioResolver::builder_tokio()
            .expect("Can create tokio resolver builder")
            .build();
        let handler = Handler {
            _intercepted_zones: Vec::new(),
            resolver,
        };
        let mut server = hickory_server::server::ServerFuture::new(handler);
        let udp_socket = UdpSocket::bind("[::]:53")
            .await
            .expect("Can bind udp port 53");
        server.register_socket(udp_socket);
        Self { server }
    }
}
#[async_trait::async_trait]
impl RequestHandler for Handler {
    // NOTE(review): `Level = DEBUG` is an unusual spelling for the instrument level
    // argument (the documented form is `level = "debug"`) — confirm this does what is
    // intended with the tracing version in use.
    #[tracing::instrument(skip_all, Level = DEBUG)]
    async fn handle_request<R>(&self, request: &Request, mut response_handle: R) -> ResponseInfo
    where
        R: ResponseHandler,
    {
        let mut answers = vec![];
        // We only handle queries. Anything which isn't a query shouldn't be targeted at us anyway,
        // since we only act as a (recursive) resolver
        for query in request.queries() {
            // TODO: interception of configured zones (see `_intercepted_zones`) is not
            // implemented yet; for now every query is forwarded to the upstream resolver.
            // When implemented, only AAAA (not A) queries should be redirected.
            match self.resolver.lookup(query.name(), query.query_type()).await {
                Ok(lookup) => {
                    answers.push(lookup);
                }
                Err(err) => {
                    // Failed lookups are simply omitted from the answer section.
                    error!(%err, name = %query.name(), class = %query.query_type(), "Could not resolve query");
                }
            }
        }
        // Flatten every lookup into DNS records, deriving the remaining TTL from the
        // lookup's validity deadline.
        let answers = answers
            .into_iter()
            .flat_map(|lookup| {
                let ttl = lookup
                    .valid_until()
                    .duration_since(std::time::Instant::now())
                    .as_secs() as u32;
                let name = lookup.query().name().clone();
                lookup
                    .into_iter()
                    .zip(std::iter::repeat(name))
                    .map(move |(rdata, name)| {
                        info!(%name, record = %rdata, "Resolution done");
                        Record::from_rdata(name, ttl, rdata)
                    })
            })
            .collect::<Vec<_>>();
        let responses = MessageResponseBuilder::from_message_request(request);
        let resp = responses.build(
            Header::response_from_request(request.header()),
            &answers,
            [],
            [],
            [],
        );
        match response_handle.send_response(resp).await {
            Ok(resp_info) => resp_info,
            Err(err) => {
                // If we can't send the response, synthesize response info from the request
                // header so the server still gets something sensible to record.
                debug!(%err, "Failed to send response");
                Header::response_from_request(request.header()).into()
            }
        }
    }
}
impl Drop for Resolver {
    // Shut down the embedded DNS server when the resolver handle is dropped.
    fn drop(&mut self) {
        self.server.shutdown_token().cancel()
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/connection.rs | mycelium/src/connection.rs | use std::{
future::Future,
io,
net::SocketAddr,
pin::Pin,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
use crate::packet::{self, ControlPacket, DataPacket, Packet};
use bytes::{Bytes, BytesMut};
use futures::{
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use tokio::io::{AsyncRead, AsyncWrite};
mod tracked;
use tokio_util::codec::{Decoder, Encoder, Framed};
pub use tracked::Tracked;
#[cfg(feature = "private-network")]
pub mod tls;
/// Cost to add to the peer_link_cost for "local processing", when peers are connected over IPv6.
///
/// The current peer link cost is calculated from a HELLO rtt. This is great to measure link
/// latency, since packets are processed in order. However, on local idle links, this value will
/// likely be 0 since we round down (from the amount of ms it took to process), which does not
/// accurately reflect the fact that there is in fact a cost associated with using a peer, even on
/// these local links.
const PACKET_PROCESSING_COST_IP6_TCP: u16 = 10;
/// Cost to add to the peer_link_cost for "local processing", when peers are connected over IPv4.
///
/// This is similar to [`PACKET_PROCESSING_COST_IP6_TCP`], but slightly higher so we skew towards
/// IPv6 connections if peers are connected over both IPv4 and IPv6.
const PACKET_PROCESSING_COST_IP4_TCP: u16 = 15;
// TODO: tune this value; currently just set below the TCP equivalent.
const PACKET_PROCESSING_COST_IP6_QUIC: u16 = 7;
// TODO: tune this value; currently just set below the TCP equivalent.
const PACKET_PROCESSING_COST_IP4_QUIC: u16 = 12;
/// Receiving side of a split [`Connection`].
pub trait ConnectionReadHalf: Send {
    /// Receive a packet from the remote end. `None` indicates the stream ended.
    fn receive_packet(&mut self) -> impl Future<Output = Option<io::Result<Packet>>> + Send;
}
/// Sending side of a split [`Connection`].
pub trait ConnectionWriteHalf: Send {
    /// Feeds a data packet on the connection. Depending on the connection you might need to call
    /// [`ConnectionWriteHalf::flush`] before the packet is actually sent.
    fn feed_data_packet(
        &mut self,
        packet: DataPacket,
    ) -> impl Future<Output = io::Result<()>> + Send;
    /// Feeds a control packet on the connection. Depending on the connection you might need to call
    /// [`ConnectionWriteHalf::flush`] before the packet is actually sent.
    fn feed_control_packet(
        &mut self,
        packet: ControlPacket,
    ) -> impl Future<Output = io::Result<()>> + Send;
    /// Flush the connection. This sends all buffered packets which haven't been sent yet.
    fn flush(&mut self) -> impl Future<Output = io::Result<()>> + Send;
}
/// A bidirectional peer connection which can send and receive [`Packet`]s.
pub trait Connection {
    type ReadHalf: ConnectionReadHalf;
    type WriteHalf: ConnectionWriteHalf;
    /// Feeds a data packet on the connection. Depending on the connection you might need to call
    /// [`Connection::flush`] before the packet is actually sent.
    fn feed_data_packet(
        &mut self,
        packet: DataPacket,
    ) -> impl Future<Output = io::Result<()>> + Send;
    /// Feeds a control packet on the connection. Depending on the connection you might need to call
    /// [`Connection::flush`] before the packet is actually sent.
    fn feed_control_packet(
        &mut self,
        packet: ControlPacket,
    ) -> impl Future<Output = io::Result<()>> + Send;
    /// Flush the connection. This sends all buffered packets which haven't been sent yet.
    fn flush(&mut self) -> impl Future<Output = io::Result<()>> + Send;
    /// Receive a packet from the remote end. `None` indicates the stream ended.
    fn receive_packet(&mut self) -> impl Future<Output = Option<io::Result<Packet>>> + Send;
    /// Get an identifier for this connection, which shows details about the remote
    fn identifier(&self) -> Result<String, io::Error>;
    /// The static cost of using this connection
    fn static_link_cost(&self) -> Result<u16, io::Error>;
    /// Split the connection in a read and write half which can be used independently
    fn split(self) -> (Self::ReadHalf, Self::WriteHalf);
}
/// A wrapper around an asynchronous (non blocking) tcp stream, implementing [`Connection`].
pub struct TcpStream {
    // Framed codec over the raw stream; `Tracked` counts bytes read/written.
    framed: Framed<Tracked<tokio::net::TcpStream>, packet::Codec>,
    // Cached addresses, so `identifier` does not need to touch the socket again.
    local_addr: SocketAddr,
    peer_addr: SocketAddr,
}
impl TcpStream {
    /// Create a new wrapped [`TcpStream`] which implements the [`Connection`] trait.
    ///
    /// `read` and `write` are shared byte counters, incremented for traffic on this stream.
    ///
    /// # Errors
    ///
    /// Fails if the local or peer address of the socket cannot be retrieved.
    pub fn new(
        tcp_stream: tokio::net::TcpStream,
        read: Arc<AtomicU64>,
        write: Arc<AtomicU64>,
    ) -> io::Result<Self> {
        Ok(Self {
            local_addr: tcp_stream.local_addr()?,
            peer_addr: tcp_stream.peer_addr()?,
            framed: Framed::new(Tracked::new(read, write, tcp_stream), packet::Codec::new()),
        })
    }
}
impl Connection for TcpStream {
    type ReadHalf = TcpStreamReadHalf;
    type WriteHalf = TcpStreamWriteHalf;
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        self.framed.next().await
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
    fn identifier(&self) -> Result<String, io::Error> {
        Ok(format!("TCP {} <-> {}", self.local_addr, self.peer_addr))
    }
    fn static_link_cost(&self) -> Result<u16, io::Error> {
        Ok(match self.peer_addr {
            SocketAddr::V4(_) => PACKET_PROCESSING_COST_IP4_TCP,
            // IPv4-mapped IPv6 addresses are really IPv4, so get the IPv4 cost.
            SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
                PACKET_PROCESSING_COST_IP4_TCP
            }
            SocketAddr::V6(_) => PACKET_PROCESSING_COST_IP6_TCP,
        })
    }
    fn split(self) -> (Self::ReadHalf, Self::WriteHalf) {
        let (tx, rx) = self.framed.split();
        (
            TcpStreamReadHalf { framed: rx },
            TcpStreamWriteHalf { framed: tx },
        )
    }
}
/// Receive half of a split [`TcpStream`].
pub struct TcpStreamReadHalf {
    framed: SplitStream<Framed<Tracked<tokio::net::TcpStream>, packet::Codec>>,
}
impl ConnectionReadHalf for TcpStreamReadHalf {
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        self.framed.next().await
    }
}
/// Send half of a split [`TcpStream`].
pub struct TcpStreamWriteHalf {
    framed: SplitSink<Framed<Tracked<tokio::net::TcpStream>, packet::Codec>, packet::Packet>,
}
impl ConnectionWriteHalf for TcpStreamWriteHalf {
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
}
/// A wrapper around a quic send and quic receive stream, implementing the [`Connection`] trait.
pub struct Quic {
    // Framed codec over the bidirectional stream (used for control packets).
    framed: Framed<Tracked<QuicStream>, packet::Codec>,
    // Underlying QUIC connection, used for unreliable datagrams (data packets).
    con: quinn::Connection,
    // Shared byte counters for datagram traffic (stream traffic is counted by `Tracked`).
    read: Arc<AtomicU64>,
    write: Arc<AtomicU64>,
}
/// Combines a QUIC send and receive stream into a single AsyncRead + AsyncWrite object.
struct QuicStream {
    tx: quinn::SendStream,
    rx: quinn::RecvStream,
}
impl Quic {
    /// Create a new wrapper around Quic streams.
    ///
    /// `read` and `write` are shared byte counters, incremented for traffic on this
    /// connection (both stream and datagram based).
    pub fn new(
        tx: quinn::SendStream,
        rx: quinn::RecvStream,
        con: quinn::Connection,
        read: Arc<AtomicU64>,
        write: Arc<AtomicU64>,
    ) -> Self {
        Quic {
            framed: Framed::new(
                Tracked::new(read.clone(), write.clone(), QuicStream { tx, rx }),
                packet::Codec::new(),
            ),
            con,
            read,
            write,
        }
    }
}
// Reads delegate to the receive stream.
impl AsyncRead for QuicStream {
    #[inline]
    fn poll_read(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<io::Result<()>> {
        Pin::new(&mut self.rx).poll_read(cx, buf)
    }
}
// Writes delegate to the send stream.
impl AsyncWrite for QuicStream {
    #[inline]
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> std::task::Poll<Result<usize, io::Error>> {
        Pin::new(&mut self.tx)
            .poll_write(cx, buf)
            .map_err(From::from)
    }
    #[inline]
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), io::Error>> {
        Pin::new(&mut self.tx).poll_flush(cx)
    }
    #[inline]
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Result<(), io::Error>> {
        Pin::new(&mut self.tx).poll_shutdown(cx)
    }
    #[inline]
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> std::task::Poll<Result<usize, io::Error>> {
        Pin::new(&mut self.tx).poll_write_vectored(cx, bufs)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        self.tx.is_write_vectored()
    }
}
impl Connection for Quic {
    type ReadHalf = QuicReadHalf;
    type WriteHalf = QuicWriteHalf;
    // Data packets are sent as QUIC datagrams rather than over the stream, so they are
    // encoded manually here and the byte counter is updated by hand.
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        let mut codec = packet::Codec::new();
        let mut buffer = BytesMut::with_capacity(1500);
        codec.encode(Packet::DataPacket(packet), &mut buffer)?;
        let data: Bytes = buffer.into();
        let tx_len = data.len();
        self.write.fetch_add(tx_len as u64, Ordering::Relaxed);
        self.con.send_datagram(data).map_err(io::Error::other)
    }
    // Control packets go over the reliable stream.
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    // Wait for the first of: a datagram (data packet) on the connection, or a framed
    // packet on the stream.
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        tokio::select! {
            datagram = self.con.read_datagram() => {
                let datagram_bytes = match datagram {
                    Ok(buffer) => buffer,
                    Err(e) => return Some(Err(e.into())),
                };
                let recv_len = datagram_bytes.len();
                self.read.fetch_add(recv_len as u64, Ordering::Relaxed);
                let mut codec = packet::Codec::new();
                match codec.decode(&mut datagram_bytes.into()) {
                    Ok(Some(packet)) => Some(Ok(packet)),
                    // Partial? packet read. We consider this to be a stream hangup
                    // TODO: verify
                    Ok(None) => None,
                    Err(e) => Some(Err(e)),
                }
            },
            packet = self.framed.next() => {
                packet
            }
        }
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
    fn identifier(&self) -> Result<String, io::Error> {
        Ok(format!("QUIC -> {}", self.con.remote_address()))
    }
    fn static_link_cost(&self) -> Result<u16, io::Error> {
        Ok(match self.con.remote_address() {
            SocketAddr::V4(_) => PACKET_PROCESSING_COST_IP4_QUIC,
            // IPv4-mapped IPv6 addresses are really IPv4, so get the IPv4 cost.
            SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
                PACKET_PROCESSING_COST_IP4_QUIC
            }
            SocketAddr::V6(_) => PACKET_PROCESSING_COST_IP6_QUIC,
        })
    }
    // Both halves keep a handle on the QUIC connection: the read half for datagram
    // reception, the write half for datagram transmission.
    fn split(self) -> (Self::ReadHalf, Self::WriteHalf) {
        let Self {
            framed,
            con,
            read,
            write,
        } = self;
        let (tx, rx) = framed.split();
        (
            QuicReadHalf {
                framed: rx,
                con: con.clone(),
                read,
            },
            QuicWriteHalf {
                framed: tx,
                con,
                write,
            },
        )
    }
}
/// Receive half of a split [`Quic`] connection.
pub struct QuicReadHalf {
    framed: SplitStream<Framed<Tracked<QuicStream>, packet::Codec>>,
    // Connection handle, needed to receive datagrams (data packets).
    con: quinn::Connection,
    // Shared received-bytes counter for datagram traffic.
    read: Arc<AtomicU64>,
}
/// Send half of a split [`Quic`] connection.
pub struct QuicWriteHalf {
    framed: SplitSink<Framed<Tracked<QuicStream>, packet::Codec>, packet::Packet>,
    // Connection handle, needed to send datagrams (data packets).
    con: quinn::Connection,
    // Shared sent-bytes counter for datagram traffic.
    write: Arc<AtomicU64>,
}
impl ConnectionReadHalf for QuicReadHalf {
    // NOTE: this mirrors `<Quic as Connection>::receive_packet`; keep both in sync.
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        tokio::select! {
            datagram = self.con.read_datagram() => {
                let datagram_bytes = match datagram {
                    Ok(buffer) => buffer,
                    Err(e) => return Some(Err(e.into())),
                };
                let recv_len = datagram_bytes.len();
                self.read.fetch_add(recv_len as u64, Ordering::Relaxed);
                let mut codec = packet::Codec::new();
                match codec.decode(&mut datagram_bytes.into()) {
                    Ok(Some(packet)) => Some(Ok(packet)),
                    // Partial? packet read. We consider this to be a stream hangup
                    // TODO: verify
                    Ok(None) => None,
                    Err(e) => Some(Err(e)),
                }
            },
            packet = self.framed.next() => {
                packet
            }
        }
    }
}
impl ConnectionWriteHalf for QuicWriteHalf {
    // NOTE: this mirrors `<Quic as Connection>::feed_data_packet`; keep both in sync.
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        let mut codec = packet::Codec::new();
        let mut buffer = BytesMut::with_capacity(1500);
        codec.encode(Packet::DataPacket(packet), &mut buffer)?;
        let data: Bytes = buffer.into();
        let tx_len = data.len();
        self.write.fetch_add(tx_len as u64, Ordering::Relaxed);
        self.con.send_datagram(data).map_err(io::Error::other)
    }
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
}
#[cfg(test)]
/// Wrapper for an in-memory pipe implementing the [`Connection`] trait. Test-only.
pub struct DuplexStream {
    framed: Framed<tokio::io::DuplexStream, packet::Codec>,
}
#[cfg(test)]
impl DuplexStream {
    /// Create a new in memory duplex stream.
    pub fn new(duplex: tokio::io::DuplexStream) -> Self {
        Self {
            framed: Framed::new(duplex, packet::Codec::new()),
        }
    }
}
#[cfg(test)]
impl Connection for DuplexStream {
    type ReadHalf = DuplexStreamReadHalf;
    type WriteHalf = DuplexStreamWriteHalf;
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        self.framed.next().await
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
    fn identifier(&self) -> Result<String, io::Error> {
        Ok("Memory pipe".to_string())
    }
    // Fixed nominal cost for the in-memory pipe.
    fn static_link_cost(&self) -> Result<u16, io::Error> {
        Ok(1)
    }
    fn split(self) -> (Self::ReadHalf, Self::WriteHalf) {
        let (tx, rx) = self.framed.split();
        (
            DuplexStreamReadHalf { framed: rx },
            DuplexStreamWriteHalf { framed: tx },
        )
    }
}
#[cfg(test)]
/// Receive half of a split [`DuplexStream`].
pub struct DuplexStreamReadHalf {
    framed: SplitStream<Framed<tokio::io::DuplexStream, packet::Codec>>,
}
#[cfg(test)]
/// Send half of a split [`DuplexStream`].
pub struct DuplexStreamWriteHalf {
    framed: SplitSink<Framed<tokio::io::DuplexStream, packet::Codec>, packet::Packet>,
}
#[cfg(test)]
impl ConnectionReadHalf for DuplexStreamReadHalf {
    async fn receive_packet(&mut self) -> Option<io::Result<Packet>> {
        self.framed.next().await
    }
}
#[cfg(test)]
impl ConnectionWriteHalf for DuplexStreamWriteHalf {
    async fn feed_data_packet(&mut self, packet: DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }
    async fn feed_control_packet(&mut self, packet: ControlPacket) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/cdn.rs | mycelium/src/cdn.rs | use std::path::PathBuf;
use aes_gcm::{aead::Aead, KeyInit};
use axum::{
extract::{Query, State},
http::{HeaderMap, StatusCode},
routing::get,
Router,
};
use axum_extra::extract::Host;
use futures::{stream::FuturesUnordered, StreamExt};
use reqwest::header::CONTENT_TYPE;
use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
/// Cdn functionality. Urls of a specific format lead to downloading of metadata from the registry,
/// and serving of chunks.
pub struct Cdn {
    // Directory where reconstructed blocks are cached on disk.
    cache: PathBuf,
    // Cancelling this token shuts down the HTTP server spawned by `start`.
    cancel_token: CancellationToken,
}
/// Cache for reconstructed blocks
#[derive(Clone)]
struct Cache {
    // Base directory of the on-disk block cache.
    base: PathBuf,
}
impl Cdn {
    /// Create a new `Cdn` which stores reconstructed blocks in the given cache directory.
    pub fn new(cache: PathBuf) -> Self {
        let cancel_token = CancellationToken::new();
        Self {
            cache,
            cancel_token,
        }
    }
    /// Start the Cdn server on the given listener. The server runs in a background task
    /// until this `Cdn` is dropped (which cancels `cancel_token`).
    ///
    /// # Errors
    ///
    /// Fails if the cache directory cannot be created, or if the configured cache path
    /// exists but is not a directory.
    pub fn start(&self, listener: TcpListener) -> Result<(), Box<dyn std::error::Error>> {
        // Validate/prepare the cache dir before doing anything else.
        if !self.cache.exists() {
            info!(dir = %self.cache.display(), "Creating cache dir");
            // create_dir_all also creates missing parent components, where the previous
            // create_dir call would fail if the parent did not exist yet.
            std::fs::create_dir_all(&self.cache)?;
        }
        if !self.cache.is_dir() {
            return Err("Cache dir is not a directory".into());
        }
        let state = Cache {
            base: self.cache.clone(),
        };
        let router = Router::new().route("/", get(cdn)).with_state(state);
        let cancel_token = self.cancel_token.clone();
        // The async block takes ownership of listener, router and cancel_token.
        tokio::spawn(async {
            axum::serve(listener, router)
                .with_graceful_shutdown(cancel_token.cancelled_owned())
                .await
                .map_err(|err| {
                    warn!(%err, "Cdn server error");
                })
        });
        Ok(())
    }
}
/// Query string parameters accepted by the [`cdn`] handler.
#[derive(Debug, serde::Deserialize)]
struct DecryptionKeyQuery {
    // Optional hex-encoded decryption key for the requested metadata.
    key: Option<String>,
}
/// Axum handler serving CDN content.
///
/// The host is expected to look like `<32 hex chars>.<registry host>`: the first label is the
/// hex-encoded 16-byte hash of the metadata blob, the remainder is the registry to fetch it
/// from. An optional `?key=<32 hex chars>` query parameter carries the decryption key.
/// Files are reassembled from their blocks (via the [`Cache`]); directories are rendered as a
/// simple HTML listing.
#[tracing::instrument(level = tracing::Level::DEBUG, skip(cache))]
async fn cdn(
    Host(host): Host,
    Query(query): Query<DecryptionKeyQuery>,
    State(cache): State<Cache>,
) -> Result<(HeaderMap, Vec<u8>), StatusCode> {
    debug!("Received request at {host}");
    let mut parts = host.split('.');
    let prefix = parts
        .next()
        .expect("Splitting a String always yields at least 1 result; Qed.");
    // 32 hex characters decode to the expected 16-byte hash.
    if prefix.len() != 32 {
        return Err(StatusCode::BAD_REQUEST);
    }
    let mut hash = [0; 16];
    faster_hex::hex_decode(prefix.as_bytes(), &mut hash).map_err(|_| StatusCode::BAD_REQUEST)?;
    // Everything after the first label is the registry host.
    let registry_url = parts.collect::<Vec<_>>().join(".");
    let decryption_key = if let Some(query_key) = query.key {
        let mut key = [0; 16];
        // hex_decode also rejects keys of the wrong length.
        faster_hex::hex_decode(query_key.as_bytes(), &mut key)
            .map_err(|_| StatusCode::BAD_REQUEST)?;
        Some(key)
    } else {
        None
    };
    let meta = load_meta(registry_url.clone(), hash, decryption_key).await?;
    debug!("Metadata loaded");
    let mut headers = HeaderMap::new();
    match meta {
        cdn_meta::Metadata::File(file) => {
            // Propagate the stored mime type, if there is one.
            if let Some(mime) = file.mime {
                debug!(%mime, "Setting mime type");
                headers.append(
                    CONTENT_TYPE,
                    mime.parse().map_err(|_| {
                        warn!("Not serving file with unprocessable mime type");
                        StatusCode::UNPROCESSABLE_ENTITY
                    })?,
                );
            }
            // File recombination: concatenate all blocks in order.
            let mut content = vec![];
            for block in file.blocks {
                content.extend_from_slice(cache.fetch_block(&block).await?.as_slice());
            }
            Ok((headers, content))
        }
        cdn_meta::Metadata::Directory(dir) => {
            // Render the directory as a minimal HTML index page.
            let mut out = r#"
<!DOCTYPE html>
<html i18n-values="dir:textdirection;lang:language">
<head>
<meta charset="utf-8">
</head>
<body>
<ul>"#
                .to_string();
            headers.append(
                CONTENT_TYPE,
                "text/html"
                    .parse()
                    .expect("Can parse \"text/html\" to content-type"),
            );
            for (file_hash, encryption_key) in dir.files {
                // Fetch each entry's metadata just to learn its display name.
                let meta = load_meta(registry_url.clone(), file_hash, encryption_key).await?;
                let name = match meta {
                    cdn_meta::Metadata::File(file) => file.name,
                    cdn_meta::Metadata::Directory(dir) => dir.name,
                };
                // NOTE(review): `name` comes from remote metadata and is interpolated into
                // the HTML unescaped — consider HTML-escaping it to avoid markup injection.
                out.push_str(&format!(
                    "<li><a href=\"http://{}.{registry_url}/?key={}\">{name}</a></li>\n",
                    faster_hex::hex_string(&file_hash),
                    &encryption_key
                        .map(|ek| faster_hex::hex_string(&ek))
                        .unwrap_or_else(String::new),
                ));
            }
            out.push_str("</ul></body></html>");
            Ok((headers, out.into()))
        }
    }
}
/// Load a metadata blob from a metadata repository.
///
/// The blob is fetched from `http://{registry_url}/api/v1/metadata/{hex hash}`. If an
/// `encryption_key` is provided, the blob is decrypted with AES-128-GCM, where the final 12
/// bytes of the blob are the nonce and the rest is the ciphertext.
///
/// # Errors
///
/// Maps registry/transport failures to `INTERNAL_SERVER_ERROR`, forwards non-OK registry
/// status codes as-is, and returns `UNPROCESSABLE_ENTITY` when decryption or decoding of the
/// blob fails.
async fn load_meta(
    registry_url: String,
    hash: cdn_meta::Hash,
    encryption_key: Option<cdn_meta::Hash>,
) -> Result<cdn_meta::Metadata, StatusCode> {
    let mut r_url = reqwest::Url::parse(&format!("http://{registry_url}")).map_err(|err| {
        error!(%err, "Could not parse registry URL");
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    let hex_hash = faster_hex::hex_string(&hash);
    r_url.set_path(&format!("/api/v1/metadata/{hex_hash}"));
    r_url.set_scheme("http").map_err(|_| {
        error!("Could not set HTTP scheme");
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    debug!(url = %r_url, "Fetching chunk");
    let metadata_reply = reqwest::get(r_url).await.map_err(|err| {
        error!(%err, "Could not load metadata from registry");
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    // TODO: Should we just check if status code is success here?
    if metadata_reply.status() != StatusCode::OK {
        debug!(
            status = %metadata_reply.status(),
            "Registry replied with non-OK status code"
        );
        return Err(metadata_reply.status());
    }
    let encrypted_metadata = metadata_reply.bytes().await.map_err(|err| {
        error!(%err, "Could not load metadata response from registry");
        StatusCode::INTERNAL_SERVER_ERROR
    })?;
    let metadata = if let Some(encryption_key) = encryption_key {
        // A valid encrypted blob is at least as long as the 12 byte nonce appended to it.
        if encrypted_metadata.len() < 12 {
            debug!("Attempting to decrypt metadata with insufficient size");
            return Err(StatusCode::UNPROCESSABLE_ENTITY);
        }
        let decryptor = aes_gcm::Aes128Gcm::new(&encryption_key.into());
        let plaintext = decryptor
            .decrypt(
                // Last 12 bytes are the nonce, everything before it is the ciphertext.
                encrypted_metadata[encrypted_metadata.len() - 12..].into(),
                &encrypted_metadata[..encrypted_metadata.len() - 12],
            )
            .map_err(|_| {
                warn!("Decryption of block failed");
                // Either the decryption key is wrong or the blob is corrupt, we assume the
                // registry is not at fault so the decryption key is wrong, which is a user
                // error.
                StatusCode::UNPROCESSABLE_ENTITY
            })?;
        plaintext
    } else {
        encrypted_metadata.into()
    };
    // If the metadata is not decodable, this is not really our fault, but also not necessarily
    // the user's fault.
    let (meta, consumed) =
        cdn_meta::Metadata::from_binary(&metadata).map_err(|_| StatusCode::UNPROCESSABLE_ENTITY)?;
    if consumed != metadata.len() {
        warn!(
            metadata_length = metadata.len(),
            consumed, "Trailing binary metadata which wasn't decoded"
        );
    }
    Ok(meta)
}
impl Drop for Cdn {
    // Cancelling the token shuts down the server task spawned in `start`, if any.
    fn drop(&mut self) {
        self.cancel_token.cancel();
    }
}
/// Download a shard from a 0-db.
///
/// Connects to the 0-db at `location.host`, selects the configured namespace, and returns the
/// value stored under `key`.
async fn download_shard(
    location: &cdn_meta::Location,
    key: &[u8],
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    // Open a multiplexed connection to the 0-db backing this shard.
    let addr = format!("redis://{}", location.host);
    let mut connection = redis::Client::open(addr)?
        .get_multiplexed_async_connection()
        .await?;
    // 0-db namespaces are selected like redis databases.
    redis::cmd("SELECT")
        .arg(&location.namespace)
        .query_async::<()>(&mut connection)
        .await?;
    let shard = redis::cmd("GET")
        .arg(key)
        .query_async(&mut connection)
        .await?;
    Ok(shard)
}
impl Cache {
async fn fetch_block(&self, block: &cdn_meta::Block) -> Result<Vec<u8>, StatusCode> {
let mut cached_file_path = self.base.clone();
cached_file_path.push(faster_hex::hex_string(&block.encrypted_hash));
// If we have the file in cache, just open it, load it, and return from there.
if cached_file_path.exists() {
return tokio::fs::read(&cached_file_path).await.map_err(|err| {
error!(%err, "Could not load cached file");
StatusCode::INTERNAL_SERVER_ERROR
});
}
// File is not in cache, download and save
// TODO: Rank based on expected latency
// FIXME: Only download the required amount
let mut shard_stream = block
.shards
.iter()
.enumerate()
.map(|(i, loc)| async move { (i, download_shard(loc, &block.encrypted_hash).await) })
.collect::<FuturesUnordered<_>>();
let mut shards = vec![None; block.shards.len()];
while let Some((idx, shard)) = shard_stream.next().await {
let shard = shard.map_err(|err| {
warn!(err, "Could not load shard");
StatusCode::INTERNAL_SERVER_ERROR
})?;
shards[idx] = Some(shard);
}
// recombine
let encoder = reed_solomon_erasure::galois_8::ReedSolomon::new(
block.required_shards as usize,
block.shards.len() - block.required_shards as usize,
)
.map_err(|err| {
error!(%err, "Failed to construct erausre codec");
StatusCode::INTERNAL_SERVER_ERROR
})?;
encoder.reconstruct_data(&mut shards).map_err(|err| {
error!(%err, "Shard recombination failed");
StatusCode::INTERNAL_SERVER_ERROR
})?;
// SAFETY: Since decoding was succesfull, the first shards (data shards) must be
// Option::Some
let mut encrypted_data = shards
.into_iter()
.map(Option::unwrap)
.take(block.required_shards as usize)
.flatten()
.collect::<Vec<_>>();
let padding_len = encrypted_data[encrypted_data.len() - 1] as usize;
encrypted_data.resize(encrypted_data.len() - padding_len, 0);
let decryptor = aes_gcm::Aes128Gcm::new(&block.content_hash.into());
let c = decryptor
.decrypt(&block.nonce.into(), encrypted_data.as_slice())
.map_err(|err| {
warn!(%err, "Decryption of content block failed");
StatusCode::UNPROCESSABLE_ENTITY
})?;
// Save file to cache, this is not critical if it fails
if let Err(err) = tokio::fs::write(&cached_file_path, &c).await {
warn!(%err, "Could not write block to cache");
};
Ok(c)
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/rr_cache.rs | mycelium/src/rr_cache.rs | //! This module contains a cache implementation for route requests
use std::{
net::{IpAddr, Ipv6Addr},
sync::Arc,
};
use dashmap::DashMap;
use tokio::time::{Duration, Instant};
use tracing::trace;
use crate::{babel::RouteRequest, peer::Peer, subnet::Subnet, task::AbortHandle};
/// Clean the route request cache every 5 seconds
const CACHE_CLEANING_INTERVAL: Duration = Duration::from_secs(5);
/// IP used for the [`Subnet`] in the cache in case there is no prefix specified.
const GLOBAL_SUBNET_IP: IpAddr = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0));
/// Prefix size to use for the [`Subnet`] in case there is no prefix specified.
const GLOBAL_SUBNET_PREFIX_SIZE: u8 = 0;
/// A self cleaning cache for route requests.
///
/// Entries are removed by a background task once they are older than the expiration configured
/// at construction time.
#[derive(Clone)]
pub struct RouteRequestCache {
    /// The actual cache, mapping an instance of a route request to the peers which we've sent this
    /// to.
    cache: Arc<DashMap<Subnet, RouteRequestInfo, ahash::RandomState>>,
    // Handle to the background cleanup task; the task is aborted when the last clone drops.
    _cleanup_task: Arc<AbortHandle>,
}
/// Bookkeeping for a route request we've sent out.
struct RouteRequestInfo {
    /// The lowest generation we've forwarded.
    generation: u8,
    /// Peers which we've sent this route request to already.
    receivers: Vec<Peer>,
    /// The moment we've sent this route request
    sent: Instant,
}
impl RouteRequestCache {
    /// Create a new cache which cleans entries which are older than the given expiration.
    ///
    /// The cache cleaning is done periodically, so entries might live slightly longer than the
    /// allowed expiration.
    pub fn new(expiration: Duration) -> Self {
        let cache = Arc::new(DashMap::with_hasher(ahash::RandomState::new()));
        let _cleanup_task = Arc::new(
            tokio::spawn({
                let cache = cache.clone();
                async move {
                    loop {
                        tokio::time::sleep(CACHE_CLEANING_INTERVAL).await;
                        trace!("Cleaning route request cache");
                        // `retain` keeps entries for which the closure returns true, i.e.
                        // entries which have not expired yet. (The predicate was previously
                        // inverted, which kept expired entries and dropped fresh ones.)
                        cache.retain(|subnet, info: &mut RouteRequestInfo| {
                            if info.sent.elapsed() < expiration {
                                true
                            } else {
                                trace!(%subnet, "Removing expired route request from cache");
                                false
                            }
                        });
                    }
                }
            })
            .abort_handle()
            .into(),
        );
        Self {
            cache,
            _cleanup_task,
        }
    }
    /// Record a route request which has been sent to peers.
    pub fn sent_route_request(&self, rr: RouteRequest, receivers: Vec<Peer>) {
        // A route request without prefix is a wildcard request, tracked under the global
        // 0-length subnet.
        let subnet = rr.prefix().unwrap_or(
            Subnet::new(GLOBAL_SUBNET_IP, GLOBAL_SUBNET_PREFIX_SIZE)
                .expect("Static global IPv6 subnet is valid; qed"),
        );
        let generation = rr.generation();
        let rri = RouteRequestInfo {
            generation,
            receivers,
            sent: Instant::now(),
        };
        self.cache.insert(subnet, rri);
    }
    /// Get cached info about a route request for a subnet, if it exists.
    ///
    /// Returns the lowest forwarded generation and the peers the request was sent to.
    pub fn info(&self, subnet: Subnet) -> Option<(u8, Vec<Peer>)> {
        self.cache
            .get(&subnet)
            .map(|rri| (rri.generation, rri.receivers.clone()))
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel.rs | mycelium/src/babel.rs | //! This module contains babel related structs.
//!
//! We don't fully implement the babel spec, and items which are implemented might deviate to fit
//! our specific use case. For reference, the implementation is based on [this
//! RFC](https://datatracker.ietf.org/doc/html/rfc8966).
use std::io;
use bytes::{Buf, BufMut};
use tokio_util::codec::{Decoder, Encoder};
use tracing::trace;
pub use self::{
hello::Hello, ihu::Ihu, route_request::RouteRequest, seqno_request::SeqNoRequest,
update::Update,
};
pub use self::tlv::Tlv;
mod hello;
mod ihu;
mod route_request;
mod seqno_request;
mod tlv;
mod update;
/// Magic byte to identify babel protocol packet.
const BABEL_MAGIC: u8 = 42;
/// The version of the protocol we are currently using.
const BABEL_VERSION: u8 = 3;
/// Size of a babel header on the wire.
const HEADER_WIRE_SIZE: usize = 4;
/// TLV type for the [`Hello`] tlv
const TLV_TYPE_HELLO: u8 = 4;
/// TLV type for the [`Ihu`] tlv
const TLV_TYPE_IHU: u8 = 5;
/// TLV type for the [`Update`] tlv
const TLV_TYPE_UPDATE: u8 = 8;
/// TLV type for the [`RouteRequest`] tlv
const TLV_TYPE_ROUTE_REQUEST: u8 = 9;
/// TLV type for the [`SeqNoRequest`] tlv
const TLV_TYPE_SEQNO_REQUEST: u8 = 10;
/// Wildcard address, the value is empty (0 bytes length).
const AE_WILDCARD: u8 = 0;
/// IPv4 address, the value is _at most_ 4 bytes long.
const AE_IPV4: u8 = 1;
/// IPv6 address, the value is _at most_ 16 bytes long.
const AE_IPV6: u8 = 2;
/// Link-local IPv6 address, the value is 8 bytes long. This implies a `fe80::/64` prefix.
const AE_IPV6_LL: u8 = 3;
/// A codec which can send and receive whole babel packets on the wire.
///
/// Implements [`Decoder`] and [`Encoder`] so it can be used with `tokio_util::codec::Framed`.
#[derive(Debug, Clone)]
pub struct Codec {
    // Header of a partially received packet, stored while waiting for the body to arrive.
    header: Option<Header>,
}
impl Codec {
/// Create a new `BabelCodec`.
pub fn new() -> Self {
Self { header: None }
}
/// Resets the `BabelCodec` to its default state.
pub fn reset(&mut self) {
self.header = None;
}
}
/// The header for a babel packet. This follows the definition of the header [in the
/// RFC](https://datatracker.ietf.org/doc/html/rfc8966#name-packet-format). Since the header
/// contains only hard-coded fields and the length of an encoded body, there is no need for users
/// to manually construct this. In fact, it exists only to make our lives slightly easier in
/// reading/writing the header on the wire.
#[derive(Debug, Clone)]
struct Header {
    // Magic byte; BABEL_MAGIC on valid packets.
    magic: u8,
    // Protocol version; BABEL_VERSION on packets we understand.
    version: u8,
    /// This is the length of the whole body following this header. Also excludes any possible
    /// trailers.
    body_length: u16,
}
impl Decoder for Codec {
    type Item = Tlv;
    type Error = io::Error;
    /// Decode a single [`Tlv`] from the buffer, if a complete packet is available.
    ///
    /// Returns `Ok(None)` when more bytes are needed; a header read from an incomplete packet
    /// is stored on `self` so decoding resumes correctly on the next call.
    fn decode(&mut self, src: &mut bytes::BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Read a header if we don't have one yet.
        let header = if let Some(header) = self.header.take() {
            trace!("Continue from stored header");
            header
        } else {
            if src.remaining() < HEADER_WIRE_SIZE {
                trace!("Insufficient bytes to read a babel header");
                return Ok(None);
            }
            trace!("Read babel header");
            Header {
                magic: src.get_u8(),
                version: src.get_u8(),
                body_length: src.get_u16(),
            }
        };
        if src.remaining() < header.body_length as usize {
            trace!("Insufficient bytes to read babel body");
            self.header = Some(header);
            return Ok(None);
        }
        // Silently ignore packets which don't have the correct values set, as defined in the
        // spec. Note that we consume the amount of bytes identified so we leave the parser in the
        // correct state for the next packet.
        if header.magic != BABEL_MAGIC || header.version != BABEL_VERSION {
            trace!("Dropping babel packet with wrong magic or version");
            src.advance(header.body_length as usize);
            self.reset();
            return Ok(None);
        }
        // at this point we have a whole body loaded in the buffer. We currently don't support sub
        // TLV's
        trace!("Read babel TLV body");
        // TODO: Technically we need to loop here as we can have multiple TLVs.
        // TLV header
        let tlv_type = src.get_u8();
        let body_len = src.get_u8();
        // TLV payload
        let tlv = match tlv_type {
            TLV_TYPE_HELLO => Some(Hello::from_bytes(src).into()),
            TLV_TYPE_IHU => Ihu::from_bytes(src, body_len).map(From::from),
            TLV_TYPE_UPDATE => Update::from_bytes(src, body_len).map(From::from),
            TLV_TYPE_ROUTE_REQUEST => RouteRequest::from_bytes(src, body_len).map(From::from),
            TLV_TYPE_SEQNO_REQUEST => SeqNoRequest::from_bytes(src, body_len).map(From::from),
            _ => {
                // unrecognized body type, silently drop
                trace!("Dropping unrecognized tlv");
                // We already read 2 bytes
                src.advance(header.body_length as usize - 2);
                self.reset();
                return Ok(None);
            }
        };
        Ok(tlv)
    }
}
impl Encoder<Tlv> for Codec {
    type Error = io::Error;
    /// Encode a single [`Tlv`] as a complete babel packet: a 4 byte header followed by one
    /// TLV (2 byte TLV header + payload).
    fn encode(&mut self, item: Tlv, dst: &mut bytes::BytesMut) -> Result<(), Self::Error> {
        // Write header
        dst.put_u8(BABEL_MAGIC);
        dst.put_u8(BABEL_VERSION);
        dst.put_u16(item.wire_size() as u16 + 2); // tlv payload + tlv header
        // Write TLV's, TODO: currently only 1 TLV/body
        // TLV header
        match item {
            Tlv::Hello(_) => dst.put_u8(TLV_TYPE_HELLO),
            Tlv::Ihu(_) => dst.put_u8(TLV_TYPE_IHU),
            Tlv::Update(_) => dst.put_u8(TLV_TYPE_UPDATE),
            Tlv::RouteRequest(_) => dst.put_u8(TLV_TYPE_ROUTE_REQUEST),
            Tlv::SeqNoRequest(_) => dst.put_u8(TLV_TYPE_SEQNO_REQUEST),
        }
        dst.put_u8(item.wire_size());
        item.write_bytes(dst);
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use std::{net::Ipv6Addr, time::Duration};
use futures::{SinkExt, StreamExt};
use tokio_util::codec::Framed;
use crate::subnet::Subnet;
#[tokio::test]
async fn codec_hello() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let hello = super::Hello::new_unicast(15.into(), 400);
sender
.send(hello.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_hello = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(hello), recv_hello);
}
#[tokio::test]
async fn codec_ihu() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let ihu = super::Ihu::new(27.into(), 400, None);
sender
.send(ihu.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_ihu = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(ihu), recv_ihu);
}
#[tokio::test]
async fn codec_update() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let update = super::Update::new(
Duration::from_secs(400),
16.into(),
25.into(),
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
[
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
]
.into(),
);
sender
.send(update.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
println!("Sent update packet");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
println!("Received update packet");
assert_eq!(super::Tlv::from(update), recv_update);
}
#[tokio::test]
async fn codec_seqno_request() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let snr = super::SeqNoRequest::new(
16.into(),
[
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
]
.into(),
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
);
sender
.send(snr.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(snr), recv_update);
}
#[tokio::test]
async fn codec_route_request() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let rr = super::RouteRequest::new(
Some(
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
),
13,
);
sender
.send(rr.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(rr), recv_update);
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/router_id.rs | mycelium/src/router_id.rs | use core::fmt;
use crate::crypto::PublicKey;
/// A `RouterId` uniquely identifies a router in the network.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RouterId {
    // The public key of the router.
    pk: PublicKey,
    // Zone bytes; zeroed for locally created ids (see `new`).
    zone: [u8; 2],
    // Random suffix to distinguish ids generated from the same key.
    rnd: [u8; 6],
}
impl RouterId {
    /// Size in bytes of a `RouterId`
    pub const BYTE_SIZE: usize = 40;
    /// Create a new `RouterId` from a [`PublicKey`].
    ///
    /// The zone bytes are zeroed and the trailing 6 bytes are randomly generated.
    pub fn new(pk: PublicKey) -> Self {
        Self {
            pk,
            zone: [0; 2],
            rnd: rand::random(),
        }
    }
    /// View this `RouterId` as a byte array.
    ///
    /// Layout: bytes 0..32 hold the public key, 32..34 the zone, 34..40 the random suffix.
    pub fn as_bytes(&self) -> [u8; Self::BYTE_SIZE] {
        let mut out = [0; Self::BYTE_SIZE];
        out[..32].copy_from_slice(self.pk.as_bytes());
        out[32..34].copy_from_slice(&self.zone);
        out[34..].copy_from_slice(&self.rnd);
        out
    }
    /// Converts this `RouterId` to a [`PublicKey`].
    pub fn to_pubkey(self) -> PublicKey {
        self.pk
    }
}
impl From<[u8; Self::BYTE_SIZE]> for RouterId {
    /// Reconstruct a `RouterId` from its 40 byte representation, as produced by
    /// [`RouterId::as_bytes`].
    fn from(bytes: [u8; Self::BYTE_SIZE]) -> RouterId {
        // All slice lengths are compile-time constants, so these conversions can't fail.
        RouterId {
            pk: PublicKey::from(
                <&[u8] as TryInto<[u8; 32]>>::try_into(&bytes[..32])
                    .expect("Slice of 32 bytes converts to a 32 byte array; qed"),
            ),
            zone: bytes[32..34]
                .try_into()
                .expect("Slice of 2 bytes converts to a 2 byte array; qed"),
            rnd: bytes[34..Self::BYTE_SIZE]
                .try_into()
                .expect("Slice of 6 bytes converts to a 6 byte array; qed"),
        }
    }
}
impl fmt::Display for RouterId {
    /// Formats as `<public key>-<zone hex>-<random suffix hex>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}-{}-{}",
            self.pk,
            faster_hex::hex_string(&self.zone),
            faster_hex::hex_string(&self.rnd)
        )
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/source_table.rs | mycelium/src/source_table.rs | use core::fmt;
use std::{collections::HashMap, time::Duration};
use tokio::{sync::mpsc, task::JoinHandle};
use tracing::error;
use crate::{
babel, metric::Metric, router_id::RouterId, routing_table::RouteEntry, sequence_number::SeqNo,
subnet::Subnet,
};
/// Duration after which a source entry is deleted if it is not updated.
const SOURCE_HOLD_DURATION: Duration = Duration::from_secs(60 * 30);
/// Key identifying a source: the combination of an advertised subnet and the router which
/// advertised it.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)]
pub struct SourceKey {
    // The advertised subnet.
    subnet: Subnet,
    // The router which advertised the subnet.
    router_id: RouterId,
}
/// The feasibility distance for a source: the best (metric, seqno) pair we have advertised for
/// it, used to decide which updates are safe to accept.
#[derive(Debug, Clone, Copy)]
pub struct FeasibilityDistance {
    // Advertised metric.
    metric: Metric,
    // Sequence number the metric was advertised with.
    seqno: SeqNo,
}
/// Table tracking the [`FeasibilityDistance`] of every known source, together with the
/// expiration task which removes the entry after [`SOURCE_HOLD_DURATION`].
#[derive(Debug)]
pub struct SourceTable {
    table: HashMap<SourceKey, (JoinHandle<()>, FeasibilityDistance)>,
}
impl FeasibilityDistance {
pub fn new(metric: Metric, seqno: SeqNo) -> Self {
FeasibilityDistance { metric, seqno }
}
/// Returns the metric for this `FeasibilityDistance`.
pub const fn metric(&self) -> Metric {
self.metric
}
/// Returns the sequence number for this `FeasibilityDistance`.
pub const fn seqno(&self) -> SeqNo {
self.seqno
}
}
impl SourceKey {
    /// Create a new `SourceKey` for the given subnet and advertising router.
    pub const fn new(subnet: Subnet, router_id: RouterId) -> Self {
        Self { subnet, router_id }
    }

    /// Returns the [`RouterId`] for this `SourceKey`.
    pub const fn router_id(&self) -> RouterId {
        self.router_id
    }

    /// Returns the [`Subnet`] for this `SourceKey`.
    pub const fn subnet(&self) -> Subnet {
        self.subnet
    }

    /// Updates the [`RouterId`] of this `SourceKey`
    pub fn set_router_id(&mut self, router_id: RouterId) {
        self.router_id = router_id
    }
}
impl SourceTable {
pub fn new() -> Self {
Self {
table: HashMap::new(),
}
}
pub fn insert(
&mut self,
key: SourceKey,
feas_dist: FeasibilityDistance,
sink: mpsc::Sender<SourceKey>,
) {
let expiration_handle = tokio::spawn(async move {
tokio::time::sleep(SOURCE_HOLD_DURATION).await;
if let Err(e) = sink.send(key).await {
error!("Failed to notify router of expired source key {e}");
}
});
// Abort the old task if present.
if let Some((old_timeout, _)) = self.table.insert(key, (expiration_handle, feas_dist)) {
old_timeout.abort();
}
}
/// Remove an entry from the source table.
pub fn remove(&mut self, key: &SourceKey) {
if let Some((old_timeout, _)) = self.table.remove(key) {
old_timeout.abort();
};
}
/// Resets the garbage collection timer for a given source key.
///
/// Does nothing if the source key is not present.
pub fn reset_timer(&mut self, key: SourceKey, sink: mpsc::Sender<SourceKey>) {
self.table
.entry(key)
.and_modify(|(old_expiration_handle, _)| {
// First cancel the existing task
old_expiration_handle.abort();
// Then set the new one
*old_expiration_handle = tokio::spawn(async move {
tokio::time::sleep(SOURCE_HOLD_DURATION).await;
if let Err(e) = sink.send(key).await {
error!("Failed to notify router of expired source key {e}");
}
});
});
}
/// Get the [`FeasibilityDistance`] currently associated with the [`SourceKey`].
pub fn get(&self, key: &SourceKey) -> Option<&FeasibilityDistance> {
self.table.get(key).map(|(_, v)| v)
}
/// Indicates if an update is feasible in the context of the current `SoureTable`.
pub fn is_update_feasible(&self, update: &babel::Update) -> bool {
// Before an update is accepted it should be checked against the feasbility condition
// If an entry in the source table with the same source key exists, we perform the feasbility check
// If no entry exists yet, the update is accepted as there is no better alternative available (yet)
let source_key = SourceKey::new(update.subnet(), update.router_id());
match self.get(&source_key) {
Some(entry) => {
(update.seqno().gt(&entry.seqno()))
|| (update.seqno() == entry.seqno() && update.metric() < entry.metric())
|| update.metric().is_infinite()
}
None => true,
}
}
/// Indicates if a [`RouteEntry`] is feasible according to the `SourceTable`.
pub fn route_feasible(&self, route: &RouteEntry) -> bool {
match self.get(&route.source()) {
Some(fd) => {
(route.seqno().gt(&fd.seqno))
|| (route.seqno() == fd.seqno && route.metric() < fd.metric)
|| route.metric().is_infinite()
}
None => true,
}
}
}
impl fmt::Display for SourceKey {
    /// Formats as `<subnet> advertised by <router id>`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} advertised by {}", self.subnet, self.router_id)
    }
}
#[cfg(test)]
mod tests {
use tokio::sync::mpsc;
use crate::{
babel,
connection::DuplexStream,
crypto::SecretKey,
metric::Metric,
peer::Peer,
router_id::RouterId,
routing_table::RouteEntry,
sequence_number::SeqNo,
source_table::{FeasibilityDistance, SourceKey, SourceTable},
subnet::Subnet,
};
use std::{net::Ipv6Addr, time::Duration};
/// A retraction is always considered to be feasible.
#[tokio::test]
async fn retraction_update_is_feasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(0),
Metric::infinite(),
sn,
rid,
);
assert!(st.is_update_feasible(&update));
}
/// An update with a smaller metric but with the same seqno is feasible.
#[tokio::test]
async fn smaller_metric_update_is_feasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(1),
Metric::from(9),
sn,
rid,
);
assert!(st.is_update_feasible(&update));
}
/// An update with the same metric and seqno is not feasible.
#[tokio::test]
async fn equal_metric_update_is_unfeasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(1),
Metric::from(10),
sn,
rid,
);
assert!(!st.is_update_feasible(&update));
}
/// An update with a larger metric and the same seqno is not feasible.
#[tokio::test]
async fn larger_metric_update_is_unfeasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(1),
Metric::from(11),
sn,
rid,
);
assert!(!st.is_update_feasible(&update));
}
/// An update with a lower seqno is not feasible.
#[tokio::test]
async fn lower_seqno_update_is_unfeasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(0),
Metric::from(1),
sn,
rid,
);
assert!(!st.is_update_feasible(&update));
}
/// An update with a higher seqno is feasible.
#[tokio::test]
async fn higher_seqno_update_is_feasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let mut st = SourceTable::new();
st.insert(
SourceKey::new(sn, rid),
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let update = babel::Update::new(
Duration::from_secs(60),
SeqNo::from(2),
Metric::from(200),
sn,
rid,
);
assert!(st.is_update_feasible(&update));
}
/// A route with a smaller metric but with the same seqno is feasible.
#[tokio::test]
async fn smaller_metric_route_is_feasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let source_key = SourceKey::new(sn, rid);
let mut st = SourceTable::new();
st.insert(
source_key,
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let (router_data_tx, _router_data_rx) = mpsc::channel(1);
let (router_control_tx, _router_control_rx) = mpsc::unbounded_channel();
let (dead_peer_sink, _dead_peer_stream) = mpsc::channel(1);
let (con1, _con2) = tokio::io::duplex(1500);
let neighbor = Peer::new(
router_data_tx,
router_control_tx,
DuplexStream::new(con1),
dead_peer_sink,
)
.expect("Can create a dummy peer");
let re = RouteEntry::new(
source_key,
neighbor,
Metric::new(9),
SeqNo::from(1),
true,
tokio::time::Instant::now() + Duration::from_secs(60),
);
assert!(st.route_feasible(&re));
}
/// If a route has the same metric as the source table it is not feasible.
#[tokio::test]
async fn equal_metric_route_is_unfeasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let source_key = SourceKey::new(sn, rid);
let mut st = SourceTable::new();
st.insert(
source_key,
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let (router_data_tx, _router_data_rx) = mpsc::channel(1);
let (router_control_tx, _router_control_rx) = mpsc::unbounded_channel();
let (dead_peer_sink, _dead_peer_stream) = mpsc::channel(1);
let (con1, _con2) = tokio::io::duplex(1500);
let neighbor = Peer::new(
router_data_tx,
router_control_tx,
DuplexStream::new(con1),
dead_peer_sink,
)
.expect("Can create a dummy peer");
let re = RouteEntry::new(
source_key,
neighbor,
Metric::new(10),
SeqNo::from(1),
true,
tokio::time::Instant::now() + Duration::from_secs(60),
);
assert!(!st.route_feasible(&re));
}
/// If a route has a higher metric as the source table it is not feasible.
#[tokio::test]
async fn higher_metric_route_is_unfeasible() {
let (sink, _) = tokio::sync::mpsc::channel(1);
let sk = SecretKey::new();
let pk = (&sk).into();
let sn = Subnet::new(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1).into(), 64)
.expect("Valid subnet in test case");
let rid = RouterId::new(pk);
let source_key = SourceKey::new(sn, rid);
let mut st = SourceTable::new();
st.insert(
source_key,
FeasibilityDistance::new(Metric::new(10), SeqNo::from(1)),
sink,
);
let (router_data_tx, _router_data_rx) = mpsc::channel(1);
let (router_control_tx, _router_control_rx) = mpsc::unbounded_channel();
let (dead_peer_sink, _dead_peer_stream) = mpsc::channel(1);
let (con1, _con2) = tokio::io::duplex(1500);
let neighbor = Peer::new(
router_data_tx,
router_control_tx,
DuplexStream::new(con1),
dead_peer_sink,
)
.expect("Can create a dummy peer");
let re = RouteEntry::new(
source_key,
neighbor,
Metric::new(11),
SeqNo::from(1),
true,
tokio::time::Instant::now() + Duration::from_secs(60),
);
assert!(!st.route_feasible(&re));
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/task.rs | mycelium/src/task.rs | //! This module provides some task abstractions which add custom logic to the default behavior.
/// A handle to a task, which is only used to abort the task. In case this handle is dropped, the
/// task is cancelled automatically.
pub struct AbortHandle(tokio::task::AbortHandle);
impl AbortHandle {
/// Abort the task this `AbortHandle` is referencing. It is safe to call this method multiple
/// times, but only the first call is actually usefull. It is possible for the task to still
/// finish succesfully, even after abort is called.
#[inline]
pub fn abort(&self) {
self.0.abort()
}
}
impl Drop for AbortHandle {
#[inline]
fn drop(&mut self) {
self.0.abort()
}
}
impl From<tokio::task::AbortHandle> for AbortHandle {
#[inline]
fn from(value: tokio::task::AbortHandle) -> Self {
Self(value)
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/peer_manager.rs | mycelium/src/peer_manager.rs | #[cfg(feature = "private-network")]
use crate::connection::tls::TlsStream;
use crate::connection::{Quic, TcpStream};
use crate::endpoint::{Endpoint, Protocol};
use crate::metrics::Metrics;
use crate::peer::{Peer, PeerRef};
use crate::router::Router;
use crate::router_id::RouterId;
use futures::stream::FuturesUnordered;
use futures::{FutureExt, StreamExt};
#[cfg(feature = "private-network")]
use openssl::ssl::{Ssl, SslAcceptor, SslConnector, SslMethod};
use quinn::crypto::rustls::QuicClientConfig;
use quinn::{congestion, MtuDiscoveryConfig, ServerConfig, TransportConfig};
use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer, ServerName, UnixTime};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::io;
use std::net::{IpAddr, SocketAddr, SocketAddrV6};
#[cfg(target_os = "linux")]
use std::os::fd::AsFd;
#[cfg(feature = "private-network")]
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{collections::hash_map::Entry, future::IntoFuture};
use tokio::net::{TcpListener, UdpSocket};
use tokio::task::AbortHandle;
use tokio::time::{Instant, MissedTickBehavior};
use tracing::{debug, error, info, instrument, trace, warn};
/// Magic bytes to identify a multicast UDP packet used in link local peer discovery.
const MYCELIUM_MULTICAST_DISCOVERY_MAGIC: &[u8; 8] = b"mycelium";
/// Size of a peer discovery beacon.
const PEER_DISCOVERY_BEACON_SIZE: usize = 8 + 2 + 40;
/// Link local peer discovery group joined by the UDP listener.
const LL_PEER_DISCOVERY_GROUP: &str = "ff02::cafe";
/// The time between sending consecutive link local discovery beacons.
const LL_PEER_DISCOVERY_BEACON_INTERVAL: Duration = Duration::from_secs(60);
/// The time between checking known peer liveness and trying to reconnect.
const PEER_CONNECT_INTERVAL: Duration = Duration::from_secs(5);
/// The maximum amount of successive failures allowed when connecting to a local discovered peer,
/// before it is forgotten.
const MAX_FAILED_LOCAL_PEER_CONNECTION_ATTEMPTS: usize = 3;
/// The amount of time allowed for a peer to finish the quic handshake when it connects to us. This
/// prevents a (mallicious) peer from hogging server resources. 10 seconds should be a reasonable
/// default for this, though it can certainly be made more strict if required.
const INBOUND_QUIC_HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10);
/// The maximum amount of concurrent quic handshakes to process from peers connecting to us. We
/// want to find a middle ground where we don't stall valid peers from connecting beceause a single
/// misbehaving peer stalls the connection task, but we also don't want to accept thousands of
/// these in parralel. For now, 10 in parallel should be sufficient, though this can be
/// increased/decreased based on observations.
const MAX_INBOUND_CONCURRENT_QUICK_HANDSHAKES: usize = 10;
/// The PeerManager creates new peers by connecting to configured addresses, and setting up the
/// connection. Once a connection is established, the created [`Peer`] is handed over to the
/// [`Router`].
pub struct PeerManager<M> {
inner: Arc<Inner<M>>,
/// Handles to background tasks so we can abort them when the PeerManager is dropped.
abort_handles: Vec<AbortHandle>,
}
/// Details how the PeerManager learned about a remote.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum PeerType {
/// Statically configured peer.
Static,
/// Peer found through link local discovery.
LinkLocalDiscovery,
/// A remote which initiated a connection to us.
Inbound,
}
/// Local info about a peer.
struct PeerInfo {
/// Details how we found out about this peer.
pt: PeerType,
/// Are we currently connecting to this peer?
connecting: bool,
/// The [`PeerRef`] used to check liveliness.
pr: PeerRef,
/// Amount of failed times we tried to connect to this peer. This is reset after a successful
/// connection.
connection_attempts: usize,
/// Keep track of the amount of bytes we've sent to and received from this peer.
con_traffic: ConnectionTraffic,
/// The moment in time we learned about this peer.
discovered: Instant,
/// The moment we last connected to this peer.
connected: Option<Instant>,
}
/// Counters for the amount of traffic written to and received from a [`Peer`].
#[derive(Debug, Clone)]
struct ConnectionTraffic {
/// Amount of bytes transmitted to this peer.
tx_bytes: Arc<AtomicU64>,
/// Amount of bytes received from this peer.
rx_bytes: Arc<AtomicU64>,
}
/// General state about a connection to a [`Peer`].
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ConnectionState {
/// There is a working connection to the [`Peer`].
Alive,
/// The system is currently in the process of establishing a new connection to the [`Peer`].
Connecting,
/// There is no connection, or the existing connection is no longer functional.
Dead,
}
/// Identification and information/statistics for a specific [`Peer`]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PeerStats {
/// The endpoint of the [`Peer`].
pub endpoint: Endpoint,
/// The [`Type`](PeerType) of the [`Peer`].
#[serde(rename = "type")]
pub pt: PeerType,
/// State of the connection to this [`Peer`]
pub connection_state: ConnectionState,
/// Amount of bytes transmitted to this [`Peer`].
pub tx_bytes: u64,
/// Amount of bytes received from this [`Peer`].
pub rx_bytes: u64,
/// Amount of time which passed since the system learned about this [`Peer`], in seconds.
pub discovered: u64,
/// Amount of seconds since the last succesfull connection to this [`Peer`].
pub last_connected: Option<u64>,
}
impl PeerInfo {
/// Return the amount of bytes read from this peer.
#[inline]
fn read(&self) -> u64 {
self.con_traffic.rx_bytes.load(Ordering::Relaxed)
}
/// Return the amount of bytes written to this peer.
#[inline]
fn written(&self) -> u64 {
self.con_traffic.tx_bytes.load(Ordering::Relaxed)
}
}
/// Marker error to indicate a [`peer`](Endpoint) is already known.
#[derive(Debug)]
pub struct PeerExists;
/// Marker error to indicate a [`peer`](Endpoint) is not known.
#[derive(Debug)]
pub struct PeerNotFound;
/// PSK used to set up a shared network. Currently 32 bytes though this might change in the future.
pub type PrivateNetworkKey = [u8; 32];
struct Inner<M> {
/// Router is unfortunately wrapped in a Mutex, because router is not Sync.
router: Mutex<Router<M>>,
peers: Mutex<HashMap<Endpoint, PeerInfo>>,
/// Listen port for new peer connections
tcp_listen_port: u16,
quic_socket: Option<quinn::Endpoint>,
/// Identity and name of a private network, if one exists
private_network_config: Option<(String, [u8; 32])>,
metrics: M,
firewall_mark: Option<u32>,
}
impl<M> PeerManager<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
#[allow(clippy::too_many_arguments)]
pub fn new(
router: Router<M>,
static_peers_sockets: Vec<Endpoint>,
tcp_listen_port: u16,
quic_listen_port: Option<u16>,
peer_discovery_port: u16,
disable_peer_discovery: bool,
private_network_config: Option<(String, PrivateNetworkKey)>,
metrics: M,
firewall_mark: Option<u32>,
) -> Result<Self, Box<dyn std::error::Error>> {
let is_private_net = private_network_config.is_some();
// Currently we don't support Quic when a private network is used.
let quic_socket = if !is_private_net {
if let Some(quic_listen_port) = quic_listen_port {
Some(make_quic_endpoint(
router.router_id(),
quic_listen_port,
firewall_mark,
)?)
} else {
None
}
} else {
None
};
// Set the initially configured peer count in metrics.
metrics.peer_manager_known_peers(static_peers_sockets.len());
let mut peer_manager = PeerManager {
inner: Arc::new(Inner {
router: Mutex::new(router),
peers: Mutex::new(
static_peers_sockets
.into_iter()
// These peers are not alive, but we say they are because the reconnect
// loop will perform the actual check and figure out they are dead, then
// (re)connect.
.map(|s| {
let now = tokio::time::Instant::now();
(
s,
PeerInfo {
pt: PeerType::Static,
connecting: false,
pr: PeerRef::new(),
connection_attempts: 0,
con_traffic: ConnectionTraffic {
tx_bytes: Arc::new(AtomicU64::new(0)),
rx_bytes: Arc::new(AtomicU64::new(0)),
},
discovered: now,
connected: None,
},
)
})
.collect(),
),
tcp_listen_port,
quic_socket,
private_network_config,
metrics,
firewall_mark,
}),
abort_handles: vec![],
};
// Start listeners for inbound connections.
// Start the tcp listener, in case we are running a private network the tcp listener will
// actually be a tls listener.
let handle = tokio::spawn(peer_manager.inner.clone().tcp_listener());
peer_manager.abort_handles.push(handle.abort_handle());
if is_private_net {
info!("Enabled private network mode");
} else if peer_manager.inner.quic_socket.is_some() {
// Currently quic is not supported in private network mode.
let handle = tokio::spawn(peer_manager.inner.clone().quic_listener());
peer_manager.abort_handles.push(handle.abort_handle());
};
// Start (re)connecting to outbound/local peers
let handle = tokio::spawn(peer_manager.inner.clone().connect_to_peers());
peer_manager.abort_handles.push(handle.abort_handle());
// Discover local peers, this does not actually connect to them. That is handle by the
// connect_to_peers task.
if !disable_peer_discovery {
let handle = tokio::spawn(
peer_manager
.inner
.clone()
.local_discovery(peer_discovery_port),
);
peer_manager.abort_handles.push(handle.abort_handle());
}
Ok(peer_manager)
}
/// Add a new peer to the system.
///
/// The peer starts of as a dead peer, and connecting is handled in the reconnect loop.
///
/// # Errors
///
/// This function returns an error if the [`Endpoint`] is already known.
pub fn add_peer(&self, peer: Endpoint) -> Result<(), PeerExists> {
let mut peer_map = self.inner.peers.lock().unwrap();
if peer_map.contains_key(&peer) {
return Err(PeerExists);
}
let now = tokio::time::Instant::now();
peer_map.insert(
peer,
PeerInfo {
pt: PeerType::Static,
connecting: false,
pr: PeerRef::new(),
connection_attempts: 0,
con_traffic: ConnectionTraffic {
tx_bytes: Arc::new(AtomicU64::new(0)),
rx_bytes: Arc::new(AtomicU64::new(0)),
},
discovered: now,
connected: None,
},
);
Ok(())
}
/// Delete a peer from the system.
///
/// The peer will be disconnected if it is currently connected.
///
/// # Errors
///
/// Returns an error if there is no peer identified by the given [`Endpoint`].
pub fn delete_peer(&self, endpoint: &Endpoint) -> Result<(), PeerNotFound> {
let mut peer_map = self.inner.peers.lock().unwrap();
peer_map.remove(endpoint).ok_or(PeerNotFound).map(|pi| {
// Make sure we kill the peer connection if one exists
if let Some(peer) = pi.pr.upgrade() {
peer.died();
}
})
}
/// Get a view of all known peers and their stats.
pub fn peers(&self) -> Vec<PeerStats> {
let peer_map = self.inner.peers.lock().unwrap();
let mut pi = Vec::with_capacity(peer_map.len());
for (endpoint, peer_info) in peer_map.iter() {
let connection_state = if peer_info.connecting {
ConnectionState::Connecting
} else if peer_info.pr.alive() {
ConnectionState::Alive
} else {
ConnectionState::Dead
};
pi.push(PeerStats {
endpoint: *endpoint,
pt: peer_info.pt.clone(),
connection_state,
tx_bytes: peer_info.written(),
rx_bytes: peer_info.read(),
discovered: peer_info.discovered.elapsed().as_secs(),
last_connected: peer_info.connected.map(|i| i.elapsed().as_secs()),
});
}
pi
}
}
impl<M> Drop for PeerManager<M> {
fn drop(&mut self) {
// Cancel all background tasks
for ah in &self.abort_handles {
ah.abort();
}
}
}
impl<M> Inner<M>
where
M: Metrics + Clone + Send + 'static,
{
/// Connect and if needed reconnect to known peers.
async fn connect_to_peers(self: Arc<Self>) {
let mut peer_check_interval = tokio::time::interval(PEER_CONNECT_INTERVAL);
// Avoid trying to spam connections. Since we track if we are connecting to a peer this
// won't be that bad, but this avoid unnecessary lock contention.
peer_check_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
// A list of pending connection futures. Like this, we don't have to spawn a new future for
// every connection task.
let mut connection_futures = FuturesUnordered::new();
loop {
tokio::select! {
// We don't care about the none case, that can happen when we aren't connecting to
// any peers.
Some((endpoint, maybe_new_peer)) = connection_futures.next() => {
// Only insert the possible new peer if we actually still care about it
let mut peers = self.peers.lock().unwrap();
if let Some(pi) = peers.get_mut(&endpoint) {
// Regardless of what happened, we are no longer connecting.
self.metrics.peer_manager_connection_finished();
pi.connecting = false;
if let Some(peer) = maybe_new_peer {
// We did find a new Peer, insert into router and keep track of it
// Use fully qualified call to aid compiler in type inference.
pi.pr = Peer::refer(&peer);
self.router.lock().unwrap().add_peer_interface(peer);
// We successfully connected, reset the connection_attempts counter to 0
pi.connection_attempts = 0;
pi.connected = Some(tokio::time::Instant::now());
} else {
// Only log with error level on the first connection failure, to avoid spamming the logs
if pi.connection_attempts == 0 {
error!(endpoint.address=%endpoint.address(), endpoint.proto=%endpoint.proto(), "Couldn't connect to endpoint, turn on debug logging for more details");
} else {
debug!(endpoint.address=%endpoint.address(), endpoint.proto=%endpoint.proto(), attempt=%pi.connection_attempts+1, "Couldn't connect to endpoint")
}
// Connection failed, add a failed attempt and forget about the peer if
// needed.
pi.connection_attempts += 1;
if pi.pt == PeerType::LinkLocalDiscovery
&& pi.connection_attempts >= MAX_FAILED_LOCAL_PEER_CONNECTION_ATTEMPTS {
info!(endpoint.address=%endpoint.address(), endpoint.proto=%endpoint.proto(), "Forgetting about locally discovered peer after failing to connect to it");
peers.remove(&endpoint);
}
}
}
}
_ = peer_check_interval.tick() => {
// Remove dead inbound peers
self.peers.lock().unwrap().retain(|_, v| v.pt != PeerType::Inbound || v.pr.alive());
debug!("Looking for dead peers");
// check if there is an entry for the peer in the router's peer list
for (endpoint, pi) in self.peers.lock().unwrap().iter_mut() {
if !pi.connecting && !pi.pr.alive() {
debug!(endpoint.address=%endpoint.address(), endpoint.proto=%endpoint.proto(), "Found dead peer");
if pi.pt == PeerType::Inbound {
debug!(endpoint.address=%endpoint.address(), endpoint.proto=%endpoint.proto(), "Refusing to reconnect to inbound peer");
continue
}
// Mark that we are connecting to the peer.
pi.connecting = true;
connection_futures.push(self.clone().connect_peer(*endpoint, pi.con_traffic.clone()));
self.metrics.peer_manager_connection_attempted();
}
}
}
}
}
}
/// Create a new connection to a remote peer
#[instrument(skip_all, fields(endpoint.proto=%endpoint.proto(), endpoint.address=%endpoint.address()))]
async fn connect_peer(
self: Arc<Self>,
endpoint: Endpoint,
ct: ConnectionTraffic,
) -> (Endpoint, Option<Peer>) {
debug!("Connecting");
match endpoint.proto() {
Protocol::Tcp | Protocol::Tls => self.connect_tcp_peer(endpoint, ct).await,
Protocol::Quic => self.connect_quic_peer(endpoint, ct).await,
}
}
async fn connect_tcp_peer(
self: Arc<Self>,
endpoint: Endpoint,
ct: ConnectionTraffic,
) -> (Endpoint, Option<Peer>) {
match (endpoint.proto(), &self.private_network_config) {
(Protocol::Tcp, Some(_)) => {
warn!("Attempting to connect over Tcp while a private network is configured, connection will be upgraded to Tls")
}
(Protocol::Tls, None) => {
warn!("Attempting to connect over Tls while a private network is not enabled, refusing to connect. Use \"Tcp\" instead");
return (endpoint, None);
}
_ => {}
}
#[cfg(feature = "private-network")]
let connector = if let Some((net_name, net_key)) = self.private_network_config.clone() {
let mut connector = SslConnector::builder(SslMethod::tls_client()).unwrap();
connector.set_psk_client_callback(move |_, _, id, key| {
// Note: identity must be passed in as a 0-terminated C string.
if id.len() < net_name.len() + 1 {
error!("Can't pass in identity to SSL connector");
return Ok(0);
}
id[..net_name.len()].copy_from_slice(net_name.as_bytes());
id[net_name.len()] = 0;
if key.len() < 32 {
error!("Can't pass in key to SSL acceptor");
return Ok(0);
}
// Copy key
key[..32].copy_from_slice(&net_key[..]);
Ok(32)
});
Some(connector.build())
} else {
None
};
match tokio::net::TcpStream::connect(endpoint.address())
.map(|result| result.and_then(|socket| set_fw_mark(socket, self.firewall_mark)))
.await
{
Ok(peer_stream) => {
debug!("Opened connection");
// Make sure Nagle's algorithm is disabled as it can cause latency spikes.
if let Err(e) = peer_stream.set_nodelay(true) {
debug!(err=%e, "Couldn't disable Nagle's algorithm on stream");
return (endpoint, None);
}
// Scope the MutexGuard, if we don't do this the future won't be Send
let (router_data_tx, router_control_tx, dead_peer_sink) = {
let router = self.router.lock().unwrap();
(
router.router_data_tx(),
router.router_control_tx(),
router.dead_peer_sink().clone(),
)
};
#[cfg(feature = "private-network")]
let res = {
if let Some(connector) = connector {
let ssl = match Ssl::new(connector.context()) {
Ok(ssl) => ssl,
Err(e) => {
debug!(err=%e, "Failed to create SSL object from acceptor after connecting to remote");
return (endpoint, None);
}
};
let mut ssl_stream = match tokio_openssl::SslStream::new(ssl, peer_stream) {
Ok(ssl_stream) => ssl_stream,
Err(e) => {
debug!(err=%e, "Failed to create TLS stream from tcp connection to endpoint");
return (endpoint, None);
}
};
// Pin here is needed to call `connect`.
let pinned_stream = Pin::new(&mut ssl_stream);
if let Err(e) = pinned_stream.connect().await {
// Error here is likely a misconfigured server.
debug!(err=%e, "Could not initiate TLS stream");
return (endpoint, None);
}
debug!("Completed TLS handshake");
let tls_stream = match TlsStream::new(ssl_stream, ct.rx_bytes, ct.tx_bytes)
{
Ok(tls_stream) => tls_stream,
Err(err) => {
error!(%err, "Failed to create wrapped Tls stream");
return (endpoint, None);
}
};
Peer::new(
router_data_tx,
router_control_tx,
tls_stream,
dead_peer_sink,
)
} else {
let peer_stream =
match TcpStream::new(peer_stream, ct.rx_bytes, ct.tx_bytes) {
Ok(ps) => ps,
Err(err) => {
error!(%err, "Failed to create wrapped tcp stream");
return (endpoint, None);
}
};
Peer::new(
router_data_tx,
router_control_tx,
peer_stream,
dead_peer_sink,
)
}
};
#[cfg(not(feature = "private-network"))]
let res = {
let peer_stream = match TcpStream::new(peer_stream, ct.rx_bytes, ct.tx_bytes) {
Ok(ps) => ps,
Err(err) => {
error!(%err, "Failed to create wrapped tcp stream");
return (endpoint, None);
}
};
Peer::new(
router_data_tx,
router_control_tx,
peer_stream,
dead_peer_sink,
)
};
match res {
Ok(new_peer) => {
info!("Connected to new peer");
(endpoint, Some(new_peer))
}
Err(e) => {
debug!(err=%e, "Failed to spawn peer");
(endpoint, None)
}
}
}
Err(e) => {
debug!(err=%e, "Couldn't connect");
(endpoint, None)
}
}
}
async fn connect_quic_peer(
self: Arc<Self>,
endpoint: Endpoint,
ct: ConnectionTraffic,
) -> (Endpoint, Option<Peer>) {
let quic_socket = if let Some(quic_socket) = &self.quic_socket {
quic_socket
} else {
debug!("Attempting to connect to quic peer while quic is disabled");
return (endpoint, None);
};
let provider = rustls::crypto::CryptoProvider::get_default()
.expect("We have a quic socket so there is a crypto provider installed");
let qcc = match QuicClientConfig::try_from(
rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(SkipServerVerification::new(provider.clone()))
.with_no_client_auth(),
) {
Ok(qcc) => qcc,
Err(err) => {
debug!(%err, "Failed to build quic client config");
return (endpoint, None);
}
};
let mut config = quinn::ClientConfig::new(Arc::new(qcc));
// Todo: tweak transport config
let mut transport_config = TransportConfig::default();
transport_config.max_concurrent_uni_streams(0_u8.into());
// Larger than needed for now, just in case
transport_config.max_concurrent_bidi_streams(5_u8.into());
// Connection timeout, set to higher than Hello interval to ensure connection does not randomly
// time out.
transport_config.max_idle_timeout(Some(Duration::from_secs(60).try_into().unwrap()));
transport_config.mtu_discovery_config(Some(MtuDiscoveryConfig::default()));
transport_config.keep_alive_interval(Some(Duration::from_secs(20)));
// we don't use datagrams.
transport_config.datagram_receive_buffer_size(Some(16 << 20));
transport_config.datagram_send_buffer_size(16 << 20);
transport_config.initial_mtu(1500);
config.transport_config(Arc::new(transport_config));
match quic_socket.connect_with(config, endpoint.address(), "dummy.mycelium") {
Ok(connecting) => match connecting.await {
Ok(con) => match con.open_bi().await {
Ok((tx, rx)) => {
let q_con = Quic::new(tx, rx, con, ct.tx_bytes, ct.rx_bytes);
let res = {
let router = self.router.lock().unwrap();
let router_data_tx = router.router_data_tx();
let router_control_tx = router.router_control_tx();
let dead_peer_sink = router.dead_peer_sink().clone();
Peer::new(router_data_tx, router_control_tx, q_con, dead_peer_sink)
};
match res {
Ok(new_peer) => {
info!("Connected to new peer");
(endpoint, Some(new_peer))
}
Err(e) => {
debug!(err=%e, "Failed to spawn peer");
(endpoint, None)
}
}
}
Err(e) => {
debug!(err=%e, "Couldn't open bidirectional quic stream");
(endpoint, None)
}
},
Err(e) => {
debug!(err=%e, "Couldn't complete quic connection");
(endpoint, None)
}
},
Err(e) => {
debug!(err=%e, "Couldn't initiate connection");
(endpoint, None)
}
}
}
/// Start listening for new peers on a tcp socket. If a private network is configured, this
/// will instead listen for incoming tls connections.
async fn tcp_listener(self: Arc<Self>) {
// Setup TLS acceptor for private network, if required.
#[cfg(feature = "private-network")]
let acceptor = if let Some((net_name, net_key)) = self.private_network_config.clone() {
let mut acceptor = SslAcceptor::mozilla_modern_v5(SslMethod::tls_server()).unwrap();
acceptor.set_psk_server_callback(move |_ssl_ref, id, key| {
if let Some(id) = id {
if id != net_name.as_bytes() {
debug!("id given by client does not match configured private network name");
return Ok(0);
}
} else {
debug!("No name indicated by client");
return Ok(0);
}
if key.len() < 32 {
warn!("Can't pass in key to SSL acceptor");
return Ok(0);
}
// Copy key
key[..32].copy_from_slice(&net_key[..]);
Ok(32)
});
Some(acceptor.build())
} else {
None
};
// Take a copy of every channel here first so we avoid lock contention in the loop later.
let router_data_tx = self.router.lock().unwrap().router_data_tx();
let router_control_tx = self.router.lock().unwrap().router_control_tx();
let dead_peer_sink = self.router.lock().unwrap().dead_peer_sink().clone();
let listener = TcpListener::bind(("::", self.tcp_listen_port))
.map(|result| result.and_then(|listener| set_fw_mark(listener, self.firewall_mark)));
match listener.await {
Ok(listener) => loop {
match listener.accept().await {
Ok((stream, remote)) => {
let tx_bytes = Arc::new(AtomicU64::new(0));
let rx_bytes = Arc::new(AtomicU64::new(0));
if let Err(e) = stream.set_nodelay(true) {
error!(err=%e, "Couldn't disable Nagle's algorithm on stream");
return;
}
#[cfg(feature = "private-network")]
let new_peer = if let Some(acceptor) = &acceptor {
let ssl = match Ssl::new(acceptor.context()) {
Ok(ssl) => ssl,
Err(e) => {
error!(%remote, err=%e, "Failed to create SSL object from acceptor after remote connected");
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | true |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/interval.rs | mycelium/src/interval.rs | //! Dedicated logic for
//! [intervals](https://datatracker.ietf.org/doc/html/rfc8966#name-solving-starvation-sequenci).
use std::time::Duration;
/// An interval in the babel protocol.
///
/// Intervals represent a duration, and are expressed in centiseconds (0.01 second / 10
/// milliseconds). `Interval` implements [`From`] [`u16`] to create a new interval from a raw
/// value, and [`From`] [`Duration`] to create a new `Interval` from an existing [`Duration`].
/// There are also implementation to convert back to the aforementioned types. Note that in case of
/// duration, millisecond precision is lost.
#[derive(Debug, Clone)]
pub struct Interval(u16);
impl From<Duration> for Interval {
fn from(value: Duration) -> Self {
Interval((value.as_millis() / 10) as u16)
}
}
impl From<Interval> for Duration {
fn from(value: Interval) -> Self {
Duration::from_millis(value.0 as u64 * 10)
}
}
impl From<u16> for Interval {
fn from(value: u16) -> Self {
Interval(value)
}
}
impl From<Interval> for u16 {
fn from(value: Interval) -> Self {
value.0
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/metrics.rs | mycelium/src/metrics.rs | //! This module is used for collection of runtime metrics of a `mycelium` system. The main item of
//! interest is the [`Metrics`] trait. Users can provide their own implementation of this, or use
//! the default provided implementation to disable gathering metrics.
use crate::peer_manager::PeerType;
/// The collection of all metrics exported by a [`mycelium node`](crate::Node). It is up to the
/// user to provide an implementation which implements the methods for metrics they are interested
/// in. All methods have a default implementation, so if the user is not interested in any metrics,
/// a NOOP handler can be implemented as follows:
///
/// ```rust
/// use mycelium::metrics::Metrics;
///
/// #[derive(Clone)]
/// struct NoMetrics;
/// impl Metrics for NoMetrics {}
/// ```
pub trait Metrics {
/// The [`Router`](crate::router::Router) received a new Hello TLV from a peer.
#[inline]
fn router_process_hello(&self) {}
/// The [`Router`](crate::router::Router) received a new IHU TLV from a peer.
#[inline]
fn router_process_ihu(&self) {}
/// The [`Router`](crate::router::Router) received a new Seqno request TLV from a peer.
#[inline]
fn router_process_seqno_request(&self) {}
/// The [`Router`](crate::router::Router) received a new Route request TLV from a peer.
/// Additionally, it is recorded if this is a wildcard request (route table dump request)
/// or a request for a specific subnet.
#[inline]
fn router_process_route_request(&self, _wildcard: bool) {}
/// The [`Router`](crate::router::Router) received a new Update TLV from a peer.
#[inline]
fn router_process_update(&self) {}
/// The [`Router`](crate::router::Router) tried to send an update to a peer, but before sending
/// it we found out the peer is actually already dead.
///
/// This can happen, since a peer is a remote entity we have no control over, and it can be
/// removed at any time for any reason. However, in normal operation, the amount of times this
/// happens should be fairly small compared to the amount of updates we send/receive.
#[inline]
fn router_update_dead_peer(&self) {}
/// The amount of TLV's received from peers, to be processed by the
/// [`Router`](crate::router::Router).
#[inline]
fn router_received_tlv(&self) {}
/// The [`Router`](crate::router::Router) dropped a received TLV before processing it, as the
/// peer who sent it has already died in the meantime.
#[inline]
fn router_tlv_source_died(&self) {}
/// The [`Router`](crate::router::Router) dropped a received TLV before processing it, because
/// it coulnd't keep up
#[inline]
fn router_tlv_discarded(&self) {}
/// A [`Peer`](crate::peer::Peer) was added to the [`Router`](crate::router::Router).
#[inline]
fn router_peer_added(&self) {}
/// A [`Peer`](crate::peer::Peer) was removed from the [`Router`](crate::router::Router).
#[inline]
fn router_peer_removed(&self) {}
/// A [`Peer`](crate::peer::Peer) informed the [`Router`](crate::router::Router) it died, or
/// the router otherwise noticed the Peer is dead.
#[inline]
fn router_peer_died(&self) {}
/// The [`Router`](crate::router::Router) ran a route selection procedure.
#[inline]
fn router_route_selection_ran(&self) {}
/// A [`SourceKey`](crate::source_table::SourceKey) expired and got cleaned up by the [`Router`](crate::router::Router).
#[inline]
fn router_source_key_expired(&self) {}
/// A [`RouteKey`](crate::routing_table::RouteKey) expired, and the router either set the
/// [`Metric`](crate::metric::Metric) of the route to infinity, or cleaned up the route entry
/// altogether.
#[inline]
fn router_route_key_expired(&self, _removed: bool) {}
/// A route which expired was actually the selected route for the
/// [`Subnet`](crate::subnet::Subnet). Note that [`Self::router_route_key_expired`] will
/// also have been called.
#[inline]
fn router_selected_route_expired(&self) {}
/// The [`Router`](crate::router::Router) sends a "triggered" update to it's peers.
#[inline]
fn router_triggered_update(&self) {}
/// The [`Router`](crate::router::Router) extracted a packet for the local subnet.
#[inline]
fn router_route_packet_local(&self) {}
/// The [`Router`](crate::router::Router) forwarded a packet to a peer.
#[inline]
fn router_route_packet_forward(&self) {}
/// The [`Router`](crate::router::Router) dropped a packet it was routing because it's TTL
/// reached 0.
#[inline]
fn router_route_packet_ttl_expired(&self) {}
/// The [`Router`](crate::router::Router) dropped a packet it was routing because there was no
/// route for the destination IP.
#[inline]
fn router_route_packet_no_route(&self) {}
/// The [`Router`](crate::router::Router) replied to a seqno request with a local route, which
/// is more recent (bigger seqno) than the request.
#[inline]
fn router_seqno_request_reply_local(&self) {}
/// The [`Router`](crate::router::Router) replied to a seqno request by bumping its own seqno
/// and advertising the local route.
#[inline]
fn router_seqno_request_bump_seqno(&self) {}
/// The [`Router`](crate::router::Router) dropped a seqno request because the TTL reached 0.
#[inline]
fn router_seqno_request_dropped_ttl(&self) {}
/// The [`Router`](crate::router::Router) forwarded a seqno request to a feasible route.
///
/// The default implementation is a no-op.
#[inline]
fn router_seqno_request_forward_feasible(&self) {}
/// The [`Router`](crate::router::Router) forwarded a seqno request to a (potentially)
/// unfeasible route.
///
/// The default implementation is a no-op.
#[inline]
fn router_seqno_request_forward_unfeasible(&self) {}
/// The [`Router`](crate::router::Router) dropped a seqno request because none of the other
/// handling methods applied.
#[inline]
fn router_seqno_request_unhandled(&self) {}
/// The [`time`](std::time::Duration) used by the [`Router`](crate::router::Router) to handle a
/// control packet.
///
/// `_tlv_type` is a textual label identifying the kind of control TLV that was handled.
#[inline]
fn router_time_spent_handling_tlv(&self, _duration: std::time::Duration, _tlv_type: &str) {}
/// The [`time`](std::time::Duration) used by the [`Router`](crate::router::Router) to
/// periodically propagate selected routes to peers.
///
/// The default implementation is a no-op.
#[inline]
fn router_time_spent_periodic_propagating_selected_routes(
&self,
_duration: std::time::Duration,
) {
}
/// An update was processed and accepted by the router, but did not run route selection.
///
/// The default implementation is a no-op.
#[inline]
fn router_update_skipped_route_selection(&self) {}
/// An update was denied by a configured filter.
///
/// The default implementation is a no-op.
#[inline]
fn router_update_denied_by_filter(&self) {}
/// An update was accepted by the router filters, but was otherwise unfeasible or a retraction,
/// for an unknown subnet.
///
/// The default implementation is a no-op.
#[inline]
fn router_update_not_interested(&self) {}
/// A new [`Peer`](crate::peer::Peer) was added to the
/// [`PeerManager`](crate::peer_manager::PeerManager) while it is running.
///
/// `_pt` identifies the kind of peer that was added.
#[inline]
fn peer_manager_peer_added(&self, _pt: PeerType) {}
/// Sets the amount of [`Peers`](crate::peer::Peer) known by the
/// [`PeerManager`](crate::peer_manager::PeerManager).
///
/// Gauge-style: `_amount` is the current total, not a delta.
#[inline]
fn peer_manager_known_peers(&self, _amount: usize) {}
/// The [`PeerManager`](crate::peer_manager::PeerManager) started an attempt to connect to a
/// remote endpoint.
///
/// The default implementation is a no-op.
#[inline]
fn peer_manager_connection_attempted(&self) {}
/// The [`PeerManager`](crate::peer_manager::PeerManager) finished an attempt to connect to a
/// remote endpoint. The connection could have failed.
///
/// The default implementation is a no-op.
#[inline]
fn peer_manager_connection_finished(&self) {}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun.rs | mycelium/src/tun.rs | //! The tun module implements a platform independent Tun interface.
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
use crate::subnet::Subnet;
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
/// Configuration for a TUN device on platforms where this process creates the
/// device itself (Linux, macOS without `mactunfd`, Windows).
pub struct TunConfig {
/// Name of the TUN device to create.
pub name: String,
/// Subnet of this node (NOTE(review): presumably used to configure the interface
/// address — confirm against the platform implementations).
pub node_subnet: Subnet,
/// Subnet routed over the TUN device (NOTE(review): confirm against platform impls).
pub route_subnet: Subnet,
}
#[cfg(any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "mactunfd"),
))]
/// Configuration for platforms where the TUN device is created by the host environment
/// and handed to this process as an already-open file descriptor.
pub struct TunConfig {
/// Raw file descriptor of the already-created TUN device.
pub tun_fd: i32,
}
#[cfg(target_os = "linux")]
mod linux;
#[cfg(target_os = "linux")]
pub use linux::new;
#[cfg(all(target_os = "macos", not(feature = "mactunfd")))]
mod darwin;
#[cfg(all(target_os = "macos", not(feature = "mactunfd")))]
pub use darwin::new;
#[cfg(target_os = "windows")]
mod windows;
#[cfg(target_os = "windows")]
pub use windows::new;
#[cfg(target_os = "android")]
mod android;
#[cfg(target_os = "android")]
pub use android::new;
#[cfg(any(target_os = "ios", all(target_os = "macos", feature = "mactunfd")))]
mod ios;
#[cfg(any(target_os = "ios", all(target_os = "macos", feature = "mactunfd")))]
pub use ios::new;
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/crypto.rs | mycelium/src/crypto.rs | //! Abstraction over diffie hellman, symmetric encryption, and hashing.
use core::fmt;
use std::{
error::Error,
fmt::Display,
net::Ipv6Addr,
ops::{Deref, DerefMut},
};
use aes_gcm::{aead::OsRng, AeadCore, AeadInPlace, Aes256Gcm, Key, KeyInit};
use serde::{de::Visitor, Deserialize, Serialize};
/// Default MTU for a packet. Ideally this would not be needed and the [`PacketBuffer`] takes a
/// const generic argument which is then expanded with the needed extra space for the buffer,
/// however as it stands const generics can only be used standalone and not in a constant
/// expression. This _is_ possible on nightly rust, with a feature gate (generic_const_exprs).
const PACKET_SIZE: usize = 1_400;
/// Size of an AES_GCM tag in bytes.
const AES_TAG_SIZE: usize = 16;
/// Size of an AES_GCM nonce in bytes.
const AES_NONCE_SIZE: usize = 12;
/// Size of user defined data header. This header will be part of the encrypted data.
const DATA_HEADER_SIZE: usize = 4;
/// Size of a `PacketBuffer`: room for the payload plus the AES-GCM tag, the nonce, and the
/// internal user data header.
const PACKET_BUFFER_SIZE: usize = PACKET_SIZE + AES_TAG_SIZE + AES_NONCE_SIZE + DATA_HEADER_SIZE;
/// A public key used as part of Diffie Hellman key exchange. It is derived from a [`SecretKey`].
///
/// Equality and hashing are derived, i.e. they operate on the wrapped key bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PublicKey(x25519_dalek::PublicKey);
/// A secret used as part of Diffie Hellman key exchange.
///
/// This type intentionally does not implement or derive [`Debug`] to avoid accidentally leaking
/// secrets in logs. The raw bytes are only reachable through [`SecretKey::as_bytes`].
#[derive(Clone)]
pub struct SecretKey(x25519_dalek::StaticSecret);
/// A statically computed secret from a [`SecretKey`] and a [`PublicKey`].
///
/// This type intentionally does not implement or derive [`Debug`] to avoid accidentally leaking
/// secrets in logs. It derefs to the raw 32 secret bytes.
#[derive(Clone)]
pub struct SharedSecret([u8; 32]);
/// A buffer for packets. This holds enough space to encrypt a packet in place without
/// reallocating.
///
/// Internally, the buffer is created with an additional header. Because this header is part of the
/// encrypted content, it is not included in the global version set by the main packet header. As
/// such, an internal version is included.
pub struct PacketBuffer {
/// Backing storage, allocated once at `PACKET_BUFFER_SIZE`.
buf: Vec<u8>,
/// Amount of bytes written in the buffer. Includes the internal data header
/// (see `set_size`, which adds `DATA_HEADER_SIZE` on top of the payload size).
size: usize,
}
/// A reference to the header in a [`PacketBuffer`].
///
/// Derefs to the raw `DATA_HEADER_SIZE` header bytes.
pub struct PacketBufferHeader<'a> {
data: &'a [u8; DATA_HEADER_SIZE],
}
/// A mutable reference to the header in a [`PacketBuffer`].
///
/// Derefs (mutably) to the raw `DATA_HEADER_SIZE` header bytes.
pub struct PacketBufferHeaderMut<'a> {
data: &'a mut [u8; DATA_HEADER_SIZE],
}
/// Opaque type indicating decryption failed.
///
/// Intentionally carries no further detail about the failure cause.
#[derive(Debug, Clone, Copy)]
pub struct DecryptionError;
impl Display for DecryptionError {
    /// Human readable description of the failure. Kept generic on purpose, as the
    /// error itself is opaque.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Decryption failed, invalid or insufficient encrypted content for this key"
        )
    }
}
impl Error for DecryptionError {}
impl SecretKey {
/// Generate a new `StaticSecret` using [`OsRng`] as an entropy source.
pub fn new() -> Self {
SecretKey(x25519_dalek::StaticSecret::random_from_rng(OsRng))
}
/// View this `SecretKey` as a byte array.
///
/// Note: this exposes the raw secret; handle with care.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
/// Computes the [`SharedSecret`] from this `SecretKey` and a [`PublicKey`].
///
/// The computation is symmetric: `k1.shared_secret(&pk2)` yields the same bytes as
/// `k2.shared_secret(&pk1)` (exercised by the roundtrip test below).
pub fn shared_secret(&self, other: &PublicKey) -> SharedSecret {
SharedSecret(self.0.diffie_hellman(&other.0).to_bytes())
}
}
impl Default for SecretKey {
// A default secret key is a freshly generated random key.
fn default() -> Self {
Self::new()
}
}
impl PublicKey {
/// Generates an [`Ipv6Addr`] from a `PublicKey`.
///
/// The generated address is guaranteed to be part of the `400::/7` range: the first byte of
/// the address is forced to either `0x04` or `0x05` below.
pub fn address(&self) -> Ipv6Addr {
let mut hasher = blake3::Hasher::new();
hasher.update(self.as_bytes());
let mut buf = [0; 16];
hasher.finalize_xof().fill(&mut buf);
// Mangle the first byte to be of the expected form. Because of the network range
// requirement, we MUST set the third bit, and MAY set the last bit. Instead of discarding
// the first 7 bits of the hash, use the first byte to determine if the last bit is set.
// If there is an odd number of bits set in the first byte, set the last bit of the result.
let lsb = buf[0].count_ones() as u8 % 2;
buf[0] = 0x04 | lsb;
Ipv6Addr::from(buf)
}
/// Convert this `PublicKey` to a byte array.
pub fn to_bytes(self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this `PublicKey` as a byte array.
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
impl SharedSecret {
/// Encrypt a [`PacketBuffer`] using the `SharedSecret` as key.
///
/// Internally, a new random nonce will be generated using the OS's crypto rng generator. This
/// nonce is appended to the encrypted data.
///
/// Resulting layout: `[ ciphertext (internal header + payload) | 16 byte tag | 12 byte nonce ]`.
/// Note that the internal data header is part of the encrypted content.
pub fn encrypt(&self, mut data: PacketBuffer) -> Vec<u8> {
let key: Key<Aes256Gcm> = self.0.into();
let nonce = Aes256Gcm::generate_nonce(OsRng);
let cipher = Aes256Gcm::new(&key);
let tag = cipher
.encrypt_in_place_detached(&nonce, &[], &mut data.buf[..data.size])
.expect("Encryption can't fail; qed.");
// The buffer was allocated with dedicated scratch space for tag and nonce,
// so these writes cannot go out of bounds.
data.buf[data.size..data.size + AES_TAG_SIZE].clone_from_slice(tag.as_slice());
data.buf[data.size + AES_TAG_SIZE..data.size + AES_TAG_SIZE + AES_NONCE_SIZE]
.clone_from_slice(&nonce);
data.buf.truncate(data.size + AES_NONCE_SIZE + AES_TAG_SIZE);
data.buf
}
/// Decrypt a message previously encrypted with an equivalent `SharedSecret`. In other words, a
/// message that was previously created by the [`SharedSecret::encrypt`] method.
///
/// Internally, this messages assumes that a 12 byte nonce is present at the end of the data.
/// If the passed in data to decrypt does not contain a valid nonce, decryption fails and an
/// opaque error is returned. As an extension to this, if the data is not of sufficient length
/// to contain a valid nonce, an error is returned immediately.
pub fn decrypt(&self, mut data: Vec<u8>) -> Result<PacketBuffer, DecryptionError> {
// Make sure we have sufficient data (i.e. a nonce). The minimum also accounts for the
// tag and the (encrypted) internal data header.
if data.len() < AES_NONCE_SIZE + AES_TAG_SIZE + DATA_HEADER_SIZE {
return Err(DecryptionError);
}
let data_len = data.len();
let key: Key<Aes256Gcm> = self.0.into();
{
// Split off nonce and tag from the end, decrypt the remaining ciphertext in place.
let (data, nonce) = data.split_at_mut(data_len - AES_NONCE_SIZE);
let (data, tag) = data.split_at_mut(data.len() - AES_TAG_SIZE);
let cipher = Aes256Gcm::new(&key);
cipher
.decrypt_in_place_detached((&*nonce).into(), &[], data, (&*tag).into())
.map_err(|_| DecryptionError)?;
}
Ok(PacketBuffer {
// We did not remove the scratch space used for TAG and NONCE.
size: data.len() - AES_TAG_SIZE - AES_NONCE_SIZE,
buf: data,
})
}
}
impl PacketBuffer {
/// Create a new blank `PacketBuffer`.
pub fn new() -> Self {
Self {
buf: vec![0; PACKET_BUFFER_SIZE],
size: 0,
}
}
/// Get a reference to the packet header.
pub fn header(&self) -> PacketBufferHeader<'_> {
PacketBufferHeader {
data: self.buf[..DATA_HEADER_SIZE]
.try_into()
.expect("Header size constant is correct; qed"),
}
}
/// Get a mutable reference to the packet header.
pub fn header_mut(&mut self) -> PacketBufferHeaderMut<'_> {
PacketBufferHeaderMut {
data: <&mut [u8] as TryInto<&mut [u8; DATA_HEADER_SIZE]>>::try_into(
&mut self.buf[..DATA_HEADER_SIZE],
)
.expect("Header size constant is correct; qed"),
}
}
/// Get a reference to the entire useable inner buffer.
pub fn buffer(&self) -> &[u8] {
let buf_end = self.buf.len() - AES_NONCE_SIZE - AES_TAG_SIZE;
&self.buf[DATA_HEADER_SIZE..buf_end]
}
/// Get a mutable reference to the entire useable internal buffer.
pub fn buffer_mut(&mut self) -> &mut [u8] {
let buf_end = self.buf.len() - AES_NONCE_SIZE - AES_TAG_SIZE;
&mut self.buf[DATA_HEADER_SIZE..buf_end]
}
/// Sets the amount of bytes in use by the buffer.
pub fn set_size(&mut self, size: usize) {
self.size = size + DATA_HEADER_SIZE;
}
}
impl Default for PacketBuffer {
// A default buffer is simply a fresh, empty buffer.
fn default() -> Self {
Self::new()
}
}
impl From<[u8; 32]> for SecretKey {
/// Load a secret key from a byte array.
///
/// This is infallible: any 32 byte value is accepted as key material.
fn from(bytes: [u8; 32]) -> SecretKey {
SecretKey(x25519_dalek::StaticSecret::from(bytes))
}
}
impl fmt::Display for PublicKey {
    /// Formats the key as its hex string form, the same text used by the
    /// `Serialize` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let hex = faster_hex::hex_string(self.as_bytes());
        f.write_str(&hex)
    }
}
impl Serialize for PublicKey {
    /// Serializes the key as a hex string, matching the `Display` output.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let hex = faster_hex::hex_string(self.as_bytes());
        serializer.serialize_str(&hex)
    }
}
struct PublicKeyVisitor;
impl Visitor<'_> for PublicKeyVisitor {
    type Value = PublicKey;
    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("A hex encoded public key (64 characters)")
    }
    /// Decode a 64 character hex string into the 32 raw key bytes.
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // Guard: a 32 byte key is exactly 64 hex characters.
        if v.len() != 64 {
            return Err(E::custom("Public key is 64 characters long"));
        }
        let mut backing = [0; 32];
        faster_hex::hex_decode(v.as_bytes(), &mut backing)
            .map_err(|_| E::custom("PublicKey is not valid hex"))?;
        Ok(PublicKey(backing.into()))
    }
}
impl<'de> Deserialize<'de> for PublicKey {
// Delegates to `PublicKeyVisitor`, which expects a 64 character hex string.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_str(PublicKeyVisitor)
}
}
impl From<[u8; 32]> for PublicKey {
/// Given a byte array, construct a `PublicKey`.
///
/// Infallible: any 32 byte value is a valid x25519 public key representation.
fn from(bytes: [u8; 32]) -> PublicKey {
PublicKey(x25519_dalek::PublicKey::from(bytes))
}
}
impl TryFrom<&str> for PublicKey {
type Error = faster_hex::Error;
// NOTE(review): unlike the serde visitor above, there is no explicit 64-character
// length check here; this relies on `faster_hex::hex_decode` rejecting inputs whose
// length does not match the 32 byte output buffer — confirm against the crate docs.
fn try_from(value: &str) -> Result<Self, Self::Error> {
let mut output = [0u8; 32];
faster_hex::hex_decode(value.as_bytes(), &mut output)?;
Ok(PublicKey::from(output))
}
}
impl From<&SecretKey> for PublicKey {
// Derive the public key matching a secret key.
fn from(value: &SecretKey) -> Self {
PublicKey(x25519_dalek::PublicKey::from(&value.0))
}
}
impl Deref for SharedSecret {
type Target = [u8; 32];
// Expose the raw 32 secret bytes.
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Deref for PacketBuffer {
type Target = [u8];
// Yields the written payload bytes, skipping the internal data header
// (`size` includes the header, the slice below excludes it).
fn deref(&self) -> &Self::Target {
&self.buf[DATA_HEADER_SIZE..self.size]
}
}
impl Deref for PacketBufferHeader<'_> {
type Target = [u8; DATA_HEADER_SIZE];
// Expose the raw header bytes.
fn deref(&self) -> &Self::Target {
self.data
}
}
impl Deref for PacketBufferHeaderMut<'_> {
type Target = [u8; DATA_HEADER_SIZE];
// Expose the raw header bytes.
fn deref(&self) -> &Self::Target {
self.data
}
}
impl DerefMut for PacketBufferHeaderMut<'_> {
// Writes land directly in the backing `PacketBuffer` storage.
fn deref_mut(&mut self) -> &mut Self::Target {
self.data
}
}
impl fmt::Debug for PacketBuffer {
// Manual impl: the buffer content is elided ("...") on purpose, only the length is shown.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PacketBuffer")
.field("data", &"...")
.field("len", &self.size)
.finish()
}
}
/// Unit tests for the buffer layout and the AES-GCM encrypt/decrypt roundtrip.
#[cfg(test)]
mod tests {
use super::{PacketBuffer, SecretKey, AES_NONCE_SIZE, AES_TAG_SIZE, DATA_HEADER_SIZE};
#[test]
/// Test if encryption works in general. We just create some random value and encrypt it.
/// Specifically, this will help to catch runtime panics in case AES_TAG_SIZE or AES_NONCE_SIZE
/// don't have a proper value aligned with the underlying AES_GCM implementation.
fn encryption_succeeds() {
let k1 = SecretKey::new();
let k2 = SecretKey::new();
let ss = k1.shared_secret(&(&k2).into());
let mut pb = PacketBuffer::new();
let data = b"vnno30nv f654q364 vfsv 44"; // Random keyboard smash.
pb.buffer_mut()[..data.len()].copy_from_slice(data);
pb.set_size(data.len());
// We only care that this does not panic.
let res = ss.encrypt(pb);
// At the same time, check expected size.
assert_eq!(
res.len(),
data.len() + DATA_HEADER_SIZE + AES_TAG_SIZE + AES_NONCE_SIZE
);
}
#[test]
/// Encrypt a value and then decrypt it. This makes sure the decrypt flow and encrypt flow
/// match, and both follow the expected format. Also, we don't reuse the shared secret for
/// decryption, but instead generate the secret again the other way round, to simulate a remote
/// node.
fn encrypt_decrypt_roundtrip() {
let k1 = SecretKey::new();
let k2 = SecretKey::new();
let ss1 = k1.shared_secret(&(&k2).into());
let ss2 = k2.shared_secret(&(&k1).into());
// This assertion is not strictly necessary as it will be checked below implicitly.
assert_eq!(ss1.as_slice(), ss2.as_slice());
let data = b"dsafjiqjo23 u2953u8 3oid fjo321j";
let mut pb = PacketBuffer::new();
pb.buffer_mut()[..data.len()].copy_from_slice(data);
pb.set_size(data.len());
let res = ss1.encrypt(pb);
let original = ss2.decrypt(res).expect("Decryption works");
assert_eq!(&*original, &data[..]);
}
#[test]
/// Test if PacketBufferHeaderMut actually modifies the PacketBuffer storage.
fn modify_header() {
let mut pb = PacketBuffer::new();
let mut header = pb.header_mut();
header[0] = 1;
header[1] = 2;
header[2] = 3;
header[3] = 4;
// Writes through the header view must land in the first bytes of the backing buffer.
assert_eq!(pb.buf[..DATA_HEADER_SIZE], [1, 2, 3, 4]);
}
#[test]
/// Verify [`PacketBuffer::buffer`] and [`PacketBuffer::buffer_mut`] actually have the
/// appropriate size.
fn buffer_mapping() {
let mut pb = PacketBuffer::new();
assert_eq!(pb.buffer().len(), super::PACKET_SIZE);
assert_eq!(pb.buffer_mut().len(), super::PACKET_SIZE);
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/data.rs | mycelium/src/data.rs | use std::net::{IpAddr, Ipv6Addr};
use etherparse::{
icmpv6::{DestUnreachableCode, TimeExceededCode},
Icmpv6Type, PacketBuilder,
};
use futures::{Sink, SinkExt, Stream, StreamExt};
use tokio::sync::mpsc::UnboundedReceiver;
use tracing::{debug, error, trace, warn};
use crate::{crypto::PacketBuffer, metrics::Metrics, packet::DataPacket, router::Router};
/// Current version of the user data header. Stored in byte 0 of the 4 byte encrypted data
/// header; the type values below are stored in byte 1.
const USER_DATA_VERSION: u8 = 1;
/// Type value indicating L3 data in the user data header.
const USER_DATA_L3_TYPE: u8 = 0;
/// Type value indicating a user message in the data header.
const USER_DATA_MESSAGE_TYPE: u8 = 1;
/// Type value indicating an ICMP packet not returned as regular IPv6 traffic. This is needed when
/// intermediate nodes send back icmp data, as the original data is encrypted.
const USER_DATA_OOB_ICMP: u8 = 2;
/// Minimum size in bytes of an IPv6 header.
const IPV6_MIN_HEADER_SIZE: usize = 40;
/// Size of an ICMPv6 header.
const ICMP6_HEADER_SIZE: usize = 8;
/// Minimum MTU for IPV6 according to https://www.rfc-editor.org/rfc/rfc8200#section-5.
/// For ICMP, the packet must not be greater than this value. This is specified in
/// https://datatracker.ietf.org/doc/html/rfc4443#section-2.4, section (c).
const MIN_IPV6_MTU: usize = 1280;
/// Mask applied to the first byte of an IP header to extract the version.
const IP_VERSION_MASK: u8 = 0b1111_0000;
/// Version byte of an IP header indicating IPv6. Since the version is only 4 bits, the lower bits
/// must be masked first.
const IPV6_VERSION_BYTE: u8 = 0b0110_0000;
/// Default hop limit for message packets. For now this is set to 64 hops.
///
/// For regular l3 packets, we copy the hop limit from the packet itself. We can't do that here, so
/// 64 is used as sane default.
const MESSAGE_HOP_LIMIT: u8 = 64;
/// The DataPlane manages forwarding/receiving of local data packets to the [`Router`], and the
/// encryption/decryption of them.
///
/// DataPlane itself can be cloned, but this is not cheap on the router and should be avoided.
pub struct DataPlane<M> {
/// Routing core used for secret lookups and packet forwarding.
router: Router<M>,
}
impl<M> DataPlane<M>
where
M: Metrics + Clone + Send + 'static,
{
/// Create a new `DataPlane` using the given [`Router`] for packet handling.
///
/// `l3_packet_stream` is a stream of l3 packets from the host, usually read from a TUN interface.
/// `l3_packet_sink` is a sink for l3 packets received from a remote, usually sent to a TUN interface.
/// `message_packet_sink` receives decrypted message fragments together with their source and
/// destination addresses.
/// `host_packet_source` yields packets handed over by the [`Router`] for local processing.
///
/// Two background tasks are spawned: one injecting host packets into the router, and one
/// extracting packets from the router back to the host.
pub fn new<S, T, U>(
router: Router<M>,
l3_packet_stream: S,
l3_packet_sink: T,
message_packet_sink: U,
host_packet_source: UnboundedReceiver<DataPacket>,
) -> Self
where
S: Stream<Item = Result<PacketBuffer, std::io::Error>> + Send + Unpin + 'static,
T: Sink<PacketBuffer> + Clone + Send + Unpin + 'static,
T::Error: std::fmt::Display,
U: Sink<(PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
U::Error: std::fmt::Display,
{
let dp = Self { router };
tokio::spawn(
dp.clone()
.inject_l3_packet_loop(l3_packet_stream, l3_packet_sink.clone()),
);
tokio::spawn(dp.clone().extract_packet_loop(
l3_packet_sink,
message_packet_sink,
host_packet_source,
));
dp
}
/// Get a reference to the [`Router`] used by this `DataPlane`.
pub fn router(&self) -> &Router<M> {
&self.router
}
async fn inject_l3_packet_loop<S, T>(self, mut l3_packet_stream: S, mut l3_packet_sink: T)
where
// TODO: no result
// TODO: should IP extraction be handled higher up?
S: Stream<Item = Result<PacketBuffer, std::io::Error>> + Send + Unpin + 'static,
T: Sink<PacketBuffer> + Clone + Send + Unpin + 'static,
T::Error: std::fmt::Display,
{
let node_subnet = self.router.node_tun_subnet();
while let Some(packet) = l3_packet_stream.next().await {
let mut packet = match packet {
Err(e) => {
error!("Failed to read packet from TUN interface {e}");
continue;
}
Ok(packet) => packet,
};
trace!("Received packet from tun");
// Parse an IPv6 header. We don't care about the full header in reality. What we want
// to know is:
// - This is an IPv6 header
// - Hop limit
// - Source address
// - Destination address
// This translates to the following requirements:
// - at least 40 bytes of data, as that is the minimum size of an IPv6 header
// - first 4 bits (version) are the constant 6 (0b0110)
// - src is byte 9-24 (8-23 0 indexed).
// - dst is byte 25-40 (24-39 0 indexed).
if packet.len() < IPV6_MIN_HEADER_SIZE {
trace!("Packet can't contain an IPv6 header");
continue;
}
if packet[0] & IP_VERSION_MASK != IPV6_VERSION_BYTE {
trace!("Packet is not IPv6");
continue;
}
let hop_limit = u8::from_be_bytes([packet[7]]);
let src_ip = Ipv6Addr::from(
<&[u8] as TryInto<[u8; 16]>>::try_into(&packet[8..24])
.expect("Static range bounds on slice are correct length"),
);
let dst_ip = Ipv6Addr::from(
<&[u8] as TryInto<[u8; 16]>>::try_into(&packet[24..40])
.expect("Static range bounds on slice are correct length"),
);
// If this is a packet for our own Subnet, it means there is no local configuration for
// the destination ip or /64 subnet, and the IP is unreachable
if node_subnet.contains_ip(dst_ip.into()) {
trace!(
"Replying to local packet for unexisting address: {}",
dst_ip
);
let mut icmp_packet = PacketBuffer::new();
let host = self.router.node_public_key().address().octets();
let icmp = PacketBuilder::ipv6(host, src_ip.octets(), 64).icmpv6(
Icmpv6Type::DestinationUnreachable(DestUnreachableCode::Address),
);
icmp_packet.set_size(icmp.size(packet.len().min(1280 - 48)));
let mut writer = &mut icmp_packet.buffer_mut()[..];
if let Err(e) = icmp.write(&mut writer, &packet[..packet.len().min(1280 - 48)]) {
error!("Failed to construct ICMP packet: {e}");
continue;
}
if let Err(e) = l3_packet_sink.send(icmp_packet).await {
error!("Failed to send ICMP packet to host: {e}");
}
continue;
}
trace!("Received packet from TUN with dest addr: {:?}", dst_ip);
// Check if the source address is part of 400::/7
let first_src_byte = src_ip.segments()[0] >> 8;
if !(0x04..0x06).contains(&first_src_byte) {
let mut icmp_packet = PacketBuffer::new();
let host = self.router.node_public_key().address().octets();
let icmp = PacketBuilder::ipv6(host, src_ip.octets(), 64).icmpv6(
Icmpv6Type::DestinationUnreachable(
DestUnreachableCode::SourceAddressFailedPolicy,
),
);
icmp_packet.set_size(icmp.size(packet.len().min(1280 - 48)));
let mut writer = &mut icmp_packet.buffer_mut()[..];
if let Err(e) = icmp.write(&mut writer, &packet[..packet.len().min(1280 - 48)]) {
error!("Failed to construct ICMP packet: {e}");
continue;
}
if let Err(e) = l3_packet_sink.send(icmp_packet).await {
error!("Failed to send ICMP packet to host: {e}");
}
continue;
}
// No need to verify destination address, if it is not part of the global subnet there
// should not be a route for it, and therefore the route step will generate the
// appropriate ICMP.
let mut header = packet.header_mut();
header[0] = USER_DATA_VERSION;
header[1] = USER_DATA_L3_TYPE;
if let Some(icmp) = self.encrypt_and_route_packet(src_ip, dst_ip, hop_limit, packet) {
if let Err(e) = l3_packet_sink.send(icmp).await {
error!("Could not forward icmp packet back to TUN interface {e}");
}
}
}
warn!("Data inject loop from host to router ended");
}
/// Inject a new packet where the content is a `message` fragment.
pub fn inject_message_packet(
    &self,
    src_ip: Ipv6Addr,
    dst_ip: Ipv6Addr,
    mut packet: PacketBuffer,
) {
    // Tag the (to-be-encrypted) internal header so the receiving node dispatches this
    // packet to the message subsystem instead of the TUN interface.
    {
        let mut header = packet.header_mut();
        header[0] = USER_DATA_VERSION;
        header[1] = USER_DATA_MESSAGE_TYPE;
    }
    // Messages don't originate from an IP packet, so use the default hop limit.
    self.encrypt_and_route_packet(src_ip, dst_ip, MESSAGE_HOP_LIMIT, packet);
}
/// Encrypt the content of a packet based on the destination key, and then inject the packet
/// into the [`Router`] for processing.
///
/// If no key exists for the destination, the content can't be encrypted, the packet is not
/// injected into the router, and a packet is returned containing an ICMP packet. Note that a
/// return value of [`Option::None`] does not mean the packet was successfully forwarded.
fn encrypt_and_route_packet(
&self,
src_ip: Ipv6Addr,
dst_ip: Ipv6Addr,
hop_limit: u8,
packet: PacketBuffer,
) -> Option<PacketBuffer> {
// If the packet only has a TTL of 1, we won't be able to route it to the destination
// regardless, so just reply with an unencrypted TTL exceeded ICMP.
if hop_limit < 2 {
debug!(
packet.ttl = hop_limit,
packet.src = %src_ip,
packet.dst = %dst_ip,
"Attempting to route packet with insufficient TTL",
);
let mut pb = PacketBuffer::new();
// From self to self
let icmp = PacketBuilder::ipv6(src_ip.octets(), src_ip.octets(), hop_limit)
.icmpv6(Icmpv6Type::TimeExceeded(TimeExceededCode::HopLimitExceeded));
// Scale to max size if needed
let orig_buf_end = packet
.buffer()
.len()
.min(MIN_IPV6_MTU - IPV6_MIN_HEADER_SIZE - ICMP6_HEADER_SIZE);
pb.set_size(icmp.size(orig_buf_end));
let mut b = pb.buffer_mut();
if let Err(e) = icmp.write(&mut b, &packet.buffer()[..orig_buf_end]) {
error!("Failed to construct time exceeded ICMP packet {e}");
return None;
}
return Some(pb);
}
// Get shared secret from node and dest address
let shared_secret = match self.router.get_shared_secret_if_selected(dst_ip.into()) {
Some(ss) => ss,
// If we don't have a route to the destination subnet, reply with ICMP no route to
// host. Do this here as well to avoid encrypting the ICMP to ourselves.
None => {
debug!(
packet.src = %src_ip,
packet.dst = %dst_ip,
"No entry found for destination address, dropping packet",
);
let mut pb = PacketBuffer::new();
// From self to self
let icmp = PacketBuilder::ipv6(src_ip.octets(), src_ip.octets(), hop_limit).icmpv6(
Icmpv6Type::DestinationUnreachable(DestUnreachableCode::NoRoute),
);
// Scale to max size if needed
let orig_buf_end = packet
.buffer()
.len()
.min(MIN_IPV6_MTU - IPV6_MIN_HEADER_SIZE - ICMP6_HEADER_SIZE);
pb.set_size(icmp.size(orig_buf_end));
let mut b = pb.buffer_mut();
if let Err(e) = icmp.write(&mut b, &packet.buffer()[..orig_buf_end]) {
error!("Failed to construct no route to host ICMP packet {e}");
return None;
}
return Some(pb);
}
};
// Encrypt the full buffer (internal header + payload) and hand it to the router.
self.router.route_packet(DataPacket {
dst_ip,
src_ip,
hop_limit,
raw_data: shared_secret.encrypt(packet),
});
None
}
/// Receive [`DataPacket`]s from the [`Router`] destined for this host, decrypt them and
/// dispatch based on the type byte of the decrypted header: l3 packets go to the TUN sink,
/// message fragments go to the message sink, and out-of-band ICMP is reconstructed before
/// being sent to the TUN sink.
///
/// Runs until `host_packet_source` is closed.
async fn extract_packet_loop<T, U>(
    self,
    mut l3_packet_sink: T,
    mut message_packet_sink: U,
    mut host_packet_source: UnboundedReceiver<DataPacket>,
) where
    T: Sink<PacketBuffer> + Send + Unpin + 'static,
    T::Error: std::fmt::Display,
    U: Sink<(PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
    U::Error: std::fmt::Display,
{
    while let Some(data_packet) = host_packet_source.recv().await {
        // decrypt & send to TUN interface
        let shared_secret = if let Some(ss) = self
            .router
            .get_shared_secret_from_dest(data_packet.src_ip.into())
        {
            ss
        } else {
            trace!("Received packet from unknown sender");
            continue;
        };
        let mut decrypted_packet = match shared_secret.decrypt(data_packet.raw_data) {
            Ok(data) => data,
            Err(_) => {
                debug!("Dropping data packet with invalid encrypted content");
                continue;
            }
        };
        // Check header
        let header = decrypted_packet.header();
        if header[0] != USER_DATA_VERSION {
            trace!("Dropping decrypted packet with unknown header version");
            continue;
        }
        // Route based on packet type.
        match header[1] {
            USER_DATA_L3_TYPE => {
                let real_packet = decrypted_packet.buffer_mut();
                if real_packet.len() < IPV6_MIN_HEADER_SIZE {
                    debug!(
                        "Decrypted packet is too short, can't possibly be a valid IPv6 packet"
                    );
                    continue;
                }
                // Adjust the hop limit in the decrypted packet to the new value.
                real_packet[7] = data_packet.hop_limit;
                if let Err(e) = l3_packet_sink.send(decrypted_packet).await {
                    error!("Failed to send packet on local TUN interface: {e}",);
                    continue;
                }
            }
            USER_DATA_MESSAGE_TYPE => {
                if let Err(e) = message_packet_sink
                    .send((
                        decrypted_packet,
                        IpAddr::V6(data_packet.src_ip),
                        IpAddr::V6(data_packet.dst_ip),
                    ))
                    .await
                {
                    error!("Failed to send packet to message handler: {e}",);
                    continue;
                }
            }
            USER_DATA_OOB_ICMP => {
                // Payload layout: 16 bytes holding the original destination IP, followed by
                // an IPv6 packet whose ICMPv6 body is encrypted with the key shared with
                // that original destination.
                let real_packet = &*decrypted_packet;
                if real_packet.len() < IPV6_MIN_HEADER_SIZE + ICMP6_HEADER_SIZE + 16 {
                    debug!(
                        "Decrypted packet is too short, can't possibly be a valid IPv6 ICMP packet"
                    );
                    continue;
                }
                if real_packet.len() > MIN_IPV6_MTU + 16 {
                    debug!("Discarding ICMP packet which is too large");
                    continue;
                }
                let dec_ip = Ipv6Addr::from(
                    <&[u8] as TryInto<[u8; 16]>>::try_into(&real_packet[..16]).unwrap(),
                );
                trace!("ICMP for original target {dec_ip}");
                let key =
                    if let Some(key) = self.router.get_shared_secret_from_dest(dec_ip.into()) {
                        key
                    } else {
                        debug!("Can't decrypt OOB ICMP packet from unknown host");
                        continue;
                    };
                let (_, body) = match etherparse::IpHeaders::from_slice(&real_packet[16..]) {
                    Ok(r) => r,
                    Err(e) => {
                        // This is a node which does not adhere to the protocol of sending back
                        // ICMP like this, or it is intentionally sending malicious packets.
                        debug!(
                            "Dropping malformed OOB ICMP packet from {} for {e}",
                            data_packet.src_ip
                        );
                        continue;
                    }
                };
                let (header, body) = match etherparse::Icmpv6Header::from_slice(body.payload) {
                    Ok(r) => r,
                    Err(e) => {
                        // This is a node which does not adhere to the protocol of sending back
                        // ICMP like this, or it is intentionally sending malicious packets.
                        debug!(
                            "Dropping OOB ICMP packet from {} with malformed ICMP header ({e})",
                            data_packet.src_ip
                        );
                        continue;
                    }
                };
                // Decrypt the embedded original payload with the original destination's key.
                let orig_pb = match key.decrypt(body.to_vec()) {
                    Ok(pb) => pb,
                    Err(e) => {
                        warn!("Failed to decrypt ICMP data body {e}");
                        continue;
                    }
                };
                // Rebuild a plain ICMPv6 packet for the host out of the decrypted body.
                let packet = etherparse::PacketBuilder::ipv6(
                    data_packet.src_ip.octets(),
                    data_packet.dst_ip.octets(),
                    data_packet.hop_limit,
                )
                .icmpv6(header.icmp_type);
                let serialized_icmp = packet.size(orig_pb.len());
                let mut rp = PacketBuffer::new();
                rp.set_size(serialized_icmp);
                if let Err(e) =
                    packet.write(&mut (&mut rp.buffer_mut()[..serialized_icmp]), &orig_pb)
                {
                    error!("Could not reconstruct icmp packet {e}");
                    continue;
                }
                if let Err(e) = l3_packet_sink.send(rp).await {
                    error!("Failed to send packet on local TUN interface: {e}",);
                    continue;
                }
            }
            _ => {
                trace!("Dropping decrypted packet with unknown protocol type");
                continue;
            }
        }
    }
    warn!("Extract loop from router to host ended");
}
}
impl<M> Clone for DataPlane<M>
where
M: Clone,
{
// NOTE(review): this manual impl appears equivalent to `#[derive(Clone)]`, which would
// also bound `M: Clone` — confirm whether the manual impl is intentional.
fn clone(&self) -> Self {
Self {
router: self.router.clone(),
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/message.rs | mycelium/src/message.rs | //! Module for working with "messages".
//!
//! A message is an arbitrary bag of bytes sent by a node to a different node. A message is
//! considered application defined data (L7), and we make no assumptions of any kind regarding the
//! structure. We only care about sending the message to the remote in the most reliable way
//! possible.
use core::fmt;
#[cfg(target_family = "unix")]
use std::io;
#[cfg(target_family = "unix")]
use std::path::PathBuf;
use std::{
collections::{HashMap, VecDeque},
marker::PhantomData,
net::IpAddr,
ops::{Deref, DerefMut},
sync::{Arc, Mutex, RwLock},
time::{self, Duration},
};
use futures::{Stream, StreamExt};
use rand::Fill;
use serde::{de::Visitor, Deserialize, Deserializer, Serialize};
#[cfg(target_family = "unix")]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[cfg(target_family = "unix")]
use tokio::net::UnixStream;
use tokio::sync::watch;
use topic::MessageAction;
use tracing::{debug, error, trace, warn};
use crate::{
crypto::{PacketBuffer, PublicKey},
data::DataPlane,
message::{chunk::MessageChunk, done::MessageDone, init::MessageInit},
metrics::Metrics,
subnet::Subnet,
};
pub use topic::TopicConfig;
mod chunk;
mod done;
mod init;
mod topic;
/// The amount of time to try and send messages before we give up.
const MESSAGE_SEND_WINDOW: Duration = Duration::from_secs(60 * 5);
/// The amount of time to wait before sending a chunk again if receipt is not acknowledged.
const RETRANSMISSION_DELAY: Duration = Duration::from_millis(100);
/// Amount of time between sweeps of the subscriber list to clear orphaned subscribers.
const REPLY_SUBSCRIBER_CLEAR_DELAY: Duration = Duration::from_secs(60);
/// Default timeout for waiting for a reply from a socket.
#[cfg(target_family = "unix")]
const SOCKET_REPLY_TIMEOUT: Duration = Duration::from_secs(5);
/// The average size of a single chunk. This is mainly intended to preallocate the chunk array on
/// the receiver side. This value should allow reasonable overhead for standard MTU.
const AVERAGE_CHUNK_SIZE: usize = 1_300;
/// The minimum size of a data chunk. Chunks which have a size smaller than this are rejected. An
/// exception is made for the last chunk.
const MINIMUM_CHUNK_SIZE: u64 = 250;
/// The size in bytes of the message header which starts each user message packet.
const MESSAGE_HEADER_SIZE: usize = 12;
/// The size in bytes of a message ID.
const MESSAGE_ID_SIZE: usize = 8;
/// Flag indicating we are starting a new message. The message ID is specified in the header. The
/// body contains the length of the message. The receiver must create an entry for the new ID. This
/// flag must always be set on the first packet of a message stream. If a receiver already received
/// data for this message and a new packet comes in with this flag set for this message, all
/// existing data must be removed on the receiver side.
const FLAG_MESSAGE_INIT: u16 = 0b1000_0000_0000_0000;
/// Flag indicating the message with the given ID is done, i.e. it has been fully transmitted.
const FLAG_MESSAGE_DONE: u16 = 0b0100_0000_0000_0000;
/// Indicates the message with this ID is aborted by the sender and the receiver should discard it.
/// The receiver can ignore this if it fully received the message.
const FLAG_MESSAGE_ABORTED: u16 = 0b0010_0000_0000_0000;
/// Flag indicating we are transferring a data chunk.
const FLAG_MESSAGE_CHUNK: u16 = 0b0001_0000_0000_0000;
/// Flag indicating the message with the given ID has been read by the receiver, that is it has
/// been transferred to an external process.
const FLAG_MESSAGE_READ: u16 = 0b0000_1000_0000_0000;
/// Flag indicating we are sending a reply to a received message. The message ID used is the same
/// as the received message.
const FLAG_MESSAGE_REPLY: u16 = 0b0000_0100_0000_0000;
/// Flag acknowledging receipt of a packet. Once this has been received, the packet __should not__ be
/// transmitted again by the sender.
// NOTE(review): bit 0b0000_0010_0000_0000 is currently unassigned between REPLY and ACK.
const FLAG_MESSAGE_ACK: u16 = 0b0000_0001_0000_0000;
/// Length of a message checksum in bytes.
const MESSAGE_CHECKSUM_LENGTH: usize = 32;
/// Checksum of a message used to verify received message integrity.
pub type Checksum = [u8; MESSAGE_CHECKSUM_LENGTH];
/// Response type when pushing a message: the assigned message ID, and (presumably when a reply is
/// requested — confirm against `push` callers) a receiver which resolves to the reply.
pub type MessagePushResponse = (MessageId, Option<watch::Receiver<Option<ReceivedMessage>>>);
/// Entry point for sending and receiving user messages over the overlay network.
pub struct MessageStack<M> {
    // The DataPlane is wrapped in a Mutex since it does not implement Sync.
    data_plane: Arc<Mutex<DataPlane<M>>>,
    /// Messages which are being, or have been, received.
    inbox: Arc<Mutex<MessageInbox>>,
    /// Messages which are being, or have been, sent.
    outbox: Arc<Mutex<MessageOutbox>>,
    /// Receiver handle for inbox listeners (basically a condvar).
    subscriber: watch::Receiver<()>,
    /// Subscribers for messages with specific ID's. These are intended to be used when waiting for
    /// a reply.
    /// This takes an Option as value to avoid the hassle of constructing a dummy value when
    /// creating the watch channel.
    reply_subscribers: Arc<Mutex<HashMap<MessageId, watch::Sender<Option<ReceivedMessage>>>>>,
    /// Topic-specific configuration
    topic_config: Arc<RwLock<TopicConfig>>,
}
/// Collection of all outbound messages being tracked during (and after) sending.
struct MessageOutbox {
    /// Tracked outbound messages, keyed by their message ID.
    msges: HashMap<MessageId, OutboundMessageInfo>,
}
/// Collection of all inbound messages, both in-flight and completed.
struct MessageInbox {
    /// Messages which are still being transmitted.
    // TODO: MessageID is part of ReceivedMessageInfo, rework this into HashSet?
    pending_msges: HashMap<MessageId, ReceivedMessageInfo>,
    /// Messages which have been completed.
    complete_msges: VecDeque<ReceivedMessage>,
    /// Notification sender used to alert subscribed listeners.
    notify: watch::Sender<()>,
}
/// Bookkeeping for a message which is being received and is not yet complete.
struct ReceivedMessageInfo {
    /// ID of the message, as set by the sender.
    id: MessageId,
    /// Whether this message was flagged as a reply to a message we sent earlier.
    is_reply: bool,
    /// Overlay address of the sender.
    src: IpAddr,
    /// Overlay address of the receiver (us).
    dst: IpAddr,
    /// Length of the finished message.
    len: u64,
    /// Optional topic of the message.
    topic: Vec<u8>,
    /// Received chunks in order; `None` marks a chunk which hasn't arrived yet.
    chunks: Vec<Option<Chunk>>,
}
/// A fully reassembled message received from a remote, ready for consumption.
#[derive(Clone)]
pub struct ReceivedMessage {
    /// Id of the message.
    pub id: MessageId,
    /// This message is a reply to an initial message with the given id.
    pub is_reply: bool,
    /// The overlay ip of the sender.
    pub src_ip: IpAddr,
    /// The public key of the sender of the message.
    pub src_pk: PublicKey,
    /// The overlay ip of the receiver.
    pub dst_ip: IpAddr,
    /// The public key of the receiver of the message. This is always ours.
    pub dst_pk: PublicKey,
    /// The possible topic of the message.
    pub topic: Vec<u8>,
    /// Actual message.
    pub data: Vec<u8>,
}
/// A chunk of a message. This represents individual data pieces on the receiver side.
#[derive(Clone)]
struct Chunk {
    /// Raw payload bytes of this chunk.
    data: Vec<u8>,
}
/// Description of an individual chunk, as tracked on the sender side.
struct ChunkState {
    /// Index of the chunk in the chunk stream.
    chunk_idx: usize,
    /// Byte offset of the chunk in the message.
    chunk_offset: usize,
    /// Size of the chunk.
    // TODO: is this needed or can this be extrapolated by checking the next chunk in the list?
    chunk_size: usize,
    /// Transmit state of the chunk.
    chunk_transmit_state: ChunkTransmitState,
}
/// Transmission state of an individual chunk
enum ChunkTransmitState {
    /// The chunk hasn't been transmitted yet.
    Started,
    /// The chunk has been sent but we did not receive an acknowledgment yet. The time the chunk
    /// was sent is remembered so we can calculate if we need to try sending it again.
    Sent(std::time::Instant),
    /// The receiver has acknowledged receipt of the chunk.
    Acked,
}
/// Transmission state of an outbound message as a whole.
#[derive(PartialEq)]
enum TransmissionState {
    /// Transmission has not started yet.
    Init,
    /// Transmission is in progress (ACK received for INIT).
    InProgress,
    /// Remote acknowledged full reception.
    Received,
    /// Remote indicated the message has been read by an external entity.
    Read,
    /// Transmission aborted by us. We indicated this by sending an abort flag to the receiver.
    Aborted,
}
/// Errors which can occur when pushing a new message for transmission.
#[derive(Debug, Clone, Copy)]
pub enum PushMessageError {
    /// The topic set in the message is too large.
    TopicTooLarge,
}
impl MessageInbox {
fn new(notify: watch::Sender<()>) -> Self {
Self {
pending_msges: HashMap::new(),
complete_msges: VecDeque::new(),
notify,
}
}
}
impl MessageOutbox {
/// Create a new `MessageOutbox` ready for use.
fn new() -> Self {
Self {
msges: HashMap::new(),
}
}
/// Insert a new message for tracking during (and after) sending.
fn insert(&mut self, msg: OutboundMessageInfo) {
self.msges.insert(msg.msg.id, msg);
}
}
impl<M> MessageStack<M>
where
M: Metrics + Clone + Send + 'static,
{
    /// Create a new `MessageStack`. This uses the provided [`DataPlane`] to inject message
    /// packets. Received packets must be injected into the `MessageStack` through the provided
    /// [`Stream`].
    ///
    /// This spawns two background tasks: one which consumes `message_packet_stream` and
    /// dispatches every packet, and one which periodically sweeps `reply_subscribers` to drop
    /// entries whose receivers have all been dropped.
    pub fn new<S>(
        data_plane: DataPlane<M>,
        message_packet_stream: S,
        topic_config: Option<TopicConfig>,
    ) -> Self
    where
        S: Stream<Item = (PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
    {
        let (notify, subscriber) = watch::channel(());
        let ms = Self {
            data_plane: Arc::new(Mutex::new(data_plane)),
            inbox: Arc::new(Mutex::new(MessageInbox::new(notify))),
            outbox: Arc::new(Mutex::new(MessageOutbox::new())),
            subscriber,
            reply_subscribers: Arc::new(Mutex::new(HashMap::new())),
            topic_config: Arc::new(RwLock::new(topic_config.unwrap_or_default())),
        };
        tokio::task::spawn(
            ms.clone()
                .handle_incoming_message_packets(message_packet_stream),
        );
        // task to periodically clear leftover reply subscribers
        {
            let ms = ms.clone();
            tokio::task::spawn(async move {
                loop {
                    tokio::time::sleep(REPLY_SUBSCRIBER_CLEAR_DELAY).await;
                    // Lock is only held between sleeps, never across an await point.
                    let mut subs = ms.reply_subscribers.lock().unwrap();
                    subs.retain(|id, v| {
                        // No receivers left means nobody is waiting for this reply anymore.
                        if v.receiver_count() == 0 {
                            debug!(
                                "Clearing orphaned subscription for message id {}",
                                id.as_hex()
                            );
                            false
                        } else {
                            true
                        }
                    })
                }
            });
        }
        ms
    }
/// Handle incoming messages from the [`DataPlane`].
async fn handle_incoming_message_packets<S>(self, mut message_packet_stream: S)
where
S: Stream<Item = (PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
{
while let Some((packet, src, dst)) = message_packet_stream.next().await {
let mp = MessagePacket::new(packet);
trace!(
"Received message packet with flags {:b}",
mp.header().flags()
);
if mp.header().flags().ack() {
self.handle_message_reply(mp);
} else {
self.handle_message(mp, src, dst);
}
}
warn!("Incoming message packet stream ended!");
}
/// Handle an incoming message packet which is a reply to a message we previously sent.
fn handle_message_reply(&self, mp: MessagePacket) {
let header = mp.header();
let message_id = header.message_id();
let flags = header.flags();
if flags.init() {
let mut outbox = self.outbox.lock().unwrap();
if let Some(message) = outbox.msges.get_mut(&message_id) {
if message.state != TransmissionState::Init {
debug!("Dropping INIT ACK for message not in init state");
return;
}
message.state = TransmissionState::InProgress;
// Transform message into chunks.
let mut chunks = Vec::with_capacity(message.len.div_ceil(AVERAGE_CHUNK_SIZE));
for (chunk_idx, data_chunk) in
message.msg.data.chunks(AVERAGE_CHUNK_SIZE).enumerate()
{
chunks.push(ChunkState {
chunk_idx,
chunk_offset: chunk_idx * AVERAGE_CHUNK_SIZE,
chunk_size: data_chunk.len(),
chunk_transmit_state: ChunkTransmitState::Started,
})
}
message.chunks = chunks;
}
} else if flags.chunk() {
// ACK for a chunk, mark chunk as received so it is not retried again.
let mut outbox = self.outbox.lock().unwrap();
if let Some(message) = outbox.msges.get_mut(&message_id) {
if message.state != TransmissionState::InProgress {
debug!("Dropping CHUNK ACK for message not being transmitted");
return;
}
let mc = MessageChunk::new(mp);
// Sanity checks. This is just to protect ourselves, if the other party is
// malicious it can return any data it wants here.
if mc.chunk_idx() > message.chunks.len() as u64 {
debug!("Dropping CHUNK ACK for message because ACK'ed chunk is out of bounds");
return;
}
// Don't check data size. It is the repsonsiblity of the other party to ensure he
// ACKs the right chunk. Additionally a malicious node could return a crafted input
// here anyway.
message.chunks[mc.chunk_idx() as usize].chunk_transmit_state =
ChunkTransmitState::Acked;
}
} else if flags.done() {
// ACK for full message.
let mut outbox = self.outbox.lock().unwrap();
if let Some(message) = outbox.msges.get_mut(&message_id) {
if message.state != TransmissionState::InProgress {
debug!("Dropping DONE ACK for message which is not being transmitted");
return;
}
message.state = TransmissionState::Received;
}
} else if flags.read() {
// Ack for a read flag. Since the original read flag is sent by the receiver, this
// means the sender indicates he has successfully received the notification that a
// userspace process has read the message. Note that read flags are only sent once, and
// this ack is only sent at most once, even if it gets lost. As a result, there is
// nothing to really do here, and this behavior (ACK READ) might be dropped in the
// future.
debug!("Received READ ACK");
} else {
debug!("Received unknown ACK message flags {:b}", flags);
}
}
    /// Handle an incoming message packet which is **not** a reply to a packet we previously sent.
    ///
    /// Depending on the packet flags this starts tracking a new inbound message (INIT), stores a
    /// data chunk (CHUNK), reassembles and dispatches a completed message (DONE), marks an
    /// outbound message as read by the remote's consumer (READ), or drops a pending inbound
    /// message (ABORTED). Branches which must be acknowledged produce a reply packet, which is
    /// injected back into the data plane at the end.
    fn handle_message(&self, mp: MessagePacket, src: IpAddr, dst: IpAddr) {
        let header = mp.header();
        let message_id = header.message_id();
        let flags = header.flags();
        let reply = if flags.init() {
            let is_reply = flags.reply();
            let mi = MessageInit::new(mp);
            // If this is not a reply, verify ACL
            if !is_reply && !self.topic_allowed(mi.topic(), src) {
                debug!("Dropping message whos src isn't allowed by ACL");
                return;
            }
            // We receive a new message with an ID. If we already have a complete message, ignore
            // it.
            let mut inbox = self.inbox.lock().unwrap();
            if inbox.complete_msges.iter().any(|m| m.id == message_id) {
                debug!("Dropping INIT message as we already have a complete message with this ID");
                return;
            }
            // Otherwise unilaterally reset the state. The message id space is large enough to
            // avoid accidental collisions.
            let expected_chunks = (mi.length() as usize).div_ceil(AVERAGE_CHUNK_SIZE);
            let chunks = vec![None; expected_chunks];
            let message = ReceivedMessageInfo {
                id: message_id,
                is_reply,
                src,
                dst,
                len: mi.length(),
                topic: mi.topic().into(),
                chunks,
            };
            if inbox.pending_msges.insert(message_id, message).is_some() {
                debug!("Dropped current pending message because we received a new message with INIT flag set for the same ID");
            }
            Some(mi.into_reply().into_inner())
        } else if flags.chunk() {
            // A chunk can only be received for incomplete messages. We don't have to check the
            // completed messages. Either there is none, so no problem, or there is one, in which
            // case we consider this to be a lingering chunk which was already accepted in the
            // meantime (as the message is complete).
            //
            // SAFETY: a malicious node could send a lot of empty chunks, which trigger allocations
            // to hold the chunk array, effectively exhausting memory. As such, we first need to
            // determine if the chunk is feasible.
            let mut inbox = self.inbox.lock().unwrap();
            if let Some(message) = inbox.pending_msges.get_mut(&message_id) {
                let mc = MessageChunk::new(mp);
                // Make sure the data is within bounds of the message being sent.
                if message.len < mc.chunk_offset() + mc.chunk_size() {
                    debug!("Dropping invalid message CHUNK for being out of bounds");
                    return;
                }
                // Check max chunk idx.
                let max_chunk_idx = if message.len == 0 {
                    0
                } else {
                    message.len.div_ceil(MINIMUM_CHUNK_SIZE) - 1
                };
                if mc.chunk_idx() > max_chunk_idx {
                    debug!("Dropping CHUNK because index is too high");
                    return;
                }
                // Check chunk size, allow exception on last chunk.
                if mc.chunk_size() < MINIMUM_CHUNK_SIZE && mc.chunk_idx() != max_chunk_idx {
                    debug!(
                        "Dropping CHUNK {}/{max_chunk_idx} which is too small ({} bytes / {MINIMUM_CHUNK_SIZE} bytes)",
                        mc.chunk_idx(),
                        mc.chunk_size()
                    );
                    return;
                }
                // Finally check if we have sufficient space for our chunks.
                if message.chunks.len() as u64 <= mc.chunk_idx() {
                    // TODO: optimize
                    let chunks =
                        vec![None; (mc.chunk_idx() + 1 - message.chunks.len() as u64) as usize];
                    message.chunks.extend_from_slice(&chunks);
                }
                // Now insert the chunk. Overwrite any previous chunk.
                message.chunks[mc.chunk_idx() as usize] = Some(Chunk {
                    data: mc.data().to_vec(),
                });
                Some(mc.into_reply().into_inner())
            } else {
                None
            }
        } else if flags.done() {
            let mut inbox = self.inbox.lock().unwrap();
            let md = MessageDone::new(mp);
            // At this point, we should have all message chunks. Verify length and reassemble them.
            if let Some(inbound_message) = inbox.pending_msges.get_mut(&message_id) {
                // Check if we have sufficient chunks
                if md.chunk_count() != inbound_message.chunks.len() as u64 {
                    // TODO: report error to sender
                    debug!("Message has invalid amount of chunks");
                    return;
                }
                // Track total size of data we have allocated.
                let mut chunk_size = 0;
                let mut message_data = Vec::with_capacity(inbound_message.len as usize);
                // Chunks are inserted in order.
                for chunk in &inbound_message.chunks {
                    if let Some(chunk) = chunk {
                        message_data.extend_from_slice(&chunk.data);
                        chunk_size += chunk.data.len();
                    } else {
                        // A none chunk is not possible, we should have all chunks
                        debug!("DONE received for incomplete message");
                        return;
                    }
                }
                // TODO: report back here if there is an error.
                if chunk_size as u64 != inbound_message.len {
                    debug!("Message has invalid size");
                    return;
                }
                let message = Message {
                    id: inbound_message.id,
                    src: inbound_message.src,
                    dst: inbound_message.dst,
                    topic: inbound_message.topic.clone(),
                    data: message_data,
                };
                // Integrity check: the checksum of the reassembled payload must match the one
                // announced in the DONE packet.
                let checksum = message.checksum();
                if checksum != md.checksum() {
                    debug!(
                        "Message has wrong checksum, got {} expected {}",
                        md.checksum().to_hex(),
                        checksum.to_hex()
                    );
                    return;
                }
                // Convert the IP's to PublicKeys.
                let dp = self.data_plane.lock().unwrap();
                let src_pubkey = if let Some(pk) = dp.router().get_pubkey(message.src) {
                    pk
                } else {
                    warn!("No public key entry for IP we just received a message chunk from");
                    return;
                };
                // This always is our own key as we are receiving.
                let dst_pubkey = dp.router().node_public_key();
                let message = ReceivedMessage {
                    id: message.id,
                    is_reply: inbound_message.is_reply,
                    src_ip: message.src,
                    src_pk: src_pubkey,
                    dst_ip: message.dst,
                    dst_pk: dst_pubkey,
                    topic: message.topic,
                    data: message.data,
                };
                debug!("Message {} reception complete", message.id.as_hex());
                // Check if we have any listeners and try to send the message to those first.
                let mut subscribers = self.reply_subscribers.lock().unwrap();
                // Use remove here since we are done with the subscriber
                // TODO: only check this if the is_reply flag is set?
                if let Some(sub) = subscribers.remove(&message.id) {
                    if let Err(e) = sub.send(Some(message.clone())) {
                        debug!("Subscriber quit before we could send the reply");
                        // Move message to be read if there were no subscribers.
                        inbox.complete_msges.push_back(e.0.unwrap());
                        // Notify subscribers we have a new message.
                        inbox.notify.send_replace(());
                    } else {
                        debug!("Informed subscriber of message reply");
                    }
                } else {
                    // Check if the topic has a configured socket path
                    let socket_path = self
                        .topic_config
                        .read()
                        .expect("Can get read lock on topic config")
                        .get_topic_forward_socket(&message.topic)
                        .cloned();
                    if let Some(socket_path) = socket_path {
                        debug!(
                            "Forwarding message {} to socket {}",
                            message.id.as_hex(),
                            socket_path.display()
                        );
                        // Clone the message for use in the async task
                        let message_clone = message.clone();
                        let message_stack = self.clone();
                        // Drop the inbox lock before spawning the task to avoid deadlocks
                        std::mem::drop(inbox);
                        std::mem::drop(subscribers);
                        // Spawn a task to handle the socket communication
                        #[cfg(target_family = "unix")]
                        tokio::task::spawn(async move {
                            // Forward the message to the socket
                            match message_stack
                                .forward_to_socket(
                                    &message_clone,
                                    &socket_path,
                                    SOCKET_REPLY_TIMEOUT,
                                )
                                .await
                            {
                                Ok(reply_data) => {
                                    debug!(message_id = message_clone.id.as_hex(), "Received reply from socket, sending back to original sender");
                                    // Send the reply back to the original sender
                                    message_stack.reply_message(
                                        message_clone.id,
                                        message_clone.src_ip,
                                        reply_data,
                                        MESSAGE_SEND_WINDOW,
                                    );
                                }
                                Err(e) => {
                                    // Log the error
                                    error!(err = % e, "Failed to forward message to socket");
                                    // Fall back to pushing to the queue
                                    let mut inbox = message_stack.inbox.lock().unwrap();
                                    inbox.complete_msges.push_back(message_clone);
                                    inbox.notify.send_replace(());
                                }
                            }
                        });
                        // NOTE(review): socket forwarding is unix-only; on other targets the
                        // message goes straight to the completed queue.
                        #[cfg(not(target_family = "unix"))]
                        {
                            let mut inbox = message_stack.inbox.lock().unwrap();
                            inbox.complete_msges.push_back(message_clone);
                            inbox.notify.send_replace(());
                        }
                        // Re-acquire the inbox lock to continue processing
                        inbox = self.inbox.lock().unwrap();
                    } else {
                        // No socket path configured, push to the queue as usual
                        inbox.complete_msges.push_back(message);
                        // Notify subscribers we have a new message.
                        inbox.notify.send_replace(());
                    }
                }
                inbox.pending_msges.remove(&message_id);
                Some(md.into_reply().into_inner())
            } else {
                None
            }
        } else if flags.read() {
            let mut outbox = self.outbox.lock().unwrap();
            if let Some(message) = outbox.msges.get_mut(&message_id) {
                if message.state != TransmissionState::Received {
                    debug!("Got READ for message which is not in received state");
                    return;
                }
                debug!("Receiver confirmed READ of message {}", message_id.as_hex());
                message.state = TransmissionState::Read;
            }
            None
        } else if flags.aborted() {
            // If the message is not finished yet, discard it completely.
            // But if it is finished, ignore this, i.e, nothing to do.
            let mut inbox = self.inbox.lock().unwrap();
            if inbox.pending_msges.remove(&message_id).is_some() {
                debug!("Dropping pending message because we received an ABORT");
            }
            None
        } else {
            debug!("Received unknown message flags {:b}", flags);
            None
        };
        if let Some(reply) = reply {
            // This is a reply, so SRC -> DST and DST -> SRC
            // FIXME: this can be fixed once the dataplane accepts generic IpAddr addresses.
            match (src, dst) {
                (IpAddr::V6(src), IpAddr::V6(dst)) => {
                    self.data_plane.lock().unwrap().inject_message_packet(
                        dst,
                        src,
                        reply.into_inner(),
                    );
                }
                _ => debug!("can only reply to message fragments if both src and dst are IPv6"),
            }
        }
    }
/// Check if a topic is allowed for a given src
fn topic_allowed(&self, topic: &[u8], src: IpAddr) -> bool {
if let Some(whitelist_config) = self
.topic_config
.read()
.expect("Can get read lock on topic config")
.whitelist()
.get(topic)
{
debug!(?topic, %src, "Checking allow list for topic");
for subnet in whitelist_config.subnets() {
if subnet.contains_ip(src) {
return true;
}
}
false
} else {
let action = self
.topic_config
.read()
.expect("Can get read lock on topic config")
.default();
debug!(?action, ?topic, "Default action for topic");
matches!(action, MessageAction::Accept)
}
}
/// Forward a message to a Unix domain socket and wait for a reply
#[cfg(target_family = "unix")]
async fn forward_to_socket(
&self,
message: &ReceivedMessage,
socket_path: &PathBuf,
timeout: Duration,
) -> Result<Vec<u8>, SocketError> {
// Connect to the socket
let mut stream = UnixStream::connect(socket_path).await.map_err(|e| {
error!(
"Failed to connect to socket {}: {}",
socket_path.display(),
e
);
SocketError::IoError(e)
})?;
// Send the message data, wrap in a timeout as we can't set read timeout on the socket
tokio::time::timeout(timeout, stream.write_all(&message.data))
.await
.map_err(|_| SocketError::Timeout)?
.map_err(|e| {
error!("Failed to write to socket: {}", e);
SocketError::IoError(e)
})?;
// Read the reply
let mut reply = Vec::new();
match stream.read_to_end(&mut reply).await {
Ok(0) => {
debug!("Socket connection closed without sending a reply");
Err(SocketError::ConnectionClosed)
}
Ok(_) => {
debug!(reply_len = reply.len(), "Received reply from socket");
Ok(reply)
}
Err(e)
if e.kind() == io::ErrorKind::TimedOut || e.kind() == io::ErrorKind::WouldBlock =>
{
debug!("Timeout waiting for socket reply");
Err(SocketError::Timeout)
}
Err(e) => {
error!(err = %e, "Error reading from socket");
Err(SocketError::IoError(e))
}
}
}
}
/// Error type for socket communication when forwarding a message to a Unix domain socket.
#[derive(Debug)]
#[cfg(target_family = "unix")]
enum SocketError {
    /// I/O error occurred during socket communication
    IoError(io::Error),
    /// Timeout occurred while waiting for a reply
    Timeout,
    /// Socket connection was closed unexpectedly
    ConnectionClosed,
}
#[cfg(target_family = "unix")]
impl core::fmt::Display for SocketError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::IoError(e) => write!(f, "I/O Error {e}"),
Self::Timeout => f.pad("timeout waiting for reply"),
Self::ConnectionClosed => f.pad("socket closed before we read a reply"),
}
}
}
impl<M> MessageStack<M>
where
M: Metrics + Clone + Send + 'static,
{
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | true |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/metric.rs | mycelium/src/metric.rs | //! Dedicated logic for
//! [metrics](https://datatracker.ietf.org/doc/html/rfc8966#metric-computation).
use core::fmt;
use std::ops::{Add, Sub};
/// Value of the infinite metric.
const METRIC_INFINITE: u16 = 0xFFFF;
/// A `Metric` is used to indicate the cost associated with a route. A lower Metric means a route
/// is more favorable.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub struct Metric(u16);
impl Metric {
    /// Create a new `Metric` with the given value.
    pub const fn new(value: u16) -> Self {
        Metric(value)
    }
    /// Creates a new infinite `Metric`.
    pub const fn infinite() -> Self {
        Metric(METRIC_INFINITE)
    }
    /// Checks if this metric indicates a retracted route.
    pub const fn is_infinite(&self) -> bool {
        self.0 == METRIC_INFINITE
    }
    /// Checks if this metric represents a directly connected route.
    pub const fn is_direct(&self) -> bool {
        self.0 == 0
    }
    /// Computes the absolute value of the difference between this and another `Metric`.
    pub fn delta(&self, rhs: &Self) -> Metric {
        // `u16::abs_diff` expresses |self - rhs| directly, replacing the manual
        // larger/smaller comparison.
        Metric(self.0.abs_diff(rhs.0))
    }
}
impl fmt::Display for Metric {
    /// Shows the raw cost, or the literal text `Infinite` for a retracted route.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.is_infinite() {
            true => f.pad("Infinite"),
            false => write!(f, "{}", self.0),
        }
    }
}
impl From<u16> for Metric {
fn from(value: u16) -> Self {
Metric(value)
}
}
impl From<Metric> for u16 {
fn from(value: Metric) -> Self {
value.0
}
}
impl Add for Metric {
    type Output = Self;

    /// Add two metrics together.
    ///
    /// Adding anything to an infinite metric stays infinite. The sum of two finite metrics
    /// saturates just below the infinite value, so finite routes can never accidentally become
    /// retracted through addition.
    fn add(self, rhs: Metric) -> Self::Output {
        if self.is_infinite() || rhs.is_infinite() {
            return Metric::infinite();
        }
        // Equivalent to the previous checked_add/map/unwrap_or chain: saturate the raw sum,
        // then cap it at u16::MAX - 1 so it stays strictly below METRIC_INFINITE.
        Metric(self.0.saturating_add(rhs.0).min(u16::MAX - 1))
    }
}
impl Add<&Metric> for &Metric {
type Output = Metric;
fn add(self, rhs: &Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Add<&Metric> for Metric {
type Output = Self;
fn add(self, rhs: &Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Add<Metric> for &Metric {
type Output = Metric;
fn add(self, rhs: Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Sub<Metric> for Metric {
    type Output = Metric;

    /// Subtract a finite metric from this metric, saturating at zero.
    ///
    /// # Panics
    ///
    /// Panics if `rhs` is infinite; an infinite `self` stays infinite.
    fn sub(self, rhs: Metric) -> Self::Output {
        if rhs.is_infinite() {
            panic!("Can't subtract an infinite metric");
        }
        match self.is_infinite() {
            true => Metric::infinite(),
            false => Metric(self.0.saturating_sub(rhs.0)),
        }
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/peer.rs | mycelium/src/peer.rs | use std::{
error::Error,
io,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock, Weak,
},
};
use tokio::{
select,
sync::{mpsc, Notify},
};
use tracing::{debug, error, info, trace};
use crate::{
connection::{Connection, ConnectionReadHalf, ConnectionWriteHalf},
packet::Packet,
};
use crate::{
packet::{ControlPacket, DataPacket},
sequence_number::SeqNo,
};
/// The maximum amount of packets to immediately send if they are ready when the first one is
/// received.
const PACKET_COALESCE_WINDOW: usize = 50;
/// The default link cost assigned to new peers before their actual cost is known.
///
/// In theory, the best value would be u16::MAX - 1, however this value would take too long to be
/// flushed out of the smoothed metric. A default of a 1000 (1 second) should be sufficiently large
/// to cover very bad connections, so they also converge to a smaller value. While there is no
/// issue with converging to a higher value (in other words, underestimating the latency to a
/// peer), this means that bad peers would briefly be more likely to be selected. Additionally,
/// since the latency increases, downstream peers would eventually find that the announced route
/// would become unfeasible, and send a seqno request (which should solve this efficiently). As a
/// tradeoff, it means it takes longer for new peers in the network to decrease to their actual
/// metric (in comparison with a lower starting metric), though this is in itself a useful thing
/// to have as it means peers joining the network would need to have some stability before being
/// selected as hop.
const DEFAULT_LINK_COST: u16 = 1000;
/// Multiplier for smoothed metric calculation of the existing smoothed metric.
const EXISTING_METRIC_FACTOR: u32 = 9;
/// Divisor for smoothed metric calculation of the combined metric
const TOTAL_METRIC_DIVISOR: u32 = 10;
#[derive(Debug, Clone)]
/// A peer represents a directly connected participant in the network.
///
/// All state lives behind a shared [`Arc`], so cloning a `Peer` yields another handle to the
/// same underlying peer.
pub struct Peer {
    /// Shared internal state; every clone of this `Peer` points at the same instance.
    inner: Arc<PeerInner>,
}
/// A weak reference to a peer, which does not prevent it from being cleaned up. This can be used
/// to check liveliness of the [`Peer`] instance it originated from.
pub struct PeerRef {
    /// Weak counterpart of the `Arc` held by the originating [`Peer`].
    inner: Weak<PeerInner>,
}
impl Peer {
    /// Create a new `Peer` on top of an established [`Connection`].
    ///
    /// This spawns a background task which owns the connection. The task forwards packets read
    /// from the connection to the router (via `router_data_tx` / `router_control_tx`), and
    /// drains the peer's internal data/control channels onto the connection, coalescing up to
    /// [`PACKET_COALESCE_WINDOW`] ready packets before flushing. When the connection fails, the
    /// stream ends, or a death notification arrives, the task marks the peer as not alive and
    /// reports it on `dead_peer_sink`.
    ///
    /// # Errors
    ///
    /// Returns an error if the connection's identifier or static link cost can't be obtained.
    pub fn new<C: Connection + Unpin + Send + 'static>(
        router_data_tx: mpsc::Sender<DataPacket>,
        router_control_tx: mpsc::UnboundedSender<(ControlPacket, Peer)>,
        connection: C,
        dead_peer_sink: mpsc::Sender<Peer>,
    ) -> Result<Self, io::Error> {
        // Data channel for peer
        let (to_peer_data, mut from_routing_data) = mpsc::unbounded_channel::<DataPacket>();
        // Control channel for peer
        let (to_peer_control, mut from_routing_control) =
            mpsc::unbounded_channel::<ControlPacket>();
        let death_notifier = Arc::new(Notify::new());
        let death_watcher = death_notifier.clone();
        let peer = Peer {
            inner: Arc::new(PeerInner {
                state: RwLock::new(PeerState::new()),
                to_peer_data,
                to_peer_control,
                connection_identifier: connection.identifier()?,
                static_link_cost: connection.static_link_cost()?,
                death_notifier,
                alive: AtomicBool::new(true),
            }),
        };
        {
            let peer = peer.clone();
            let (mut stream, mut sink) = connection.split();
            // True while packets have been fed to the sink but not yet flushed; while set, no
            // new packets are pulled from the routing channels (see the `if !needs_flush` guards).
            let mut needs_flush = false;
            tokio::spawn(async move {
                loop {
                    select! {
                        packet = stream.receive_packet() => {
                            match packet {
                                Some(Ok(packet)) => {
                                    match packet {
                                        Packet::DataPacket(packet) => {
                                            // An error here means the receiver is dropped/closed,
                                            // this is not recoverable.
                                            if let Err(error) = router_data_tx.send(packet).await{
                                                error!("Error sending to to_routing_data: {}", error);
                                                break
                                            }
                                        }
                                        Packet::ControlPacket(packet) => {
                                            if let Err(error) = router_control_tx.send((packet, peer.clone())) {
                                                // An error here means the receiver is dropped/closed,
                                                // this is not recoverable.
                                                error!("Error sending to to_routing_control: {}", error);
                                                break
                                            }
                                        }
                                    }
                                }
                                Some(Err(e)) => {
                                    error!("Frame error from {}: {e}", peer.connection_identifier());
                                    break;
                                },
                                None => {
                                    info!("Stream to {} is closed", peer.connection_identifier());
                                    break;
                                }
                            }
                        }
                        rv = from_routing_data.recv(), if !needs_flush => {
                            match rv {
                                None => break,
                                Some(packet) => {
                                    needs_flush = true;
                                    if let Err(e) = sink.feed_data_packet(packet).await {
                                        error!("Failed to feed data packet to connection: {e}");
                                        break
                                    }
                                    for _ in 1..PACKET_COALESCE_WINDOW {
                                        // There can be 2 cases of errors here, empty channel and no more
                                        // senders. In both cases we don't really care at this point.
                                        if let Ok(packet) = from_routing_data.try_recv() {
                                            if let Err(e) = sink.feed_data_packet(packet).await {
                                                error!("Failed to feed data packet to connection: {e}");
                                                break
                                            }
                                            trace!("Instantly queued ready packet to transfer to peer");
                                        } else {
                                            // no packets ready, flush currently buffered ones
                                            break
                                        }
                                    }
                                }
                            }
                        }
                        rv = from_routing_control.recv(), if !needs_flush => {
                            match rv {
                                None => break,
                                Some(packet) => {
                                    needs_flush = true;
                                    if let Err(e) = sink.feed_control_packet(packet).await {
                                        error!("Failed to feed control packet to connection: {e}");
                                        break
                                    }
                                    for _ in 1..PACKET_COALESCE_WINDOW {
                                        // There can be 2 cases of errors here, empty channel and no more
                                        // senders. In both cases we don't really care at this point.
                                        if let Ok(packet) = from_routing_control.try_recv() {
                                            if let Err(e) = sink.feed_control_packet(packet).await {
                                                error!("Failed to feed data packet to connection: {e}");
                                                break
                                            }
                                        } else {
                                            // No packets ready, flush currently buffered ones
                                            break
                                        }
                                    }
                                }
                            }
                        }
                        r = sink.flush(), if needs_flush => {
                            if let Err(e) = r {
                                error!("Failed to flush buffered peer connection packets: {e}");
                                break
                            }
                            needs_flush = false;
                        }
                        _ = death_watcher.notified() => {
                            // Attempt gracefull shutdown
                            // let mut framed = sink.reunite(stream).expect("SplitSink and SplitStream here can only be part of the same original Framned; Qed");
                            // let _ = framed.close().await;
                            break;
                        }
                    }
                }
                // Notify router we are dead, also modify our internal state to declare that.
                // Relaxed ordering is fine, we just care that the variable is set.
                peer.inner.alive.store(false, Ordering::Relaxed);
                let remote_id = peer.connection_identifier().clone();
                debug!("Notifying router peer {remote_id} is dead");
                if let Err(e) = dead_peer_sink.send(peer).await {
                    error!("Peer {remote_id} could not notify router of termination: {e}");
                }
            });
        };
        Ok(peer)
    }
/// Get current sequence number for this peer.
pub fn hello_seqno(&self) -> SeqNo {
self.inner.state.read().unwrap().hello_seqno
}
/// Adds 1 to the sequence number of this peer .
pub fn increment_hello_seqno(&self) {
self.inner.state.write().unwrap().hello_seqno += 1;
}
pub fn time_last_received_hello(&self) -> tokio::time::Instant {
self.inner.state.read().unwrap().time_last_received_hello
}
pub fn set_time_last_received_hello(&self, time: tokio::time::Instant) {
self.inner.state.write().unwrap().time_last_received_hello = time
}
/// For sending data packets towards a peer instance on this node.
/// It's send over the to_peer_data channel and read from the corresponding receiver.
/// The receiver sends the packet over the TCP stream towards the destined peer instance on another node
pub fn send_data_packet(&self, data_packet: DataPacket) -> Result<(), Box<dyn Error>> {
Ok(self.inner.to_peer_data.send(data_packet)?)
}
/// For sending control packets towards a peer instance on this node.
/// It's send over the to_peer_control channel and read from the corresponding receiver.
/// The receiver sends the packet over the TCP stream towards the destined peer instance on another node
pub fn send_control_packet(&self, control_packet: ControlPacket) -> Result<(), Box<dyn Error>> {
Ok(self.inner.to_peer_control.send(control_packet)?)
}
/// Get the cost to use the peer, i.e. the additional impact on the [`crate::metric::Metric`]
/// for using this `Peer`.
///
/// This is a smoothed value, which is calculated over the recent history of link cost.
pub fn link_cost(&self) -> u16 {
self.inner.state.read().unwrap().link_cost + self.inner.static_link_cost
}
/// Sets the link cost based on the provided value.
///
/// The link cost is not set to the given value, but rather to an average of recent values.
/// This makes sure short-lived, hard spikes of the link cost of a peer don't influence the
/// routing.
pub fn set_link_cost(&self, new_link_cost: u16) {
// Calculate new link cost by multiplying (i.e. scaling) old and new link cost and
// averaging them.
let mut inner = self.inner.state.write().unwrap();
inner.link_cost = (((inner.link_cost as u32) * EXISTING_METRIC_FACTOR
+ (new_link_cost as u32) * (TOTAL_METRIC_DIVISOR - EXISTING_METRIC_FACTOR))
/ TOTAL_METRIC_DIVISOR) as u16;
}
/// Identifier for the connection to the `Peer`.
pub fn connection_identifier(&self) -> &String {
&self.inner.connection_identifier
}
pub fn time_last_received_ihu(&self) -> tokio::time::Instant {
self.inner.state.read().unwrap().time_last_received_ihu
}
pub fn set_time_last_received_ihu(&self, time: tokio::time::Instant) {
self.inner.state.write().unwrap().time_last_received_ihu = time
}
/// Notify this `Peer` that it died.
///
/// While some [`Connection`] types can immediately detect that the connection itself is
/// broken, not all of them can. In this scenario, we need to rely on an outside signal to tell
/// us that we have, in fact, died.
pub fn died(&self) {
self.inner.alive.store(false, Ordering::Relaxed);
self.inner.death_notifier.notify_one();
}
/// Checks if the connection of this `Peer` is still alive.
///
/// For connection types which don't have (real time) state information, this might return a
/// false positive if the connection has actually died, but the Peer did not notice this (yet)
/// and hasn't been informed.
pub fn alive(&self) -> bool {
self.inner.alive.load(Ordering::Relaxed)
}
/// Create a new [`PeerRef`] that refers to this `Peer` instance.
pub fn refer(&self) -> PeerRef {
PeerRef {
inner: Arc::downgrade(&self.inner),
}
}
}
impl PeerRef {
/// Contructs a new `PeerRef` which is not associated with any actually [`Peer`].
/// [`PeerRef::alive`] will always return false when called on this `PeerRef`.
pub fn new() -> Self {
PeerRef { inner: Weak::new() }
}
/// Check if the connection of the [`Peer`] this `PeerRef` points to is still alive.
pub fn alive(&self) -> bool {
if let Some(peer) = self.inner.upgrade() {
peer.alive.load(Ordering::Relaxed)
} else {
false
}
}
/// Attempts to convert this `PeerRef` into a full [`Peer`].
pub fn upgrade(&self) -> Option<Peer> {
self.inner.upgrade().map(|inner| Peer { inner })
}
}
impl Default for PeerRef {
fn default() -> Self {
Self::new()
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.inner, &other.inner)
}
}
#[derive(Debug)]
struct PeerInner {
state: RwLock<PeerState>,
to_peer_data: mpsc::UnboundedSender<DataPacket>,
to_peer_control: mpsc::UnboundedSender<ControlPacket>,
/// Used to identify peer based on its connection params.
connection_identifier: String,
/// Static cost of using this link, to be added to the announced metric for routes through this
/// Peer.
static_link_cost: u16,
/// Channel to notify the connection of its decease.
death_notifier: Arc<Notify>,
/// Keep track if the connection is alive.
alive: AtomicBool,
}
#[derive(Debug)]
struct PeerState {
hello_seqno: SeqNo,
time_last_received_hello: tokio::time::Instant,
link_cost: u16,
time_last_received_ihu: tokio::time::Instant,
}
impl PeerState {
/// Create a new `PeerInner`, holding the mutable state of a [`Peer`]
fn new() -> Self {
// Initialize last_sent_hello_seqno to 0
let hello_seqno = SeqNo::default();
let link_cost = DEFAULT_LINK_COST;
// Initialize time_last_received_hello to now
let time_last_received_hello = tokio::time::Instant::now();
// Initialiwe time_last_send_ihu
let time_last_received_ihu = tokio::time::Instant::now();
Self {
hello_seqno,
link_cost,
time_last_received_ihu,
time_last_received_hello,
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/proxy.rs | mycelium/src/proxy.rs | use std::{
collections::HashMap,
net::{Ipv6Addr, SocketAddr},
sync::{Arc, Mutex, RwLock},
};
use crate::{metrics::Metrics, router::Router};
use futures::stream::FuturesUnordered;
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::{TcpListener, TcpStream},
select,
time::{self, timeout, Duration, MissedTickBehavior},
};
use tokio_stream::StreamExt;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, trace, warn};
/// Amount of time to wait between probes for proxies.
const PROXY_PROBE_INTERVAL: Duration = Duration::from_secs(60);
/// Default IANA assigned port for Socks proxies.
/// https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=1080
const DEFAULT_SOCKS5_PORT: u16 = 1080;
/// Amount of time before we consider a probe to be dropped.
const PROBE_TIMEOUT: Duration = Duration::from_secs(5);
/// Client greeting packet in the Socks5 protocol, starts the handshake.
const SOCKS5_CLIENT_GREETING: [u8; 3] = [
0x05, // Version, 5 -> Socks5
0x01, // NAUTH, number of authentication methods supported, only 1
0x00, // AUTH, 1 byte per supported method, only 1 supported, 0 == No authentication
];
/// Server choice packet expected as reply to our client greeting while probing if a proxy is
/// present and open
const SOCKS5_EXPECTED_SERVER_CHOICE: [u8; 2] = [
0x05, // Version, 5 -> Socks5
0x00, // CAUTH, chosen authentication method, 0 since this is the only one offered
];
/// Server choice packet if the server denies your authentication
const SOCKS5_SERVER_CHOICE_DENIED: [u8; 2] = [
0x05, // Version, 5 -> Socks5
0xFF, // CAUTH, no acceptable methods
];
/// Proxy implementations scans known IPs from the [`Router`](crate::router::Router), to see if
/// there is an (open) SOCKS5 proxy listening on the default port on that IP.
#[derive(Clone)]
pub struct Proxy<M> {
router: Arc<Mutex<Router<M>>>,
proxy_cache: Arc<RwLock<HashMap<Ipv6Addr, ProxyProbeStatus>>>,
chosen_remote: Arc<Mutex<Option<SocketAddr>>>,
/// Cancellation token used for scanning routines
scan_token: Arc<Mutex<CancellationToken>>,
/// Cancellation token used for the actual proxy connections
proxy_token: Arc<Mutex<CancellationToken>>,
}
/// Status of a probe for a proxy.
#[derive(Debug)]
pub enum ProxyProbeStatus {
/// No process is listening on the probed port on the remote IP.
NotListening,
/// The last probe found a valid proxy server at this address, which does not require
/// authentication.
Valid,
/// The last probe found a valid proxy server at this address, but it requires authentication
AuthenticationRequired,
/// A process is listening but it is not a valid Socks5 proxy
NotAProxy,
}
impl<M> Proxy<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new `Proxy` implementation.
pub fn new(router: Router<M>) -> Self {
Self {
router: Arc::new(Mutex::new(router)),
proxy_cache: Arc::new(RwLock::new(HashMap::new())),
chosen_remote: Arc::new(Mutex::new(None)),
scan_token: Arc::new(Mutex::new(CancellationToken::new())),
proxy_token: Arc::new(Mutex::new(CancellationToken::new())),
}
}
/// Get a list of all known proxies we discovered.
pub fn known_proxies(&self) -> Vec<Ipv6Addr> {
self.proxy_cache
.read()
.expect("Can read lock proxy cache; qed")
.iter()
.filter_map(|(addr, s)| {
if matches!(s, ProxyProbeStatus::Valid) {
Some(*addr)
} else {
None
}
})
.collect()
}
/// Starst a background task which periodically scans the [`Router`](crate::router::Router)
/// for potential new proxies.
///
/// # Panics
///
/// Panics if not called from the context of tokio runtime.
pub fn start_probing(&self) {
info!("Start Socks5 proxy probing");
let router = self.router.clone();
let proxy_cache = self.proxy_cache.clone();
let cancel_token = self
.scan_token
.lock()
.expect("Can lock cancel_token; qed")
.clone();
tokio::spawn(async move {
let mut probe_interval = time::interval(PROXY_PROBE_INTERVAL);
probe_interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
loop {
select! {
_ = probe_interval.tick() => {
debug!("Starting proxy probes");
},
_ = cancel_token.cancelled() => {
break
}
}
let routes = router
.lock()
.expect("Can lock router; qed")
.load_selected_routes();
for route in routes {
let proxy_cache = proxy_cache.clone();
tokio::spawn(async move {
let address = route.source().router_id().to_pubkey().address();
debug!(%address, "Probing Socks5 proxy");
if timeout(PROBE_TIMEOUT, async {
let mut stream =
match TcpStream::connect((address, DEFAULT_SOCKS5_PORT)).await {
Ok(stream) => stream,
Err(err) => {
trace!(%address, %err, "Failed to connect");
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::NotListening);
return
}
};
trace!(%address, "Connection established");
if let Err(err) = stream.write_all(&SOCKS5_CLIENT_GREETING).await {
trace!(%address, %err, "Failed to write greeting to remote");
// If we can't write to the remote, assume nothing is listening
// there.
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::NotListening);
return
}
let mut recv_buf = [0; 2];
// We can use read exact here since we are wrapped in a timeout, so
// this eventually ends even if the remote does not send data.
if let Err(err) = stream.read_exact(&mut recv_buf).await {
trace!(%address, %err, "Failed to read server reply from remote");
// If we can't read from the remote, assume nothing is listening
// there. (At least nothing valid)
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::NotListening);
return
}
match recv_buf {
SOCKS5_EXPECTED_SERVER_CHOICE => {
debug!(%address, "Valid open Socks5 server found");
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::Valid);
}
SOCKS5_SERVER_CHOICE_DENIED => {
debug!(%address, "Valid Socks5 server found, but it requires authentication");
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::AuthenticationRequired);
}
_ => {
debug!(%address, "Reply does not match the expected reply format of a Socks5 proxy");
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::NotAProxy);
}
}
})
.await.is_err() {
debug!(%address, "Timeout probing proxy");
proxy_cache
.write()
.expect("Proxy cache can be write locked; qed")
.insert(address, ProxyProbeStatus::NotListening);
}
});
}
}
});
}
/// Stops any ongoing probes.
pub fn stop_probing(&self) {
info!("Stopping Socks5 proxy probing");
self.scan_token
.lock()
.expect("Can lock cancel token; qed")
.cancel();
}
/// Connect to a remote Socks5 proxy. If a proxy address is given, connect to that one. If not, connect to the best (fastest) known proxy.
pub async fn connect(&self, remote: Option<SocketAddr>) -> Result<SocketAddr, ConnectionError> {
let target = match remote {
Some(remote) => remote,
None => {
debug!("Finding best known proxy");
// Find best proxy of our internal list by racing all proxies and finding the first
// one which gives a valid response.
let futs = FuturesUnordered::new();
for ip in self
.proxy_cache
.read()
.expect("Can read lock proxy cache; qed")
.iter()
.filter_map(|(address, ps)| {
if matches!(ps, ProxyProbeStatus::Valid) {
Some(*address)
} else {
None
}
})
{
futs.push(async move {
// It's fine to swallow errors here, we are just sanity checking
let addr: SocketAddr = (ip, DEFAULT_SOCKS5_PORT).into();
trace!(%addr, "Checking proxy availability and latency");
let mut stream = TcpStream::connect(addr).await.ok()?;
stream.write_all(&SOCKS5_CLIENT_GREETING).await.ok()?;
let mut recv_buf = [0; 2];
stream.read_exact(&mut recv_buf).await.ok()?;
match recv_buf {
SOCKS5_EXPECTED_SERVER_CHOICE => Some(addr),
_ => None,
}
});
}
let target: Option<SocketAddr> = futs.filter_map(|o| o).next().await;
if target.is_none() {
return Err(ConnectionError { _private: () });
}
// Safe since we just checked the None case above
target.unwrap()
}
};
// Now that we have a target, "connect" to it, i.e. set it as proxy destination.
debug!(%target, "Setting remote Socks5 proxy");
*self
.chosen_remote
.lock()
.expect("Can lock chosen remote; qed") = Some(target);
self.start_proxy();
Ok(target)
}
/// Disconnects from the proxy, if any is connected
pub fn disconnect(&self) {
self.proxy_token
.lock()
.expect("Can lock proxy token; qed")
.cancel();
*self
.chosen_remote
.lock()
.expect("Can lock chosen remote; qed") = None;
}
/// Starts a background task for proxying connections.
/// This spawns a listener, and proxies all connections to the chosen target.
fn start_proxy(&self) {
let target = *self
.chosen_remote
.lock()
.expect("Can lock chosen remote; qed");
if target.is_none() {
warn!("Can't start proxy if target is none, this should not happen");
return;
}
let target = target.unwrap();
info!(%target, "Starting Socks5 proxy forwarding");
// First cancel the old token, then set a new token
let mut old_token = self.proxy_token.lock().expect("Can lock proxy token; qed");
old_token.cancel();
let proxy_token = CancellationToken::new();
*old_token = proxy_token.clone();
tokio::spawn(async move {
let listener = TcpListener::bind(("::", DEFAULT_SOCKS5_PORT))
.await
.inspect_err(|err| {
error!(%err, "Could not bind TCP listener");
})?;
loop {
select! {
_ = proxy_token.cancelled() => {
debug!("Shutting down proxy listener");
break
}
stream = listener.accept() => {
match stream {
Err(err) => {
error!(%err, "Proxy listener accept error");
return Err(err)
}
Ok((mut stream, source)) => {
trace!(%source, "Got new proxy stream");
let proxy_token = proxy_token.clone();
// Unwrap is safe since we checked the none variant above
tokio::spawn(async move {
let mut con = TcpStream::connect(target).await?;
select! {
_ = proxy_token.cancelled() => {
trace!(%source, %target, "Shutting down proxy stream");
Ok(())
}
r = tokio::io::copy_bidirectional(&mut stream, &mut con) => {
match r {
Err(err) => {
trace!(%err, %source, %target, "Proxy stream finished with error");
Err(err)
}
Ok((_, _)) => {
trace!(%source, %target, "Proxy stream finished normally");
Ok(())
}
}
}
}
});
}
}
}
}
}
Ok(())
});
}
}
/// Error returned when performing an automatic Socks5 connect, but no valid remotes are found.
#[derive(Debug)]
pub struct ConnectionError {
_private: (),
}
impl std::fmt::Display for ConnectionError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("No valid Socks5 proxy found to connect to")
}
}
impl std::error::Error for ConnectionError {}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/seqno_cache.rs | mycelium/src/seqno_cache.rs | //! The seqno request cache keeps track of seqno requests sent by the node. This allows us to drop
//! duplicate requests, and to notify the source of requests (if it wasn't the local node) about
//! relevant updates.
use std::{
sync::Arc,
time::{Duration, Instant},
};
use dashmap::DashMap;
use tokio::time::MissedTickBehavior;
use tracing::{debug, trace};
use crate::{peer::Peer, router_id::RouterId, sequence_number::SeqNo, subnet::Subnet};
/// The amount of time to remember a seqno request (since it was first seen), before we remove it
/// (assuming it was not removed manually before that).
const SEQNO_DEDUP_TTL: Duration = Duration::from_secs(60);
/// A sequence number request, either forwarded or originated by the local node.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct SeqnoRequestCacheKey {
pub router_id: RouterId,
pub subnet: Subnet,
pub seqno: SeqNo,
}
impl std::fmt::Display for SeqnoRequestCacheKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"seqno {} for {} from {}",
self.seqno, self.subnet, self.router_id
)
}
}
/// Information retained for sequence number requests we've sent.
struct SeqnoForwardInfo {
/// Which peers have asked us to forward this seqno request.
sources: Vec<Peer>,
/// Which peers have we sent this request to.
targets: Vec<Peer>,
/// Time at which we first forwarded the requets.
first_sent: Instant,
/// When did we last sent a seqno request.
last_sent: Instant,
}
/// A cache for outbound seqno requests. Entries in the cache are automatically removed after a
/// certain amount of time. The cache does not account for the source table. That is, if the
/// requested seqno is smaller, it might pass the cache, but should have been blocked earlier by
/// the source table check. As such, this cache should be the last step in deciding if a seqno
/// request is forwarded.
#[derive(Clone)]
pub struct SeqnoCache {
/// Actual cache wrapped in an Arc to make it sharaeble.
cache: Arc<DashMap<SeqnoRequestCacheKey, SeqnoForwardInfo, ahash::RandomState>>,
}
impl SeqnoCache {
/// Create a new [`SeqnoCache`].
pub fn new() -> Self {
trace!(capacity = 0, "Creating new seqno cache");
let cache = Arc::new(DashMap::with_hasher_and_shard_amount(
ahash::RandomState::new(),
// This number has been chosen completely at random
1024,
));
let sc = Self { cache };
// Spawn background cleanup task.
tokio::spawn(sc.clone().sweep_entries());
sc
}
/// Record a forwarded seqno request to a given target. Also keep track of the origin of the
/// request. If the local node generated the request, source must be [`None`]
pub fn forward(&self, request: SeqnoRequestCacheKey, target: Peer, source: Option<Peer>) {
let mut info = self.cache.entry(request).or_default();
info.last_sent = Instant::now();
if !info.targets.contains(&target) {
info.targets.push(target);
} else {
debug!(
seqno_request = %request,
"Already sent seqno request to target {}",
target.connection_identifier()
);
}
if let Some(source) = source {
if !info.sources.contains(&source) {
info.sources.push(source);
} else {
debug!(seqno_request = %request, "Peer {} is requesting the same seqno again", source.connection_identifier());
}
}
}
/// Get a list of all peers which we've already sent the given seqno request to, as well as
/// when we've last sent a request.
pub fn info(&self, request: &SeqnoRequestCacheKey) -> Option<(Instant, Vec<Peer>)> {
self.cache
.get(request)
.map(|info| (info.last_sent, info.targets.clone()))
}
/// Removes forwarding info from the seqno cache. If forwarding info is available, the source
/// peers (peers which requested us to forward this request) are returned.
// TODO: cleanup if needed
#[allow(dead_code)]
pub fn remove(&self, request: &SeqnoRequestCacheKey) -> Option<Vec<Peer>> {
self.cache.remove(request).map(|(_, info)| info.sources)
}
/// Get forwarding info from the seqno cache. If forwarding info is available, the source
/// peers (peers which requested us to forward this request) are returned.
// TODO: cleanup if needed
#[allow(dead_code)]
pub fn get(&self, request: &SeqnoRequestCacheKey) -> Option<Vec<Peer>> {
self.cache.get(request).map(|info| info.sources.clone())
}
/// Periodic task to clear old entries for which no reply came in.
async fn sweep_entries(self) {
let mut interval = tokio::time::interval(SEQNO_DEDUP_TTL);
interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
loop {
interval.tick().await;
debug!("Cleaning up expired seqno requests from seqno cache");
let prev_entries = self.cache.len();
let prev_cap = self.cache.capacity();
self.cache
.retain(|_, info| info.first_sent.elapsed() <= SEQNO_DEDUP_TTL);
self.cache.shrink_to_fit();
debug!(
cleaned_entries = prev_entries - self.cache.len(),
removed_capacity = prev_cap - self.cache.capacity(),
"Cleaned up stale seqno request cache entries"
);
}
}
}
impl Default for SeqnoCache {
fn default() -> Self {
Self::new()
}
}
impl Default for SeqnoForwardInfo {
fn default() -> Self {
Self {
sources: vec![],
targets: vec![],
first_sent: Instant::now(),
last_sent: Instant::now(),
}
}
}
impl std::fmt::Debug for SeqnoRequestCacheKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SeqnoRequestCacheKey")
.field("router_id", &self.router_id.to_string())
.field("subnet", &self.subnet.to_string())
.field("seqno", &self.seqno.to_string())
.finish()
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/endpoint.rs | mycelium/src/endpoint.rs | use std::{
fmt,
net::{AddrParseError, SocketAddr},
str::FromStr,
};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq)]
/// Error generated while processing improperly formatted endpoints.
pub enum EndpointParseError {
/// An address was specified without leading protocol information.
MissingProtocol,
/// An endpoint was specified using a protocol we (currently) do not understand.
UnknownProtocol,
/// Error while parsing the specific address.
Address(AddrParseError),
}
/// Protocol used by an endpoint.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Protocol {
/// Standard plain text Tcp.
Tcp,
/// Tls 1.3 with PSK over Tcp.
Tls,
/// Quic protocol (over UDP).
Quic,
}
/// An endpoint defines a address and a protocol to use when communicating with it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Endpoint {
proto: Protocol,
socket_addr: SocketAddr,
}
impl Endpoint {
/// Create a new `Endpoint` with given [`Protocol`] and address.
pub fn new(proto: Protocol, socket_addr: SocketAddr) -> Self {
Self { proto, socket_addr }
}
/// Get the [`Protocol`] used by this `Endpoint`.
pub fn proto(&self) -> Protocol {
self.proto
}
/// Get the [`SocketAddr`] used by this `Endpoint`.
pub fn address(&self) -> SocketAddr {
self.socket_addr
}
}
impl FromStr for Endpoint {
type Err = EndpointParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once("://") {
None => Err(EndpointParseError::MissingProtocol),
Some((proto, socket)) => {
let proto = match proto.to_lowercase().as_str() {
"tcp" => Protocol::Tcp,
"quic" => Protocol::Quic,
"tls" => Protocol::Tls,
_ => return Err(EndpointParseError::UnknownProtocol),
};
let socket_addr = SocketAddr::from_str(socket)?;
Ok(Endpoint { proto, socket_addr })
}
}
}
}
impl fmt::Display for Endpoint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{} {}", self.proto, self.socket_addr))
}
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Self::Tcp => "Tcp",
Self::Tls => "Tls",
Self::Quic => "Quic",
})
}
}
impl fmt::Display for EndpointParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::MissingProtocol => f.write_str("missing leading protocol identifier"),
Self::UnknownProtocol => f.write_str("protocol for endpoint is not supported"),
Self::Address(e) => f.write_fmt(format_args!("failed to parse address: {e}")),
}
}
}
impl std::error::Error for EndpointParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Self::Address(e) => Some(e),
_ => None,
}
}
}
impl From<AddrParseError> for EndpointParseError {
fn from(value: AddrParseError) -> Self {
Self::Address(value)
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun/ios.rs | mycelium/src/tun/ios.rs | //! ios specific tun interface setup.
use std::io::{self, IoSlice};
use futures::{Sink, Stream};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
select,
sync::mpsc,
};
use tracing::{error, info};
use crate::crypto::PacketBuffer;
use crate::tun::TunConfig;
// TODO
const LINK_MTU: i32 = 1400;
/// The 4 byte packet header written before a packet is sent on the TUN
// TODO: figure out structure and values, but for now this seems to work.
const HEADER: [u8; 4] = [0, 0, 0, 30];
/// Create a new tun interface and set required routes
///
/// # Panics
///
/// This function will panic if called outside of the context of a tokio runtime.
pub async fn new(
tun_config: TunConfig,
) -> Result<
(
impl Stream<Item = io::Result<PacketBuffer>>,
impl Sink<PacketBuffer, Error = impl std::error::Error> + Clone,
),
Box<dyn std::error::Error>,
> {
let mut tun = create_tun_interface(tun_config.tun_fd)?;
let (tun_sink, mut sink_receiver) = mpsc::channel::<PacketBuffer>(1000);
let (tun_stream, stream_receiver) = mpsc::unbounded_channel();
// Spawn a single task to manage the TUN interface
tokio::spawn(async move {
let mut buf_hold = None;
loop {
let mut buf: PacketBuffer = buf_hold.take().unwrap_or_default();
select! {
data = sink_receiver.recv() => {
match data {
None => return,
Some(data) => {
// We need to append a 4 byte header here
if let Err(e) = tun.write_vectored(&[IoSlice::new(&HEADER), IoSlice::new(&data)]).await {
error!("Failed to send data to tun interface {e}");
}
}
}
// Save the buffer as we didn't use it
buf_hold = Some(buf);
}
read_result = tun.read(buf.buffer_mut()) => {
let rr = read_result.map(|n| {
buf.set_size(n);
// Trim header
buf.buffer_mut().copy_within(4.., 0);
buf.set_size(n-4);
buf
});
if tun_stream.send(rr).is_err() {
error!("Could not forward data to tun stream, receiver is gone");
break;
};
}
}
}
info!("Stop reading from / writing to tun interface");
});
Ok((
tokio_stream::wrappers::UnboundedReceiverStream::new(stream_receiver),
tokio_util::sync::PollSender::new(tun_sink),
))
}
/// Create a new TUN interface
fn create_tun_interface(tun_fd: i32) -> Result<tun::AsyncDevice, Box<dyn std::error::Error>> {
let mut config = tun::Configuration::default();
config
.layer(tun::Layer::L3)
.mtu(LINK_MTU)
.queues(1)
.raw_fd(tun_fd)
.up();
let tun = tun::create_as_async(&config)?;
Ok(tun)
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun/linux.rs | mycelium/src/tun/linux.rs | //! Linux specific tun interface setup.
use std::io;
use futures::{Sink, Stream, TryStreamExt};
use rtnetlink::Handle;
use tokio::{select, sync::mpsc};
use tokio_tun::{Tun, TunBuilder};
use tracing::{error, info};
use crate::crypto::PacketBuffer;
use crate::subnet::Subnet;
use crate::tun::TunConfig;
// TODO
const LINK_MTU: i32 = 1400;
/// Create a new tun interface and set required routes
///
/// # Panics
///
/// This function will panic if called outside of the context of a tokio runtime.
pub async fn new(
tun_config: TunConfig,
) -> Result<
(
impl Stream<Item = io::Result<PacketBuffer>>,
impl Sink<PacketBuffer, Error = impl std::error::Error> + Clone,
),
Box<dyn std::error::Error>,
> {
let tun = match create_tun_interface(&tun_config.name) {
Ok(tun) => tun,
Err(e) => {
error!(
"Could not create tun device named \"{}\", make sure the name is not yet in use, and you have sufficient privileges to create a network device",
tun_config.name,
);
return Err(e);
}
};
let (conn, handle, _) = rtnetlink::new_connection()?;
let netlink_task_handle = tokio::spawn(conn);
let tun_index = link_index_by_name(handle.clone(), tun_config.name).await?;
if let Err(e) = add_address(
handle.clone(),
tun_index,
Subnet::new(
tun_config.node_subnet.address(),
tun_config.route_subnet.prefix_len(),
)
.unwrap(),
)
.await
{
error!(
"Failed to add address {0} to TUN interface: {e}",
tun_config.node_subnet
);
return Err(e);
}
// We are done with our netlink connection, abort the task so we can properly clean up.
netlink_task_handle.abort();
let (tun_sink, mut sink_receiver) = mpsc::channel::<PacketBuffer>(1000);
let (tun_stream, stream_receiver) = mpsc::unbounded_channel();
// Spawn a single task to manage the TUN interface
tokio::spawn(async move {
let mut buf_hold = None;
loop {
let mut buf: PacketBuffer = buf_hold.take().unwrap_or_default();
select! {
data = sink_receiver.recv() => {
match data {
None => return,
Some(data) => {
if let Err(e) = tun.send(&data).await {
error!("Failed to send data to tun interface {e}");
}
}
}
// Save the buffer as we didn't use it
buf_hold = Some(buf);
}
read_result = tun.recv(buf.buffer_mut()) => {
let rr = read_result.map(|n| {
buf.set_size(n);
buf
});
if tun_stream.send(rr).is_err() {
error!("Could not forward data to tun stream, receiver is gone");
break;
};
}
}
}
info!("Stop reading from / writing to tun interface");
});
Ok((
tokio_stream::wrappers::UnboundedReceiverStream::new(stream_receiver),
tokio_util::sync::PollSender::new(tun_sink),
))
}
/// Create a new TUN interface
fn create_tun_interface(name: &str) -> Result<Tun, Box<dyn std::error::Error>> {
let tun = TunBuilder::new()
.name(name)
.mtu(LINK_MTU)
.queues(1)
.up()
.build()?
.pop()
.expect("Succesfully build tun interface has 1 queue");
Ok(tun)
}
/// Retrieve the link index of an interface with the given name
async fn link_index_by_name(
handle: Handle,
name: String,
) -> Result<u32, Box<dyn std::error::Error>> {
handle
.link()
.get()
.match_name(name)
.execute()
.try_next()
.await?
.map(|link_message| link_message.header.index)
.ok_or(io::Error::new(io::ErrorKind::NotFound, "link not found").into())
}
/// Add an address to an interface.
///
/// The kernel will automatically add a route entry for the subnet assigned to the interface.
async fn add_address(
handle: Handle,
link_index: u32,
subnet: Subnet,
) -> Result<(), Box<dyn std::error::Error>> {
Ok(handle
.address()
.add(link_index, subnet.address(), subnet.prefix_len())
.execute()
.await?)
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun/android.rs | mycelium/src/tun/android.rs | //! android specific tun interface setup.
use std::io::{self};
use futures::{Sink, Stream};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
select,
sync::mpsc,
};
use tracing::{error, info};
use crate::crypto::PacketBuffer;
use crate::tun::TunConfig;
// TODO
const LINK_MTU: i32 = 1400;
/// Create a new tun interface and set required routes
///
/// # Panics
///
/// This function will panic if called outside of the context of a tokio runtime.
pub async fn new(
tun_config: TunConfig,
) -> Result<
(
impl Stream<Item = io::Result<PacketBuffer>>,
impl Sink<PacketBuffer, Error = impl std::error::Error> + Clone,
),
Box<dyn std::error::Error>,
> {
let name = "tun0";
let mut tun = create_tun_interface(name, tun_config.tun_fd)?;
let (tun_sink, mut sink_receiver) = mpsc::channel::<PacketBuffer>(1000);
let (tun_stream, stream_receiver) = mpsc::unbounded_channel();
// Spawn a single task to manage the TUN interface
tokio::spawn(async move {
let mut buf_hold = None;
loop {
let mut buf = if let Some(buf) = buf_hold.take() {
buf
} else {
PacketBuffer::new()
};
select! {
data = sink_receiver.recv() => {
match data {
None => return,
Some(data) => {
if let Err(e) = tun.write(&data).await {
error!("Failed to send data to tun interface {e}");
}
}
}
// Save the buffer as we didn't use it
buf_hold = Some(buf);
}
read_result = tun.read(buf.buffer_mut()) => {
let rr = read_result.map(|n| {
buf.set_size(n);
buf
});
if tun_stream.send(rr).is_err() {
error!("Could not forward data to tun stream, receiver is gone");
break;
};
}
}
}
info!("Stop reading from / writing to tun interface");
});
Ok((
tokio_stream::wrappers::UnboundedReceiverStream::new(stream_receiver),
tokio_util::sync::PollSender::new(tun_sink),
))
}
/// Create a new TUN interface
fn create_tun_interface(
name: &str,
tun_fd: i32,
) -> Result<tun::AsyncDevice, Box<dyn std::error::Error>> {
let mut config = tun::Configuration::default();
config
.name(name)
.layer(tun::Layer::L3)
.mtu(LINK_MTU)
.queues(1)
.raw_fd(tun_fd)
.up();
info!("create_tun_interface");
let tun = match tun::create_as_async(&config) {
Ok(tun) => tun,
Err(err) => {
error!("[android]failed to create tun interface: {err}");
return Err(Box::new(err));
}
};
Ok(tun)
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun/windows.rs | mycelium/src/tun/windows.rs | use std::{io, ops::Deref, sync::Arc};
use futures::{Sink, Stream};
use tokio::sync::mpsc;
use tracing::{error, info, warn};
use crate::tun::TunConfig;
use crate::{crypto::PacketBuffer, subnet::Subnet};
// TODO
const LINK_MTU: usize = 1400;
/// Type of the tunnel used, specified when creating the tunnel.
const WINDOWS_TUNNEL_TYPE: &str = "Mycelium";
pub async fn new(
tun_config: TunConfig,
) -> Result<
(
impl Stream<Item = io::Result<PacketBuffer>>,
impl Sink<PacketBuffer, Error = impl std::error::Error> + Clone,
),
Box<dyn std::error::Error>,
> {
// SAFETY: for now we assume a valid wintun.dll file exists in the root directory when we are
// running this.
let wintun = unsafe { wintun::load() }?;
let wintun_version = match wintun::get_running_driver_version(&wintun) {
Ok(v) => format!("{v}"),
Err(e) => {
warn!("Failed to read wintun.dll version: {e}");
"Unknown".to_string()
}
};
info!("Loaded wintun.dll - running version {wintun_version}");
let tun = wintun::Adapter::create(&wintun, &tun_config.name, WINDOWS_TUNNEL_TYPE, None)?;
info!("Created wintun tunnel interface");
// Configure created network adapter.
set_adapter_mtu(&tun_config.name, LINK_MTU)?;
// Set address, this will use a `netsh` command under the hood unfortunately.
// TODO: fix in library
// tun.set_network_addresses_tuple(node_subnet.address(), route_subnet.mask(), None)?;
add_address(
&tun_config.name,
tun_config.node_subnet,
tun_config.route_subnet,
)?;
// Build 2 separate sessions - one for receiving, one for sending.
let rx_session = Arc::new(tun.start_session(wintun::MAX_RING_CAPACITY)?);
let tx_session = rx_session.clone();
let (tun_sink, mut sink_receiver) = mpsc::channel::<PacketBuffer>(1000);
let (tun_stream, stream_receiver) = mpsc::unbounded_channel();
// Ingress path
tokio::task::spawn_blocking(move || {
loop {
let packet = rx_session
.receive_blocking()
.map(|tun_packet| {
let mut buffer = PacketBuffer::new();
// SAFETY: The configured MTU is smaller than the static PacketBuffer size.
let packet_len = tun_packet.bytes().len();
buffer.buffer_mut()[..packet_len].copy_from_slice(tun_packet.bytes());
buffer.set_size(packet_len);
buffer
})
.map_err(wintun_to_io_error);
if tun_stream.send(packet).is_err() {
error!("Could not forward data to tun stream, receiver is gone");
break;
};
}
info!("Stop reading from tun interface");
});
// Egress path
tokio::task::spawn_blocking(move || {
loop {
match sink_receiver.blocking_recv() {
None => break,
Some(data) => {
let mut tun_packet =
match tx_session.allocate_send_packet(data.deref().len() as u16) {
Ok(tun_packet) => tun_packet,
Err(e) => {
error!("Could not allocate packet on TUN: {e}");
break;
}
};
// SAFETY: packet allocation is done on the length of &data.
tun_packet.bytes_mut().copy_from_slice(&data);
tx_session.send_packet(tun_packet);
}
}
}
info!("Stop writing to tun interface");
});
Ok((
tokio_stream::wrappers::UnboundedReceiverStream::new(stream_receiver),
tokio_util::sync::PollSender::new(tun_sink),
))
}
/// Helper method to convert a [`wintun::Error`] to a [`std::io::Error`].
fn wintun_to_io_error(err: wintun::Error) -> io::Error {
match err {
wintun::Error::Io(e) => e,
_ => io::Error::other("unknown wintun error"),
}
}
/// Set an address on an interface by shelling out to `netsh`
///
/// We assume this is an IPv6 address.
fn add_address(adapter_name: &str, subnet: Subnet, route_subnet: Subnet) -> Result<(), io::Error> {
let exit_code = std::process::Command::new("netsh")
.args([
"interface",
"ipv6",
"set",
"address",
adapter_name,
&format!("{}/{}", subnet.address(), route_subnet.prefix_len()),
])
.spawn()?
.wait()?;
match exit_code.code() {
Some(0) => Ok(()),
Some(x) => Err(io::Error::from_raw_os_error(x)),
None => {
warn!("Failed to determine `netsh` exit status");
Ok(())
}
}
}
fn set_adapter_mtu(name: &str, mtu: usize) -> Result<(), io::Error> {
let args = &[
"interface",
"ipv6",
"set",
"subinterface",
&format!("\"{name}\""),
&format!("mtu={mtu}"),
"store=persistent",
];
let exit_code = std::process::Command::new("netsh")
.args(args)
.spawn()?
.wait()?;
match exit_code.code() {
Some(0) => Ok(()),
Some(x) => Err(io::Error::from_raw_os_error(x)),
None => {
warn!("Failed to determine `netsh` exit status");
Ok(())
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/tun/darwin.rs | mycelium/src/tun/darwin.rs | //! macos specific tun interface setup.
use std::{
ffi::CString,
io::{self, IoSlice},
net::{IpAddr, Ipv6Addr},
os::fd::AsRawFd,
str::FromStr,
};
use futures::{Sink, Stream};
use nix::sys::socket::SockaddrIn6;
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
select,
sync::mpsc,
};
use tracing::{debug, error, info, warn};
use crate::crypto::PacketBuffer;
use crate::subnet::Subnet;
use crate::tun::TunConfig;
// TODO
const LINK_MTU: i32 = 1400;
/// The 4 byte packet header written before a packet is sent on the TUN
// TODO: figure out structure and values, but for now this seems to work.
const HEADER: [u8; 4] = [0, 0, 0, 30];
const IN6_IFF_NODAD: u32 = 0x0020; // netinet6/in6_var.h
const IN6_IFF_SECURED: u32 = 0x0400; // netinet6/in6_var.h
const ND6_INFINITE_LIFETIME: u32 = 0xFFFFFFFF; // netinet6/nd6.h
/// Wrapper for an OS-specific interface name
// Allways hold the max size of an interface. This includes the 0 byte for termination.
// repr transparent so this can be used with libc calls.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct IfaceName([libc::c_char; libc::IFNAMSIZ as _]);
/// Wrapped interface handle.
#[derive(Clone, Copy)]
struct Iface {
/// Name of the interface
iface_name: IfaceName,
}
/// Struct to add IPv6 route to interface
#[repr(C)]
pub struct IfaliasReq {
ifname: IfaceName,
addr: SockaddrIn6,
dst_addr: SockaddrIn6,
mask: SockaddrIn6,
flags: u32,
lifetime: AddressLifetime,
}
#[repr(C)]
pub struct AddressLifetime {
/// Not used for userspace -> kernel space
expire: libc::time_t,
/// Not used for userspace -> kernel space
preferred: libc::time_t,
vltime: u32,
pltime: u32,
}
/// Create a new tun interface and set required routes
///
/// # Panics
///
/// This function will panic if called outside of the context of a tokio runtime.
pub async fn new(
tun_config: TunConfig,
) -> Result<
(
impl Stream<Item = io::Result<PacketBuffer>>,
impl Sink<PacketBuffer, Error = impl std::error::Error> + Clone,
),
Box<dyn std::error::Error>,
> {
let tun_name = find_available_utun_name(&tun_config.name)?;
let mut tun = match create_tun_interface(&tun_name) {
Ok(tun) => tun,
Err(e) => {
error!(tun_name=%tun_name, err=%e, "Could not create TUN device. Make sure the name is not yet in use, and you have sufficient privileges to create a network device");
return Err(e);
}
};
let iface = Iface::by_name(&tun_name)?;
iface.add_address(tun_config.node_subnet, tun_config.route_subnet)?;
let (tun_sink, mut sink_receiver) = mpsc::channel::<PacketBuffer>(1000);
let (tun_stream, stream_receiver) = mpsc::unbounded_channel();
// Spawn a single task to manage the TUN interface
tokio::spawn(async move {
let mut buf_hold = None;
loop {
let mut buf: PacketBuffer = buf_hold.take().unwrap_or_default();
select! {
data = sink_receiver.recv() => {
match data {
None => return,
Some(data) => {
// We need to append a 4 byte header here
if let Err(e) = tun.write_vectored(&[IoSlice::new(&HEADER), IoSlice::new(&data)]).await {
error!("Failed to send data to tun interface {e}");
}
}
}
// Save the buffer as we didn't use it
buf_hold = Some(buf);
}
read_result = tun.read(buf.buffer_mut()) => {
let rr = read_result.map(|n| {
buf.set_size(n);
// Trim header
buf.buffer_mut().copy_within(4.., 0);
buf.set_size(n-4);
buf
});
// On Mac, packets from the tun ip to the tun ip (e.g. ping tun ip) need to be
// passed explicitly to the TUN interface again for them to be picked up by the
// kernel, since they are not routed on the loopback interface.
if let Ok(ref buf) = rr {
if buf.len() >= 40 && tun_config.node_subnet.contains_ip(IpAddr::V6(Ipv6Addr::from(<[u8;16] as TryFrom<&[u8]>>::try_from(&buf[24..40]).expect("Valid 16 byte buffer; qed")))) {
if let Err(err) = tun.write_vectored(&[IoSlice::new(&HEADER), IoSlice::new(&buf)]).await {
error!(%err, "Failed to send data to tun interface");
}
}
}
if tun_stream.send(rr).is_err() {
error!("Could not forward data to tun stream, receiver is gone");
break;
};
}
}
}
info!("Stop reading from / writing to tun interface");
});
Ok((
tokio_stream::wrappers::UnboundedReceiverStream::new(stream_receiver),
tokio_util::sync::PollSender::new(tun_sink),
))
}
/// Checks if a name is valid for a utun interface
///
/// Rules:
/// - must start with "utun"
/// - followed by only digits
/// - 15 chars total at most
fn validate_utun_name(input: &str) -> bool {
if input.len() > 15 {
return false;
}
if !input.starts_with("utun") {
return false;
}
input
.strip_prefix("utun")
.expect("We just checked that name starts with 'utun' so this is always some")
.parse::<u64>()
.is_ok()
}
/// Validates the user-supplied TUN interface name
///
/// - If the name is valid and not in use, it will be the TUN name
/// - If the name is valid but already in use, an error will be thrown
/// - If the name is not valid, we try to find the first freely available TUN name
fn find_available_utun_name(preferred_name: &str) -> Result<String, io::Error> {
// Get the list of existing utun interfaces.
let interfaces = netdev::get_interfaces();
let utun_interfaces: Vec<_> = interfaces
.iter()
.filter_map(|iface| {
if iface.name.starts_with("utun") {
Some(iface.name.as_str())
} else {
None
}
})
.collect();
// Check if the preferred name is valid and not in use.
if validate_utun_name(preferred_name) && !utun_interfaces.contains(&preferred_name) {
return Ok(preferred_name.to_string());
}
// If the preferred name is invalid or already in use, find the first available utun name.
if !validate_utun_name(preferred_name) {
warn!(tun_name=%preferred_name, "Invalid TUN name. Looking for the first available TUN name");
} else {
warn!(tun_name=%preferred_name, "TUN name already in use. Looking for the next available TUN name.");
}
// Extract and sort the utun numbers.
let mut utun_numbers = utun_interfaces
.iter()
.filter_map(|iface| iface[4..].parse::<usize>().ok())
.collect::<Vec<_>>();
utun_numbers.sort_unstable();
// Find the first available utun index.
let mut first_free_index = 0;
for (i, &num) in utun_numbers.iter().enumerate() {
if num != i {
first_free_index = i;
break;
}
first_free_index = i + 1;
}
// Create new utun name based on the first free index.
let new_utun_name = format!("utun{}", first_free_index);
if validate_utun_name(&new_utun_name) {
info!(tun_name=%new_utun_name, "Automatically assigned TUN name.");
Ok(new_utun_name)
} else {
error!("No available TUN name found");
Err(io::Error::new(
io::ErrorKind::Other,
"No available TUN name",
))
}
}
/// Create a new TUN interface
fn create_tun_interface(name: &str) -> Result<tun::AsyncDevice, Box<dyn std::error::Error>> {
let mut config = tun::Configuration::default();
config
.name(name)
.layer(tun::Layer::L3)
.mtu(LINK_MTU)
.queues(1)
.up();
let tun = tun::create_as_async(&config)?;
Ok(tun)
}
impl IfaceName {
fn as_ptr(&self) -> *const libc::c_char {
self.0.as_ptr()
}
}
impl FromStr for IfaceName {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Equal len is not allowed because we need to add the 0 byte terminator.
if s.len() >= libc::IFNAMSIZ {
return Err("Interface name too long");
}
// TODO: Is this err possible in a &str?
let raw_name = CString::new(s).map_err(|_| "Interface name contains 0 byte")?;
let mut backing = [0; libc::IFNAMSIZ];
let name_bytes = raw_name.to_bytes_with_nul();
backing[..name_bytes.len()].copy_from_slice(name_bytes);
// SAFETY: This doesn't do any weird things with the bits when converting from u8 to i8
let backing = unsafe { std::mem::transmute::<[u8; 16], [i8; 16]>(backing) };
Ok(Self(backing))
}
}
impl Iface {
/// Retrieve the link index of an interface with the given name
fn by_name(name: &str) -> Result<Iface, Box<dyn std::error::Error>> {
let iface_name: IfaceName = name.parse()?;
match unsafe { libc::if_nametoindex(iface_name.as_ptr()) } {
0 => Err(std::io::Error::new(
std::io::ErrorKind::NotFound,
"interface not found",
))?,
_ => Ok(Iface { iface_name }),
}
}
/// Add an address to an interface.
///
/// # Panics
///
/// Only IPv6 is supported, this function will panic when adding an IPv4 subnet.
fn add_address(
&self,
subnet: Subnet,
route_subnet: Subnet,
) -> Result<(), Box<dyn std::error::Error>> {
let addr = if let IpAddr::V6(addr) = subnet.address() {
addr
} else {
panic!("IPv4 subnets are not supported");
};
let mask_addr = if let IpAddr::V6(mask) = route_subnet.mask() {
mask
} else {
// We already know we are IPv6 here
panic!("IPv4 routes are not supported");
};
let sock_addr = SockaddrIn6::from(std::net::SocketAddrV6::new(addr, 0, 0, 0));
let mask = SockaddrIn6::from(std::net::SocketAddrV6::new(mask_addr, 0, 0, 0));
let req = IfaliasReq {
ifname: self.iface_name,
addr: sock_addr,
// SAFETY: kernel expects this to be fully zeroed
dst_addr: unsafe { std::mem::zeroed() },
mask,
flags: IN6_IFF_NODAD | IN6_IFF_SECURED,
lifetime: AddressLifetime {
expire: 0,
preferred: 0,
vltime: ND6_INFINITE_LIFETIME,
pltime: ND6_INFINITE_LIFETIME,
},
};
let sock = random_socket()?;
match unsafe { siocaifaddr_in6(sock.as_raw_fd(), &req) } {
Err(e) => {
error!("Failed to add ipv6 addresst to interface {e}");
Err(std::io::Error::last_os_error())?
}
Ok(_) => {
debug!("Added {subnet} to tun interfacel");
Ok(())
}
}
}
}
// Create a socket to talk to the kernel.
fn random_socket() -> Result<std::net::UdpSocket, std::io::Error> {
std::net::UdpSocket::bind("[::1]:0")
}
nix::ioctl_write_ptr!(
/// Add an IPv6 subnet to an interface.
siocaifaddr_in6,
b'i',
26,
IfaliasReq
);
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/iter_mut.rs | mycelium/src/routing_table/iter_mut.rs | use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::trace;
use crate::subnet::Subnet;
use super::{
subnet_entry::SubnetEntry, RouteKey, RouteList, RoutingTableInner, RoutingTableOplogEntry,
};
use std::{
net::Ipv6Addr,
sync::{Arc, MutexGuard},
};
/// An iterator over a [`routing table`](super::RoutingTable), yielding mutable access to the
/// entries in the table.
pub struct RoutingTableIterMut<'a, 'b> {
write_guard:
&'b mut MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
iter: ip_network_table_deps_treebitmap::Iter<'b, Ipv6Addr, Arc<SubnetEntry>>,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
}
impl<'a, 'b> RoutingTableIterMut<'a, 'b> {
pub(super) fn new(
write_guard: &'b mut MutexGuard<
'a,
left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>,
>,
iter: ip_network_table_deps_treebitmap::Iter<'b, Ipv6Addr, Arc<SubnetEntry>>,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
) -> Self {
Self {
write_guard,
iter,
expired_route_entry_sink,
cancel_token,
}
}
/// Get the next item in this iterator. This is not implemented as the [`Iterator`] trait,
/// since we hand out items which are lifetime bound to this struct.
pub fn next<'c>(&'c mut self) -> Option<(Subnet, RoutingTableIterMutEntry<'a, 'c>)> {
for (ip, prefix_size, rl) in self.iter.by_ref() {
if matches!(&**rl, SubnetEntry::Exists { .. }) {
let subnet = Subnet::new(ip.into(), prefix_size as u8)
.expect("Routing table contains valid subnets");
return Some((
subnet,
RoutingTableIterMutEntry {
writer: self.write_guard,
store: Arc::clone(rl),
subnet,
expired_route_entry_sink: self.expired_route_entry_sink.clone(),
cancellation_token: self.cancel_token.clone(),
},
));
};
}
None
}
}
/// A smart pointer giving mutable access to a [`RouteList`].
pub struct RoutingTableIterMutEntry<'a, 'b> {
writer:
&'b mut MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
/// Owned copy of the RouteList, this is populated once mutable access the the RouteList has
/// been requested.
store: Arc<SubnetEntry>,
/// The subnet we are writing to.
subnet: Subnet,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancellation_token: CancellationToken,
}
impl RoutingTableIterMutEntry<'_, '_> {
/// Updates the routes for this entry
pub fn update_routes<F: FnMut(&mut RouteList, &mpsc::Sender<RouteKey>, &CancellationToken)>(
&mut self,
mut op: F,
) {
let mut delete = false;
if let SubnetEntry::Exists { list } = &*self.store {
list.rcu(|rl| {
let mut new_val = rl.clone();
let v = Arc::make_mut(&mut new_val);
op(v, &self.expired_route_entry_sink, &self.cancellation_token);
delete = v.is_empty();
new_val
});
if delete {
trace!(subnet = %self.subnet, "Queue subnet for deletion since route list is now empty");
self.writer
.append(RoutingTableOplogEntry::Delete(self.subnet));
}
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/route_list.rs | mycelium/src/routing_table/route_list.rs | use std::{
ops::{Deref, DerefMut, Index, IndexMut},
sync::Arc,
};
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error};
use crate::{crypto::SharedSecret, peer::Peer, task::AbortHandle};
use super::{RouteEntry, RouteKey};
/// The RouteList holds all routes for a specific subnet.
// By convention, if a route is selected, it will always be at index 0 in the list.
#[derive(Clone)]
pub struct RouteList {
list: Vec<(Arc<AbortHandle>, RouteEntry)>,
shared_secret: SharedSecret,
}
impl RouteList {
/// Create a new empty RouteList
pub(crate) fn new(shared_secret: SharedSecret) -> Self {
Self {
list: Vec::new(),
shared_secret,
}
}
/// Returns the [`SharedSecret`] used for encryption of packets to and from the associated
/// [`Subnet`].
#[inline]
pub fn shared_secret(&self) -> &SharedSecret {
&self.shared_secret
}
/// Checks if there are any actual routes in the list.
#[inline]
pub fn is_empty(&self) -> bool {
self.list.is_empty()
}
/// Returns the selected route for the [`Subnet`] this is the `RouteList` for, if one exists.
pub fn selected(&self) -> Option<&RouteEntry> {
self.list
.first()
.map(|(_, re)| re)
.and_then(|re| if re.selected() { Some(re) } else { None })
}
/// Returns an iterator over the `RouteList`.
///
/// The iterator yields all [`route entries`](RouteEntry) in the list.
pub fn iter(&self) -> RouteListIter<'_> {
RouteListIter::new(self)
}
/// Returns an iterator over the `RouteList` yielding mutable access to the elements.
///
/// The iterator yields all [`route entries`](RouteEntry) in the list.
pub fn iter_mut(&mut self) -> impl Iterator<Item = RouteGuard<'_>> {
self.list.iter_mut().map(|item| RouteGuard { item })
}
/// Removes a [`RouteEntry`] from the `RouteList`.
///
/// This does nothing if the neighbour does not exist.
pub fn remove(&mut self, neighbour: &Peer) {
let Some(pos) = self
.list
.iter()
.position(|re| re.1.neighbour() == neighbour)
else {
return;
};
let old = self.list.swap_remove(pos);
old.0.abort();
}
/// Swaps the position of 2 `RouteEntry`s in the route list.
pub fn swap(&mut self, first: usize, second: usize) {
self.list.swap(first, second)
}
pub fn get_mut(&mut self, index: usize) -> Option<&mut RouteEntry> {
self.list.get_mut(index).map(|(_, re)| re)
}
/// Insert a new [`RouteEntry`] in the `RouteList`.
pub fn insert(
&mut self,
re: RouteEntry,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancellation_token: CancellationToken,
) {
let expiration = re.expires();
let rk = RouteKey::new(re.source().subnet(), re.neighbour().clone());
let abort_handle = Arc::new(
tokio::spawn(async move {
tokio::select! {
_ = cancellation_token.cancelled() => {}
_ = tokio::time::sleep_until(expiration) => {
debug!(route_key = %rk, "Expired route entry for route key");
if let Err(e) = expired_route_entry_sink.send(rk).await {
error!(route_key = %e.0, "Failed to send expired route key on cleanup channel");
}
}
}
})
.abort_handle().into(),
);
self.list.push((abort_handle, re));
}
}
pub struct RouteGuard<'a> {
item: &'a mut (Arc<AbortHandle>, RouteEntry),
}
impl Deref for RouteGuard<'_> {
type Target = RouteEntry;
fn deref(&self) -> &Self::Target {
&self.item.1
}
}
impl DerefMut for RouteGuard<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.item.1
}
}
impl RouteGuard<'_> {
pub fn set_expires(
&mut self,
expires: tokio::time::Instant,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancellation_token: CancellationToken,
) {
let re = &mut self.item.1;
re.set_expires(expires);
let expiration = re.expires();
let rk = RouteKey::new(re.source().subnet(), re.neighbour().clone());
let abort_handle = Arc::new(
tokio::spawn(async move {
tokio::select! {
_ = cancellation_token.cancelled() => {}
_ = tokio::time::sleep_until(expiration) => {
debug!(route_key = %rk, "Expired route entry for route key");
if let Err(e) = expired_route_entry_sink.send(rk).await {
error!(route_key = %e.0, "Failed to send expired route key on cleanup channel");
}
}
}
})
.abort_handle().into(),
);
self.item.0.abort();
self.item.0 = abort_handle;
}
}
impl Index<usize> for RouteList {
type Output = RouteEntry;
fn index(&self, index: usize) -> &Self::Output {
&self.list[index].1
}
}
impl IndexMut<usize> for RouteList {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.list[index].1
}
}
pub struct RouteListIter<'a> {
route_list: &'a RouteList,
idx: usize,
}
impl<'a> RouteListIter<'a> {
/// Create a new `RouteListIter` which will iterate over the given [`RouteList`].
fn new(route_list: &'a RouteList) -> Self {
Self { route_list, idx: 0 }
}
}
impl<'a> Iterator for RouteListIter<'a> {
type Item = &'a RouteEntry;
fn next(&mut self) -> Option<Self::Item> {
self.idx += 1;
self.route_list.list.get(self.idx - 1).map(|(_, re)| re)
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/queried_subnet.rs | mycelium/src/routing_table/queried_subnet.rs | use tokio::time::Instant;
use crate::subnet::Subnet;
/// Information about a [`subnet`](Subnet) which is currently in the queried state
#[derive(Debug, Clone, Copy)]
pub struct QueriedSubnet {
/// The subnet which was queried.
subnet: Subnet,
/// Time at which the query expires. If no feasible updates come in before this, the subnet is
/// marked as no route temporarily.
query_expires: Instant,
}
impl QueriedSubnet {
/// Create a new `QueriedSubnet` for the given [`subnet`](Subnet), expiring at the provided
/// [`time`](Instant).
pub fn new(subnet: Subnet, query_expires: Instant) -> Self {
Self {
subnet,
query_expires,
}
}
/// The [`subnet`](Subnet) being queried.
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// The moment this query expires. If no route is discovered before this, the [`subnet`](Subnet)
/// is marked as no route temporarily.
pub fn query_expires(&self) -> Instant {
self.query_expires
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/subnet_entry.rs | mycelium/src/routing_table/subnet_entry.rs | use arc_swap::ArcSwap;
use super::RouteList;
/// An entry for a [Subnet](crate::subnet::Subnet) in the routing table.
#[allow(dead_code)]
pub enum SubnetEntry {
/// Routes for the given subnet exist
Exists { list: ArcSwap<RouteList> },
/// Routes are being queried from peers for the given subnet, but we haven't gotten a response
/// yet
Queried { query_timeout: tokio::time::Instant },
/// We queried our peers for the subnet, but we didn't get a valid response in time, so there
/// is for sure no route to the subnet.
NoRoute { expiry: tokio::time::Instant },
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/no_route.rs | mycelium/src/routing_table/no_route.rs | use tokio::time::Instant;
use crate::subnet::Subnet;
/// Information about a [`subnet`](Subnet) which is currently marked as NoRoute.
#[derive(Debug, Clone, Copy)]
pub struct NoRouteSubnet {
/// The subnet which has no route.
subnet: Subnet,
/// Time at which the entry expires. After this timeout expires, the entry is removed and a new
/// query can be performed.
entry_expires: Instant,
}
impl NoRouteSubnet {
/// Create a new `NoRouteSubnet` for the given [`subnet`](Subnet), expiring at the provided
/// [`time`](Instant).
pub fn new(subnet: Subnet, entry_expires: Instant) -> Self {
Self {
subnet,
entry_expires,
}
}
/// The [`subnet`](Subnet) for which there is no route.
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// The moment this entry expires. Once this timeout expires, a new query can be launched for
/// route discovery for this [`subnet`](Subnet).
pub fn entry_expires(&self) -> Instant {
self.entry_expires
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/route_entry.rs | mycelium/src/routing_table/route_entry.rs | use tokio::time::Instant;
use crate::{
metric::Metric, peer::Peer, router_id::RouterId, sequence_number::SeqNo,
source_table::SourceKey,
};
/// RouteEntry holds all relevant information about a specific route. Since this includes the next
/// hop, a single subnet can have multiple route entries.
#[derive(Clone)]
pub struct RouteEntry {
source: SourceKey,
neighbour: Peer,
metric: Metric,
seqno: SeqNo,
selected: bool,
expires: Instant,
}
impl RouteEntry {
/// Create a new `RouteEntry` with the provided values.
pub fn new(
source: SourceKey,
neighbour: Peer,
metric: Metric,
seqno: SeqNo,
selected: bool,
expires: Instant,
) -> Self {
Self {
source,
neighbour,
metric,
seqno,
selected,
expires,
}
}
/// Return the [`SourceKey`] for this `RouteEntry`.
pub fn source(&self) -> SourceKey {
self.source
}
/// Return the [`neighbour`](Peer) used as next hop for this `RouteEntry`.
pub fn neighbour(&self) -> &Peer {
&self.neighbour
}
/// Return the [`Metric`] of this `RouteEntry`.
pub fn metric(&self) -> Metric {
self.metric
}
/// Return the [`sequence number`](SeqNo) for the `RouteEntry`.
pub fn seqno(&self) -> SeqNo {
self.seqno
}
/// Return if this [`RouteEntry`] is selected.
pub fn selected(&self) -> bool {
self.selected
}
/// Return the [`Instant`] when this `RouteEntry` expires if it doesn't get updated before
/// then.
pub fn expires(&self) -> Instant {
self.expires
}
/// Set the [`SourceKey`] for this `RouteEntry`.
pub fn set_source(&mut self, source: SourceKey) {
self.source = source;
}
/// Set the [`RouterId`] for this `RouteEntry`.
pub fn set_router_id(&mut self, router_id: RouterId) {
self.source.set_router_id(router_id)
}
/// Sets the [`neighbour`](Peer) for this `RouteEntry`.
pub fn set_neighbour(&mut self, neighbour: Peer) {
self.neighbour = neighbour;
}
/// Sets the [`Metric`] for this `RouteEntry`.
pub fn set_metric(&mut self, metric: Metric) {
self.metric = metric;
}
/// Sets the [`sequence number`](SeqNo) for this `RouteEntry`.
pub fn set_seqno(&mut self, seqno: SeqNo) {
self.seqno = seqno;
}
/// Sets if this `RouteEntry` is the selected route for the associated
/// [`Subnet`](crate::subnet::Subnet).
pub fn set_selected(&mut self, selected: bool) {
self.selected = selected;
}
/// Sets the expiration time for this [`RouteEntry`].
pub(super) fn set_expires(&mut self, expires: Instant) {
self.expires = expires;
}
}
// Manual Debug implementation since SharedSecret is explicitly not Debug
impl std::fmt::Debug for RouteEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("RouteEntry")
.field("source", &self.source)
.field("neighbour", &self.neighbour)
.field("metric", &self.metric)
.field("seqno", &self.seqno)
.field("selected", &self.selected)
.field("expires", &self.expires)
.finish()
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/iter.rs | mycelium/src/routing_table/iter.rs | use std::{net::Ipv6Addr, sync::Arc};
use crate::subnet::Subnet;
use super::{subnet_entry::SubnetEntry, NoRouteSubnet, QueriedSubnet, RouteListReadGuard};
/// An iterator over a [`routing table`](super::RoutingTable) giving read only access to
/// [`RouteList`]'s.
pub struct RoutingTableIter<'a>(
    // Underlying prefix-tree iterator over all subnet entries; entries which are not
    // `SubnetEntry::Exists` are filtered out by the `Iterator` impl below.
    ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableIter<'a> {
    /// Create a new `RoutingTableIter` which will iterate over all entries in a
    /// [`RoutingTable`](super::RoutingTable).
    ///
    /// `pub(super)`: only the routing table module can hand out iterators.
    pub(super) fn new(
        inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
    ) -> Self {
        Self(inner)
    }
}
impl Iterator for RoutingTableIter<'_> {
    type Item = (Subnet, RouteListReadGuard);

    /// Yield the next subnet with an existing route list, skipping queried and
    /// no-route entries.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.find_map(|(ip, prefix_size, entry)| match &**entry {
            SubnetEntry::Exists { list } => Some((
                Subnet::new(ip.into(), prefix_size as u8)
                    .expect("Routing table contains valid subnets"),
                RouteListReadGuard { inner: list.load() },
            )),
            _ => None,
        })
    }
}
/// Iterator over queried routes in the routing table.
pub struct RoutingTableQueryIter<'a>(
    // Wrapped prefix-tree iterator; only `SubnetEntry::Queried` entries are yielded.
    ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableQueryIter<'a> {
    /// Create a new `RoutingTableQueryIter` which will iterate over all queried entries in a
    /// [`RoutingTable`](super::RoutingTable).
    pub(super) fn new(
        inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
    ) -> Self {
        Self(inner)
    }
}
impl Iterator for RoutingTableQueryIter<'_> {
    type Item = QueriedSubnet;

    /// Yield the next subnet which currently has a query in flight.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.find_map(|(ip, prefix_size, entry)| match &**entry {
            SubnetEntry::Queried { query_timeout } => Some(QueriedSubnet::new(
                Subnet::new(ip.into(), prefix_size as u8)
                    .expect("Routing table contains valid subnets"),
                *query_timeout,
            )),
            _ => None,
        })
    }
}
/// Iterator for entries which are explicitly marked as "no route" in the routing table.
pub struct RoutingTableNoRouteIter<'a>(
    // Wrapped prefix-tree iterator; only `SubnetEntry::NoRoute` entries are yielded.
    ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableNoRouteIter<'a> {
    /// Create a new `RoutingTableNoRouteIter` which will iterate over all entries in a
    /// [`RoutingTable`](super::RoutingTable) which are explicitly marked as `NoRoute`.
    pub(super) fn new(
        inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
    ) -> Self {
        Self(inner)
    }
}
impl Iterator for RoutingTableNoRouteIter<'_> {
    type Item = NoRouteSubnet;

    /// Yield the next subnet explicitly marked as having no route.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.find_map(|(ip, prefix_size, entry)| match &**entry {
            SubnetEntry::NoRoute { expiry } => Some(NoRouteSubnet::new(
                Subnet::new(ip.into(), prefix_size as u8)
                    .expect("Routing table contains valid subnets"),
                *expiry,
            )),
            _ => None,
        })
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/routing_table/route_key.rs | mycelium/src/routing_table/route_key.rs | use crate::{peer::Peer, subnet::Subnet};
/// RouteKey uniquely defines a route via a peer.
#[derive(Debug, Clone, PartialEq)]
pub struct RouteKey {
    /// Destination subnet of the route.
    subnet: Subnet,
    /// Peer through which the subnet is reachable.
    neighbour: Peer,
}
impl RouteKey {
    /// Creates a new `RouteKey` for the given [`Subnet`] and [`neighbour`](Peer).
    #[inline]
    pub fn new(subnet: Subnet, neighbour: Peer) -> Self {
        Self { subnet, neighbour }
    }

    /// Gets the [`Subnet`] identified by this `RouteKey`.
    #[inline]
    pub fn subnet(&self) -> Subnet {
        self.subnet
    }

    /// Gets the [`neighbour`](Peer) identified by this `RouteKey`.
    #[inline]
    pub fn neighbour(&self) -> &Peer {
        &self.neighbour
    }
}
impl std::fmt::Display for RouteKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `write!` expands to the same `write_fmt(format_args!(…))` call the
        // previous hand-written version used.
        write!(
            f,
            "{} via {}",
            self.subnet,
            self.neighbour.connection_identifier()
        )
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/connection/tls.rs | mycelium/src/connection/tls.rs | use std::{
io,
net::SocketAddr,
sync::{atomic::AtomicU64, Arc},
};
use futures::{
stream::{SplitSink, SplitStream},
SinkExt, StreamExt,
};
use tokio::net::TcpStream;
use tokio_util::codec::Framed;
use crate::{
connection::Tracked,
packet::{self, Packet},
};
/// A wrapper around an asynchronous TLS stream.
pub struct TlsStream {
    /// Packet codec layered over the byte-counting TLS transport.
    framed: Framed<Tracked<tokio_openssl::SslStream<TcpStream>>, packet::Codec>,
    /// Local socket address, captured at construction time.
    local_addr: SocketAddr,
    /// Remote socket address, captured at construction time.
    peer_addr: SocketAddr,
}
impl TlsStream {
    /// Create a new wrapped [`TlsStream`] which implements the [`Connection`](super::Connection) trait.
    ///
    /// # Errors
    ///
    /// Returns an error if either socket address of the underlying TCP stream
    /// can't be retrieved.
    pub fn new(
        tls_stream: tokio_openssl::SslStream<TcpStream>,
        read: Arc<AtomicU64>,
        write: Arc<AtomicU64>,
    ) -> io::Result<Self> {
        // Capture both endpoint addresses first (same order as before: local, then
        // peer), since constructing the codec moves the stream.
        let local_addr = tls_stream.get_ref().local_addr()?;
        let peer_addr = tls_stream.get_ref().peer_addr()?;
        let framed = Framed::new(Tracked::new(read, write, tls_stream), packet::Codec::new());
        Ok(Self {
            framed,
            local_addr,
            peer_addr,
        })
    }
}
impl super::Connection for TlsStream {
    type ReadHalf = TlsStreamReadHalf;
    type WriteHalf = TlsStreamWriteHalf;

    /// Queue a data packet on the framed stream without flushing.
    async fn feed_data_packet(&mut self, packet: crate::packet::DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }

    /// Queue a control packet on the framed stream without flushing.
    async fn feed_control_packet(
        &mut self,
        packet: crate::packet::ControlPacket,
    ) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }

    /// Flush all previously fed packets to the underlying TLS stream.
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }

    /// Receive the next packet, or `None` once the stream is exhausted.
    async fn receive_packet(&mut self) -> Option<io::Result<crate::packet::Packet>> {
        self.framed.next().await
    }

    /// Human readable identifier based on both (cached) endpoint addresses.
    fn identifier(&self) -> Result<String, io::Error> {
        Ok(format!("TLS {} <-> {}", self.local_addr, self.peer_addr))
    }

    /// Fixed processing cost of this link. IPv4-mapped IPv6 peers are charged the
    /// IPv4 cost rather than the IPv6 one.
    fn static_link_cost(&self) -> Result<u16, io::Error> {
        Ok(match self.peer_addr {
            SocketAddr::V4(_) => super::PACKET_PROCESSING_COST_IP4_TCP,
            SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
                super::PACKET_PROCESSING_COST_IP4_TCP
            }
            SocketAddr::V6(_) => super::PACKET_PROCESSING_COST_IP6_TCP,
        })
    }

    /// Split the connection into independently usable read and write halves.
    fn split(self) -> (Self::ReadHalf, Self::WriteHalf) {
        let (tx, rx) = self.framed.split();
        (
            TlsStreamReadHalf { framed: rx },
            TlsStreamWriteHalf { framed: tx },
        )
    }
}
/// Read half of a split [`TlsStream`], yielding incoming packets.
pub struct TlsStreamReadHalf {
    framed: SplitStream<Framed<Tracked<tokio_openssl::SslStream<TcpStream>>, packet::Codec>>,
}
/// Write half of a split [`TlsStream`], accepting outgoing packets.
pub struct TlsStreamWriteHalf {
    framed: SplitSink<
        Framed<Tracked<tokio_openssl::SslStream<TcpStream>>, packet::Codec>,
        packet::Packet,
    >,
}
impl super::ConnectionReadHalf for TlsStreamReadHalf {
    /// Receive the next packet, or `None` once the stream is exhausted.
    async fn receive_packet(&mut self) -> Option<io::Result<crate::packet::Packet>> {
        self.framed.next().await
    }
}
impl super::ConnectionWriteHalf for TlsStreamWriteHalf {
    /// Queue a data packet without flushing.
    async fn feed_data_packet(&mut self, packet: crate::packet::DataPacket) -> io::Result<()> {
        self.framed.feed(Packet::DataPacket(packet)).await
    }

    /// Queue a control packet without flushing.
    async fn feed_control_packet(
        &mut self,
        packet: crate::packet::ControlPacket,
    ) -> io::Result<()> {
        self.framed.feed(Packet::ControlPacket(packet)).await
    }

    /// Flush all previously fed packets to the underlying stream.
    async fn flush(&mut self) -> io::Result<()> {
        self.framed.flush().await
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/connection/tracked.rs | mycelium/src/connection/tracked.rs | use std::{
pin::Pin,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
task::Poll,
};
use tokio::io::{AsyncRead, AsyncWrite};
/// Wrapper which keeps track of how much bytes have been read and written from a connection.
pub struct Tracked<C> {
    /// Bytes read counter, shared via `Arc` so totals can outlive one connection.
    read: Arc<AtomicU64>,
    /// Bytes written counter, shared via `Arc` so totals can outlive one connection.
    write: Arc<AtomicU64>,
    /// Underlying connection we are measuring
    con: C,
}
impl<C> Tracked<C>
where
C: AsyncRead + AsyncWrite + Unpin,
{
/// Create a new instance of a tracked connections. Counters are passed in so they can be
/// reused accross connections.
pub fn new(read: Arc<AtomicU64>, write: Arc<AtomicU64>, con: C) -> Self {
Self { read, write, con }
}
}
impl<C> AsyncRead for Tracked<C>
where
    C: AsyncRead + Unpin,
{
    #[inline]
    fn poll_read(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        // Remember how much of the buffer was already filled, so only the bytes
        // added by this particular poll are added to the counter.
        let start_len = buf.filled().len();
        let res = Pin::new(&mut self.con).poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = res {
            self.read
                .fetch_add((buf.filled().len() - start_len) as u64, Ordering::Relaxed);
        }
        res
    }
}
impl<C> AsyncWrite for Tracked<C>
where
    C: AsyncWrite + Unpin,
{
    #[inline]
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        let res = Pin::new(&mut self.con).poll_write(cx, buf);
        // Only count bytes the inner writer actually accepted.
        if let Poll::Ready(Ok(written)) = res {
            self.write.fetch_add(written as u64, Ordering::Relaxed);
        }
        res
    }

    #[inline]
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // Pure delegation; flushing transfers no new bytes to count.
        Pin::new(&mut self.con).poll_flush(cx)
    }

    #[inline]
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        Pin::new(&mut self.con).poll_shutdown(cx)
    }

    #[inline]
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        let res = Pin::new(&mut self.con).poll_write_vectored(cx, bufs);
        if let Poll::Ready(Ok(written)) = res {
            self.write.fetch_add(written as u64, Ordering::Relaxed);
        }
        res
    }

    #[inline]
    fn is_write_vectored(&self) -> bool {
        // Advertise vectored writes only if the wrapped connection supports them.
        self.con.is_write_vectored()
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/message/init.rs | mycelium/src/message/init.rs | use super::MessagePacket;
/// A message representing an init message.
///
/// The body of an init message has the following structure:
/// - 8 bytes: message length
/// - 1 byte: topic length
/// - topic bytes (up to 255, see [`MessageInit::set_topic`])
pub struct MessageInit {
    buffer: MessagePacket,
}
impl MessageInit {
    /// Create a new `MessageInit` in the provided [`MessagePacket`].
    ///
    /// Reserves the 9 byte init header (8 byte length + 1 byte topic length) and
    /// marks the packet as an init message.
    pub fn new(mut buffer: MessagePacket) -> Self {
        buffer.set_used_buffer_size(9);
        buffer.header_mut().flags_mut().set_init();
        Self { buffer }
    }

    /// Return the length of the message, as written in the body.
    pub fn length(&self) -> u64 {
        u64::from_be_bytes(
            self.buffer.buffer()[..8]
                .try_into()
                .expect("Buffer contains a size field of valid length; qed"),
        )
    }

    /// Return the topic of the message, as written in the body.
    pub fn topic(&self) -> &[u8] {
        // Single length byte at offset 8, followed by the topic itself.
        let topic_len = self.buffer.buffer()[8] as usize;
        &self.buffer.buffer()[9..9 + topic_len]
    }

    /// Set the length field of the message body.
    pub fn set_length(&mut self, length: u64) {
        self.buffer.buffer_mut()[..8].copy_from_slice(&length.to_be_bytes())
    }

    /// Set the topic in the message body.
    ///
    /// # Panics
    ///
    /// This function panics if the topic is longer than 255 bytes.
    pub fn set_topic(&mut self, topic: &[u8]) {
        assert!(
            topic.len() <= u8::MAX as usize,
            "Topic can be 255 bytes long at most"
        );
        // Resize the used buffer to exactly cover header + topic before writing.
        self.buffer.set_used_buffer_size(9 + topic.len());
        self.buffer.buffer_mut()[8] = topic.len() as u8;
        self.buffer.buffer_mut()[9..9 + topic.len()].copy_from_slice(topic);
    }

    /// Convert the `MessageInit` into a reply. This does nothing if it is already a reply.
    pub fn into_reply(mut self) -> Self {
        self.buffer.header_mut().flags_mut().set_ack();
        self
    }

    /// Consumes this `MessageInit`, returning the underlying [`MessagePacket`].
    pub fn into_inner(self) -> MessagePacket {
        self.buffer
    }
}
#[cfg(test)]
mod tests {
    //! Wire-format tests. Offsets into the raw [`PacketBuffer`] are 12 bytes past
    //! the body offsets used by `MessageInit`, accounting for the message packet
    //! header.
    use crate::{crypto::PacketBuffer, message::MessagePacket};

    use super::MessageInit;

    #[test]
    fn init_flag_set() {
        let mi = MessageInit::new(MessagePacket::new(PacketBuffer::new()));
        let mp = mi.into_inner();
        assert!(mp.header().flags().init());
    }

    #[test]
    fn read_length() {
        let mut pb = PacketBuffer::new();
        // Big-endian length written at raw offset 12 == body offset 0.
        pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 2, 3, 4, 5]);
        let ms = MessageInit::new(MessagePacket::new(pb));
        assert_eq!(ms.length(), 33_752_069);
    }

    #[test]
    fn write_length() {
        let mut ms = MessageInit::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_length(3_432_634_632);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 204, 153, 217, 8]);
        assert_eq!(ms.length(), 3_432_634_632);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/message/chunk.rs | mycelium/src/message/chunk.rs | use std::fmt;
use super::MessagePacket;
/// A message representing a "chunk" message.
///
/// The body of a chunk message has the following structure:
/// - 8 bytes: chunk index
/// - 8 bytes: chunk offset
/// - 8 bytes: chunk size
/// - remainder: chunk data of length based on field 3
pub struct MessageChunk {
    buffer: MessagePacket,
}
impl MessageChunk {
    /// Create a new `MessageChunk` in the provided [`MessagePacket`].
    ///
    /// Reserves the 24 byte chunk header (index + offset + size) and marks the
    /// packet as a chunk message.
    pub fn new(mut buffer: MessagePacket) -> Self {
        buffer.set_used_buffer_size(24);
        buffer.header_mut().flags_mut().set_chunk();
        Self { buffer }
    }

    /// Return the index of the chunk in the message, as written in the body.
    pub fn chunk_idx(&self) -> u64 {
        u64::from_be_bytes(
            self.buffer.buffer()[..8]
                .try_into()
                .expect("Buffer contains a size field of valid length; qed"),
        )
    }

    /// Set the index of the chunk in the message body.
    pub fn set_chunk_idx(&mut self, chunk_idx: u64) {
        self.buffer.buffer_mut()[..8].copy_from_slice(&chunk_idx.to_be_bytes())
    }

    /// Return the chunk offset in the message, as written in the body.
    pub fn chunk_offset(&self) -> u64 {
        u64::from_be_bytes(
            self.buffer.buffer()[8..16]
                .try_into()
                .expect("Buffer contains a size field of valid length; qed"),
        )
    }

    /// Set the offset of the chunk in the message body.
    pub fn set_chunk_offset(&mut self, chunk_offset: u64) {
        self.buffer.buffer_mut()[8..16].copy_from_slice(&chunk_offset.to_be_bytes())
    }

    /// Return the size of the chunk in the message, as written in the body.
    pub fn chunk_size(&self) -> u64 {
        // Shield against a corrupt value: never report more data than the buffer
        // can actually hold past the 24 byte header, so `data()` cannot slice out
        // of bounds.
        u64::min(
            u64::from_be_bytes(
                self.buffer.buffer()[16..24]
                    .try_into()
                    .expect("Buffer contains a size field of valid length; qed"),
            ),
            self.buffer.buffer().len() as u64 - 24,
        )
    }

    /// Set the size of the chunk in the message body.
    pub fn set_chunk_size(&mut self, chunk_size: u64) {
        self.buffer.buffer_mut()[16..24].copy_from_slice(&chunk_size.to_be_bytes())
    }

    /// Return a reference to the chunk data in the message.
    pub fn data(&self) -> &[u8] {
        &self.buffer.buffer()[24..24 + self.chunk_size() as usize]
    }

    /// Set the chunk data in this message. This will also set the size field to the proper value.
    ///
    /// # Errors
    ///
    /// Returns [`InsufficientChunkSpace`] if the data does not fit behind the 24 byte header.
    pub fn set_chunk_data(&mut self, data: &[u8]) -> Result<(), InsufficientChunkSpace> {
        let buf = self.buffer.buffer_mut();
        let available_space = buf.len() - 24;
        if data.len() > available_space {
            return Err(InsufficientChunkSpace {
                available: available_space,
                needed: data.len(),
            });
        }
        // Slicing based on data.len() is fine here as we just checked to make sure we can handle
        // this capacity.
        buf[24..24 + data.len()].copy_from_slice(data);
        self.set_chunk_size(data.len() as u64);
        // Also set the extra space used by the buffer on the underlying packet.
        self.buffer.set_used_buffer_size(24 + data.len());
        Ok(())
    }

    /// Convert the `MessageChunk` into a reply. This does nothing if it is already a reply.
    pub fn into_reply(mut self) -> Self {
        self.buffer.header_mut().flags_mut().set_ack();
        // We want to leave the length field in tact but don't want to copy the data in the reply.
        // This needs additional work on the underlying buffer.
        // TODO
        self
    }

    /// Consumes this `MessageChunk`, returning the underlying [`MessagePacket`].
    pub fn into_inner(self) -> MessagePacket {
        self.buffer
    }
}
/// An error indicating not enough space is available in a message to set the chunk data.
#[derive(Debug)]
pub struct InsufficientChunkSpace {
    /// Amount of space available in the chunk.
    pub available: usize,
    /// Amount of space needed to set the chunk data
    pub needed: usize,
}
impl fmt::Display for InsufficientChunkSpace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Destructure once and use inline captures; output is byte-identical
        // to the positional-argument form.
        let Self { available, needed } = self;
        write!(
            f,
            "Insufficient capacity available, needed {needed} bytes, have {available} bytes"
        )
    }
}
// Marker impl: the manual `Display` and derived `Debug` satisfy the `Error` supertraits.
impl std::error::Error for InsufficientChunkSpace {}
#[cfg(test)]
mod tests {
    //! Wire-format tests. Offsets into the raw [`PacketBuffer`] are 12 bytes past
    //! the body offsets used by `MessageChunk`, accounting for the message packet
    //! header.
    use std::array;

    use crate::{crypto::PacketBuffer, message::MessagePacket};

    use super::MessageChunk;

    #[test]
    fn chunk_flag_set() {
        let mc = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        let mp = mc.into_inner();
        assert!(mp.header().flags().chunk());
    }

    #[test]
    fn read_chunk_idx() {
        let mut pb = PacketBuffer::new();
        pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 0, 0, 100, 73]);
        let ms = MessageChunk::new(MessagePacket::new(pb));
        assert_eq!(ms.chunk_idx(), 25_673);
    }

    #[test]
    fn write_chunk_idx() {
        let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_chunk_idx(723);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 0, 0, 2, 211]);
        assert_eq!(ms.chunk_idx(), 723);
    }

    #[test]
    fn read_chunk_offset() {
        let mut pb = PacketBuffer::new();
        pb.buffer_mut()[20..28].copy_from_slice(&[0, 0, 0, 0, 0, 20, 40, 60]);
        let ms = MessageChunk::new(MessagePacket::new(pb));
        assert_eq!(ms.chunk_offset(), 1_321_020);
    }

    #[test]
    fn write_chunk_offset() {
        let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_chunk_offset(1_000_000);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[8..16], &[0, 0, 0, 0, 0, 15, 66, 64]);
        assert_eq!(ms.chunk_offset(), 1_000_000);
    }

    #[test]
    fn read_chunk_size() {
        let mut pb = PacketBuffer::new();
        pb.buffer_mut()[28..36].copy_from_slice(&[0, 0, 0, 0, 0, 0, 3, 232]);
        let ms = MessageChunk::new(MessagePacket::new(pb));
        assert_eq!(ms.chunk_size(), 1_000);
    }

    #[test]
    fn write_chunk_size() {
        let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_chunk_size(1_300);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[16..24], &[0, 0, 0, 0, 0, 0, 5, 20]);
        assert_eq!(ms.chunk_size(), 1_300);
    }

    #[test]
    fn read_chunk_data() {
        const CHUNK_DATA: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
        let mut pb = PacketBuffer::new();
        // Set data len
        pb.buffer_mut()[28..36].copy_from_slice(&CHUNK_DATA.len().to_be_bytes());
        pb.buffer_mut()[36..36 + CHUNK_DATA.len()].copy_from_slice(CHUNK_DATA);
        let ms = MessageChunk::new(MessagePacket::new(pb));
        assert_eq!(ms.chunk_size(), 16);
        assert_eq!(ms.data(), CHUNK_DATA);
    }

    #[test]
    fn write_chunk_data() {
        const CHUNK_DATA: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
        let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        let res = ms.set_chunk_data(CHUNK_DATA);
        assert!(res.is_ok());
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        // Check and make sure size is properly set.
        assert_eq!(&ms.buffer.buffer()[16..24], &[0, 0, 0, 0, 0, 0, 0, 16]);
        assert_eq!(ms.chunk_size(), 16);
        assert_eq!(ms.data(), CHUNK_DATA);
    }

    #[test]
    fn write_chunk_data_oversized() {
        // 1500 bytes does not fit in the chunk space of a single packet buffer.
        let data: [u8; 1500] = array::from_fn(|_| 0xFF);
        let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
        let res = ms.set_chunk_data(&data);
        assert!(res.is_err());
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/message/topic.rs | mycelium/src/message/topic.rs | use crate::subnet::Subnet;
use core::fmt;
use serde::{
de::{Deserialize, Deserializer, MapAccess, Visitor},
Deserialize as DeserializeMacro,
};
use std::collections::HashMap;
use std::path::PathBuf;
/// Configuration for a topic whitelist, including allowed subnets and optional forward socket
#[derive(Debug, Default, Clone)]
pub struct TopicWhitelistConfig {
    /// Subnets that are allowed to send messages to this topic
    subnets: Vec<Subnet>,
    /// Optional Unix domain socket path to forward messages to
    forward_socket: Option<PathBuf>,
}
impl TopicWhitelistConfig {
    /// Create a new empty whitelist config
    pub fn new() -> Self {
        Self::default()
    }

    /// Get the list of whitelisted subnets
    pub fn subnets(&self) -> &Vec<Subnet> {
        &self.subnets
    }

    /// Get the forward socket path, if any
    pub fn forward_socket(&self) -> Option<&PathBuf> {
        self.forward_socket.as_ref()
    }

    /// Set the forward socket path
    pub fn set_forward_socket(&mut self, path: Option<PathBuf>) {
        self.forward_socket = path;
    }

    /// Add a subnet to the whitelist
    ///
    /// No deduplication is done; the same subnet can be present more than once.
    pub fn add_subnet(&mut self, subnet: Subnet) {
        self.subnets.push(subnet);
    }

    /// Remove a subnet from the whitelist
    ///
    /// Removes every stored occurrence equal to `subnet`.
    pub fn remove_subnet(&mut self, subnet: &Subnet) {
        self.subnets.retain(|s| s != subnet);
    }
}
/// Per-topic access control configuration for incoming messages.
#[derive(Debug, Default, Clone)]
pub struct TopicConfig {
    /// The default action to take if no acl is defined for a topic.
    default: MessageAction,
    /// Explicitly configured whitelists for topics. Ip's which aren't part of the whitelist will
    /// not be allowed to send messages to that topic. If a topic is not in this map, the default
    /// action will be used.
    whitelist: HashMap<Vec<u8>, TopicWhitelistConfig>,
}
impl TopicConfig {
    /// Get the [`default action`](MessageAction) used when a topic has no whitelist entry.
    pub fn default(&self) -> MessageAction {
        self.default
    }

    /// Set the default [`action`](MessageAction) for topics which don't have a whitelist
    /// configured.
    pub fn set_default(&mut self, default: MessageAction) {
        self.default = default;
    }

    /// Get the fully configured whitelist
    pub fn whitelist(&self) -> &HashMap<Vec<u8>, TopicWhitelistConfig> {
        &self.whitelist
    }

    /// Insert a new topic in the whitelist, without any configured allowed sources.
    pub fn add_topic_whitelist(&mut self, topic: Vec<u8>) {
        self.whitelist.entry(topic).or_default();
    }

    /// Set the forward socket for a topic. Does nothing if the topic doesn't exist.
    pub fn set_topic_forward_socket(&mut self, topic: Vec<u8>, socket_path: Option<PathBuf>) {
        // `and_modify` without `or_insert` leaves missing topics untouched.
        self.whitelist
            .entry(topic)
            .and_modify(|c| c.set_forward_socket(socket_path));
    }

    /// Get the forward socket for a topic, if any.
    ///
    /// Takes `&[u8]` rather than `&Vec<u8>` (clippy `ptr_arg`); existing `&Vec<u8>`
    /// call sites keep compiling through deref coercion.
    pub fn get_topic_forward_socket(&self, topic: &[u8]) -> Option<&PathBuf> {
        self.whitelist
            .get(topic)
            .and_then(|config| config.forward_socket())
    }

    /// Remove a topic from the whitelist. Future messages will follow the default action.
    pub fn remove_topic_whitelist(&mut self, topic: &[u8]) {
        self.whitelist.remove(topic);
    }

    /// Adds a new whitelisted source for a topic. This creates the topic if it does not exist yet.
    pub fn add_topic_whitelist_src(&mut self, topic: Vec<u8>, src: Subnet) {
        self.whitelist.entry(topic).or_default().add_subnet(src);
    }

    /// Removes a whitelisted source for a topic.
    ///
    /// If the last source is removed for a topic, the entry remains, and must be cleared by
    /// calling [`Self::remove_topic_whitelist`] to fall back to the default action. Note that
    /// an empty whitelist effectively blocks all messages for a topic.
    ///
    /// This does nothing if the topic does not exist.
    pub fn remove_topic_whitelist_src(&mut self, topic: &[u8], src: Subnet) {
        if let Some(whitelist_config) = self.whitelist.get_mut(topic) {
            whitelist_config.remove_subnet(&src);
        }
    }
}
/// Action applied to an incoming message for a topic.
#[derive(Debug, Default, Clone, Copy, DeserializeMacro)]
pub enum MessageAction {
    /// Accept the message
    #[default]
    Accept,
    /// Reject the message
    Reject,
}
// Helper function to parse a subnet from a string
fn parse_subnet_str<E>(s: &str) -> Result<Subnet, E>
where
    E: serde::de::Error,
{
    // First attempt: full CIDR notation (address + prefix length).
    if let Ok(net) = s.parse::<ipnet::IpNet>() {
        return Subnet::new(net.addr(), net.prefix_len())
            .map_err(|e| E::custom(format!("Invalid subnet prefix length: {e}")));
    }
    // Second attempt: a bare IP address, interpreted as a host subnet (/32 or /128).
    if let Ok(ip) = s.parse::<std::net::IpAddr>() {
        let prefix_len = match ip {
            std::net::IpAddr::V4(_) => 32,
            std::net::IpAddr::V6(_) => 128,
        };
        return Subnet::new(ip, prefix_len)
            .map_err(|e| E::custom(format!("Invalid subnet prefix length: {e}")));
    }
    Err(E::custom(format!("Invalid subnet or IP address: {s}")))
}
// Define a struct for deserializing the whitelist config
#[derive(DeserializeMacro)]
struct WhitelistConfigData {
    /// Subnet (or bare IP) strings; parsed afterwards via `parse_subnet_str`.
    #[serde(default)]
    subnets: Vec<String>,
    /// Optional Unix socket path to forward matching messages to.
    #[serde(default)]
    forward_socket: Option<String>,
}
// Add this implementation right after the TopicConfig struct definition
impl<'de> Deserialize<'de> for TopicConfig {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor over the config map: the special key "default" sets the fallback
        // action, every other key is a topic name mapped to its whitelist.
        struct TopicConfigVisitor;

        impl<'de> Visitor<'de> for TopicConfigVisitor {
            type Value = TopicConfig;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a topic configuration")
            }

            fn visit_map<V>(self, mut map: V) -> Result<TopicConfig, V::Error>
            where
                V: MapAccess<'de>,
            {
                let mut default = MessageAction::default();
                let mut whitelist = HashMap::new();

                while let Some(key) = map.next_key::<String>()? {
                    if key == "default" {
                        default = map.next_value()?;
                    } else {
                        // Try to parse as a WhitelistConfigData first
                        //
                        // NOTE(review): if this `next_value` call fails, the fallback
                        // below calls `next_value` again. serde does not guarantee a
                        // value can be re-read after a failed attempt for every
                        // deserializer — confirm this legacy-format fallback actually
                        // works with the formats used to load this config.
                        if let Ok(config_data) = map.next_value::<WhitelistConfigData>() {
                            let mut whitelist_config = TopicWhitelistConfig::default();
                            // Process subnets
                            for subnet_str in config_data.subnets {
                                let subnet = parse_subnet_str(&subnet_str)?;
                                whitelist_config.add_subnet(subnet);
                            }
                            // Process forward_socket
                            if let Some(socket_path) = config_data.forward_socket {
                                whitelist_config
                                    .set_forward_socket(Some(PathBuf::from(socket_path)));
                            }
                            // Convert string key to Vec<u8>
                            whitelist.insert(key.into_bytes(), whitelist_config);
                        } else {
                            // Fallback to old format: just a list of subnets
                            let subnet_strs = map.next_value::<Vec<String>>()?;
                            let mut whitelist_config = TopicWhitelistConfig::default();
                            for subnet_str in subnet_strs {
                                let subnet = parse_subnet_str(&subnet_str)?;
                                whitelist_config.add_subnet(subnet);
                            }
                            // Convert string key to Vec<u8>
                            whitelist.insert(key.into_bytes(), whitelist_config);
                        }
                    }
                }

                Ok(TopicConfig { default, whitelist })
            }
        }

        deserializer.deserialize_map(TopicConfigVisitor)
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/message/done.rs | mycelium/src/message/done.rs | use super::{MessageChecksum, MessagePacket, MESSAGE_CHECKSUM_LENGTH};
/// A message representing a "done" message.
///
/// The body of a done message has the following structure:
/// - 8 bytes: chunks transmitted
/// - 32 bytes: checksum of the transmitted data
pub struct MessageDone {
    buffer: MessagePacket,
}
impl MessageDone {
    /// Create a new `MessageDone` in the provided [`MessagePacket`].
    ///
    /// Reserves the 40 byte done header (8 byte chunk count + 32 byte checksum)
    /// and marks the packet as a done message.
    pub fn new(mut buffer: MessagePacket) -> Self {
        buffer.set_used_buffer_size(40);
        buffer.header_mut().flags_mut().set_done();
        Self { buffer }
    }

    /// Return the amount of chunks in the message, as written in the body.
    pub fn chunk_count(&self) -> u64 {
        u64::from_be_bytes(
            self.buffer.buffer()[..8]
                .try_into()
                .expect("Buffer contains a size field of valid length; qed"),
        )
    }

    /// Set the amount of chunks field of the message body.
    pub fn set_chunk_count(&mut self, chunk_count: u64) {
        self.buffer.buffer_mut()[..8].copy_from_slice(&chunk_count.to_be_bytes())
    }

    /// Get the checksum of the message from the body.
    pub fn checksum(&self) -> MessageChecksum {
        MessageChecksum::from_bytes(
            self.buffer.buffer()[8..8 + MESSAGE_CHECKSUM_LENGTH]
                .try_into()
                .expect("Buffer contains enough data for a checksum; qed"),
        )
    }

    /// Set the checksum of the message in the body.
    pub fn set_checksum(&mut self, checksum: MessageChecksum) {
        self.buffer.buffer_mut()[8..8 + MESSAGE_CHECKSUM_LENGTH]
            .copy_from_slice(checksum.as_bytes())
    }

    /// Convert the `MessageDone` into a reply. This does nothing if it is already a reply.
    pub fn into_reply(mut self) -> Self {
        self.buffer.header_mut().flags_mut().set_ack();
        self
    }

    /// Consumes this `MessageDone`, returning the underlying [`MessagePacket`].
    pub fn into_inner(self) -> MessagePacket {
        self.buffer
    }
}
#[cfg(test)]
mod tests {
    //! Wire-format tests. Offsets into the raw [`PacketBuffer`] are 12 bytes past
    //! the body offsets used by `MessageDone`, accounting for the message packet
    //! header.
    use crate::{
        crypto::PacketBuffer,
        message::{MessageChecksum, MessagePacket},
    };

    use super::MessageDone;

    #[test]
    fn done_flag_set() {
        let md = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
        let mp = md.into_inner();
        assert!(mp.header().flags().done());
    }

    #[test]
    fn read_chunk_count() {
        let mut pb = PacketBuffer::new();
        pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 0, 0, 73, 55]);
        let ms = MessageDone::new(MessagePacket::new(pb));
        assert_eq!(ms.chunk_count(), 18_743);
    }

    #[test]
    fn write_chunk_count() {
        let mut ms = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_chunk_count(10_000);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 0, 0, 39, 16]);
        assert_eq!(ms.chunk_count(), 10_000);
    }

    #[test]
    fn read_checksum() {
        const CHECKSUM: MessageChecksum = MessageChecksum::from_bytes([
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
            0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
            0x1C, 0x1D, 0x1E, 0x1F,
        ]);
        let mut pb = PacketBuffer::new();
        pb.buffer_mut()[20..52].copy_from_slice(CHECKSUM.as_bytes());
        let ms = MessageDone::new(MessagePacket::new(pb));
        assert_eq!(ms.checksum(), CHECKSUM);
    }

    #[test]
    fn write_checksum() {
        const CHECKSUM: MessageChecksum = MessageChecksum::from_bytes([
            0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
            0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
            0x1C, 0x1D, 0x1E, 0x1F,
        ]);
        let mut ms = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_checksum(CHECKSUM);
        // Since we don't work with packet buffer we don't have to account for the message packet
        // header.
        assert_eq!(&ms.buffer.buffer()[8..40], CHECKSUM.as_bytes());
        assert_eq!(ms.checksum(), CHECKSUM);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/packet/control.rs | mycelium/src/packet/control.rs | use std::{io, net::IpAddr, time::Duration};
use bytes::BytesMut;
use tokio_util::codec::{Decoder, Encoder};
use crate::{
babel, metric::Metric, peer::Peer, router_id::RouterId, sequence_number::SeqNo, subnet::Subnet,
};
/// Control packets are babel TLVs.
pub type ControlPacket = babel::Tlv;
/// Codec for (de)serializing [`ControlPacket`]s, delegating to the babel TLV codec.
pub struct Codec {
    // TODO: wrapper to make it easier to deserialize
    codec: babel::Codec,
}
impl ControlPacket {
    /// Build a unicast Hello TLV for the given peer, advancing the peer's hello
    /// sequence number as a side effect.
    pub fn new_hello(dest_peer: &Peer, interval: Duration) -> Self {
        // Interval is encoded as milliseconds / 10 (i.e. centiseconds).
        let tlv: babel::Tlv =
            babel::Hello::new_unicast(dest_peer.hello_seqno(), (interval.as_millis() / 10) as u16)
                .into();
        dest_peer.increment_hello_seqno();
        tlv
    }

    /// Build an IHU TLV with the given receive cost, interval and optional
    /// destination address.
    pub fn new_ihu(rx_cost: Metric, interval: Duration, dest_address: Option<IpAddr>) -> Self {
        babel::Ihu::new(rx_cost, (interval.as_millis() / 10) as u16, dest_address).into()
    }

    /// Build an Update TLV announcing `subnet` with the given metric, sequence
    /// number and originating router id.
    pub fn new_update(
        interval: Duration,
        seqno: SeqNo,
        metric: Metric,
        subnet: Subnet,
        router_id: RouterId,
    ) -> Self {
        babel::Update::new(interval, seqno, metric, subnet, router_id).into()
    }
}
impl Codec {
    /// Create a new control packet `Codec`.
    pub fn new() -> Self {
        Codec {
            codec: babel::Codec::new(),
        }
    }
}

impl Default for Codec {
    /// Equivalent to [`Codec::new`]; provided so the conventional `new`/`Default`
    /// pairing holds (clippy `new_without_default`).
    fn default() -> Self {
        Self::new()
    }
}
impl Decoder for Codec {
    type Item = ControlPacket;
    type Error = std::io::Error;

    // Decoding is fully delegated to the babel TLV codec.
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        self.codec.decode(buf)
    }
}
impl Encoder<ControlPacket> for Codec {
    type Error = io::Error;

    // Encoding is fully delegated to the babel TLV codec.
    fn encode(&mut self, message: ControlPacket, buf: &mut BytesMut) -> Result<(), Self::Error> {
        self.codec.encode(message, buf)
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/packet/data.rs | mycelium/src/packet/data.rs | use std::net::Ipv6Addr;
use bytes::{Buf, BufMut, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
/// Size of the header start for a data packet (before the IP addresses).
const DATA_PACKET_HEADER_SIZE: usize = 4;
/// Mask to extract data length from
const DATA_PACKET_LEN_MASK: u32 = (1 << 16) - 1;
/// A single data packet as exchanged between peers: an encrypted payload plus the header fields
/// needed to forward it.
#[derive(Debug, Clone)]
pub struct DataPacket {
    pub raw_data: Vec<u8>, // encrypted data itself, then append the nonce
    /// Max amount of hops for the packet.
    pub hop_limit: u8,
    /// Source address of the packet.
    pub src_ip: Ipv6Addr,
    /// Destination address of the packet.
    pub dst_ip: Ipv6Addr,
}
/// Stateful [`Encoder`]/[`Decoder`] for [`DataPacket`]s.
///
/// Partially decoded header fields are cached across `decode` calls, so a packet split over
/// multiple reads can resume where it left off.
pub struct Codec {
    header_vals: Option<HeaderValues>,
    src_ip: Option<Ipv6Addr>,
    dest_ip: Option<Ipv6Addr>,
}
/// Data from the DataPacket header.
#[derive(Clone, Copy)]
struct HeaderValues {
    /// Payload length in bytes.
    len: u16,
    /// Remaining hop budget for the packet.
    hop_limit: u8,
}
impl Codec {
    /// Create a new `Codec` with no partially decoded packet state.
    pub fn new() -> Self {
        Codec {
            header_vals: None,
            src_ip: None,
            dest_ip: None,
        }
    }
}
impl Decoder for Codec {
    type Item = DataPacket;
    type Error = std::io::Error;
    /// Decode a [`DataPacket`] from `src`.
    ///
    /// Progress (header, source IP, destination IP) is cached in `self` between calls, so a
    /// packet arriving in multiple reads is resumed where it left off. Returns `Ok(None)` while
    /// more bytes are needed; the cached state is cleared once a full packet is produced.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Determine the length of the data
        let HeaderValues { len, hop_limit } = if let Some(header_vals) = self.header_vals {
            header_vals
        } else {
            // Check we have enough data to decode
            if src.len() < DATA_PACKET_HEADER_SIZE {
                return Ok(None);
            }
            let raw_header = src.get_u32();
            // Hop limit is the last 8 bits.
            let hop_limit = (raw_header & 0xFF) as u8;
            // Data length occupies the 16 bits above the hop limit (see the encoder).
            let data_len = ((raw_header >> 8) & DATA_PACKET_LEN_MASK) as u16;
            let header_vals = HeaderValues {
                len: data_len,
                hop_limit,
            };
            self.header_vals = Some(header_vals);
            header_vals
        };
        let data_len = len as usize;
        // Determine the source IP
        let src_ip = if let Some(src_ip) = self.src_ip {
            src_ip
        } else {
            if src.len() < 16 {
                return Ok(None);
            }
            // Decode octets
            let mut ip_bytes = [0u8; 16];
            ip_bytes.copy_from_slice(&src[..16]);
            let src_ip = Ipv6Addr::from(ip_bytes);
            src.advance(16);
            self.src_ip = Some(src_ip);
            src_ip
        };
        // Determine the destination IP
        let dest_ip = if let Some(dest_ip) = self.dest_ip {
            dest_ip
        } else {
            if src.len() < 16 {
                return Ok(None);
            }
            // Decode octets
            let mut ip_bytes = [0u8; 16];
            ip_bytes.copy_from_slice(&src[..16]);
            let dest_ip = Ipv6Addr::from(ip_bytes);
            src.advance(16);
            self.dest_ip = Some(dest_ip);
            dest_ip
        };
        // Check we have enough data to decode
        if src.len() < data_len {
            return Ok(None);
        }
        // Decode octets
        let mut data = vec![0u8; data_len];
        data.copy_from_slice(&src[..data_len]);
        src.advance(data_len);
        // Reset state
        self.header_vals = None;
        self.dest_ip = None;
        self.src_ip = None;
        Ok(Some(DataPacket {
            raw_data: data,
            hop_limit,
            dst_ip: dest_ip,
            src_ip,
        }))
    }
}
impl Encoder<DataPacket> for Codec {
    type Error = std::io::Error;
    /// Encode a [`DataPacket`].
    ///
    /// Wire layout: a 4 byte header (8 unused bits, 16 bit payload length, 8 bit hop limit),
    /// followed by the 16 byte source IP, the 16 byte destination IP, and the payload.
    ///
    /// NOTE(review): a payload larger than `u16::MAX` bytes would silently wrap the encoded
    /// length field — confirm callers enforce a maximum packet size upstream.
    fn encode(&mut self, item: DataPacket, dst: &mut BytesMut) -> Result<(), Self::Error> {
        // Reserve the full frame size up front to avoid repeated growth.
        dst.reserve(item.raw_data.len() + DATA_PACKET_HEADER_SIZE + 16 + 16);
        let mut raw_header = 0;
        // Add length of the data
        raw_header |= (item.raw_data.len() as u32) << 8;
        // And hop limit
        raw_header |= item.hop_limit as u32;
        dst.put_u32(raw_header);
        // Write the source IP
        dst.put_slice(&item.src_ip.octets());
        // Write the destination IP
        dst.put_slice(&item.dst_ip.octets());
        // Write the data
        dst.extend_from_slice(&item.raw_data);
        Ok(())
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/tlv.rs | mycelium/src/babel/tlv.rs | pub use super::{hello::Hello, ihu::Ihu, update::Update};
use super::{route_request::RouteRequest, SeqNoRequest};
/// A single `Tlv` in a babel packet body.
/// A single `Tlv` in a babel packet body.
///
/// Each variant wraps the corresponding TLV body without the 2 byte type/length header.
#[derive(Debug, Clone, PartialEq)]
pub enum Tlv {
    /// Hello Tlv type.
    Hello(Hello),
    /// Ihu Tlv type.
    Ihu(Ihu),
    /// Update Tlv type.
    Update(Update),
    /// RouteRequest Tlv type.
    RouteRequest(RouteRequest),
    /// SeqNoRequest Tlv type
    SeqNoRequest(SeqNoRequest),
}
impl Tlv {
    /// Calculate the size on the wire for this `Tlv`. This DOES NOT included the TLV header size
    /// (2 bytes).
    pub fn wire_size(&self) -> u8 {
        // Dispatch to the wrapped TLV body's implementation.
        match self {
            Self::Hello(hello) => hello.wire_size(),
            Self::Ihu(ihu) => ihu.wire_size(),
            Self::Update(update) => update.wire_size(),
            Self::RouteRequest(route_request) => route_request.wire_size(),
            Self::SeqNoRequest(seqno_request) => seqno_request.wire_size(),
        }
    }
    /// Encode this `Tlv` as part of a packet.
    pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
        // Dispatch to the wrapped TLV body's implementation.
        match self {
            Self::Hello(hello) => hello.write_bytes(dst),
            Self::Ihu(ihu) => ihu.write_bytes(dst),
            Self::Update(update) => update.write_bytes(dst),
            Self::RouteRequest(route_request) => route_request.write_bytes(dst),
            Self::SeqNoRequest(seqno_request) => seqno_request.write_bytes(dst),
        }
    }
}
impl From<SeqNoRequest> for Tlv {
fn from(v: SeqNoRequest) -> Self {
Self::SeqNoRequest(v)
}
}
impl From<RouteRequest> for Tlv {
fn from(v: RouteRequest) -> Self {
Self::RouteRequest(v)
}
}
impl From<Update> for Tlv {
fn from(v: Update) -> Self {
Self::Update(v)
}
}
impl From<Ihu> for Tlv {
fn from(v: Ihu) -> Self {
Self::Ihu(v)
}
}
impl From<Hello> for Tlv {
fn from(v: Hello) -> Self {
Self::Hello(v)
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/route_request.rs | mycelium/src/babel/route_request.rs | use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::subnet::Subnet;
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Base wire size of a [`RouteRequest`] without variable length address encoding.
const ROUTE_REQUEST_BASE_WIRE_SIZE: u8 = 3;
/// Route request TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-route-request
#[derive(Debug, Clone, PartialEq)]
pub struct RouteRequest {
    /// The prefix being requested; `None` encodes a wildcard (full table) request.
    prefix: Option<Subnet>,
    /// The requests' generation, i.e. how often it has been forwarded.
    generation: u8,
}
impl RouteRequest {
    /// Creates a new `RouteRequest` for the given [`prefix`]. If no [`prefix`] is given, a full
    /// route table dump is requested.
    ///
    /// [`prefix`]: Subnet
    pub fn new(prefix: Option<Subnet>, generation: u8) -> Self {
        Self { prefix, generation }
    }
    /// Return the [`prefix`](Subnet) associated with this `RouteRequest`.
    pub fn prefix(&self) -> Option<Subnet> {
        self.prefix
    }
    /// Return the generation of the `RouteRequest`, which is the amount of times it has been
    /// forwarded already.
    pub fn generation(&self) -> u8 {
        self.generation
    }
    /// Increment the generation of the `RouteRequest`.
    ///
    /// NOTE(review): this overflows at 255 (panics in debug builds) — confirm the generation is
    /// bounded by forwarding logic upstream.
    pub fn inc_generation(&mut self) {
        self.generation += 1
    }
    /// Calculates the size on the wire of this `RouteRequest`.
    pub fn wire_size(&self) -> u8 {
        // Only the bytes covered by the prefix length are transmitted.
        ROUTE_REQUEST_BASE_WIRE_SIZE
            + (if let Some(prefix) = self.prefix {
                prefix.prefix_len().div_ceil(8)
            } else {
                0
            })
    }
    /// Construct a `RouteRequest` from wire bytes.
    ///
    /// Returns [`None`] if the TLV is malformed (unknown AE or invalid prefix length). In that
    /// case the remaining bytes of the TLV are still consumed, so the caller can continue
    /// parsing subsequent TLVs.
    ///
    /// # Panics
    ///
    /// This function will panic if there are insufficient bytes present in the provided buffer to
    /// decode a complete `RouteRequest`.
    pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
        let generation = src.get_u8();
        let ae = src.get_u8();
        let plen = src.get_u8();
        let prefix_size = plen.div_ceil(8) as usize;
        let prefix_ip = match ae {
            AE_WILDCARD => None,
            AE_IPV4 => {
                if plen > 32 {
                    // Invalid prefix length; consume the remaining TLV bytes so the parser
                    // stays synchronized with the next TLV, then drop this one.
                    trace!("Invalid prefix length for IPv4 in route_request packet, drop packet");
                    src.advance(len as usize - 3);
                    return None;
                }
                let mut raw_ip = [0; 4];
                raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
                src.advance(prefix_size);
                Some(Ipv4Addr::from(raw_ip).into())
            }
            AE_IPV6 => {
                if plen > 128 {
                    // Invalid prefix length; consume the remainder to stay synchronized.
                    trace!("Invalid prefix length for IPv6 in route_request packet, drop packet");
                    src.advance(len as usize - 3);
                    return None;
                }
                let mut raw_ip = [0; 16];
                raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
                src.advance(prefix_size);
                Some(Ipv6Addr::from(raw_ip).into())
            }
            AE_IPV6_LL => {
                if plen != 64 {
                    // Invalid prefix length; consume the remainder to stay synchronized.
                    trace!("Invalid prefix length for IPv6 link local in route_request packet, drop packet");
                    src.advance(len as usize - 3);
                    return None;
                }
                // Link local addresses have an implied fe80::/64 prefix; only the interface
                // part is on the wire.
                let mut raw_ip = [0; 16];
                raw_ip[0] = 0xfe;
                raw_ip[1] = 0x80;
                raw_ip[8..].copy_from_slice(&src[..8]);
                src.advance(8);
                Some(Ipv6Addr::from(raw_ip).into())
            }
            _ => {
                // Invalid AE type, skip remaining data and ignore
                trace!("Invalid AE type in route_request packet, drop packet");
                src.advance(len as usize - 3);
                return None;
            }
        };
        let prefix = prefix_ip.and_then(|prefix| Subnet::new(prefix, plen).ok());
        trace!("Read route_request tlv body");
        Some(RouteRequest { prefix, generation })
    }
    /// Encode this `RouteRequest` tlv as part of a packet.
    pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
        dst.put_u8(self.generation);
        if let Some(prefix) = self.prefix {
            dst.put_u8(match prefix.address() {
                IpAddr::V4(_) => AE_IPV4,
                IpAddr::V6(_) => AE_IPV6,
            });
            dst.put_u8(prefix.prefix_len());
            // Only write the bytes covered by the prefix length.
            let prefix_len = prefix.prefix_len().div_ceil(8) as usize;
            match prefix.address() {
                IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
                IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
            }
        } else {
            dst.put_u8(AE_WILDCARD);
            // Prefix len MUST be 0 for wildcard requests
            dst.put_u8(0);
        }
    }
}
#[cfg(test)]
mod tests {
    use std::net::{Ipv4Addr, Ipv6Addr};
    use bytes::Buf;
    use crate::subnet::Subnet;
    // Verify the exact wire layout for IPv6, IPv4 and wildcard route requests.
    #[test]
    fn encoding() {
        let mut buf = bytes::BytesMut::new();
        let rr = super::RouteRequest {
            prefix: Some(
                Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
                    .expect("64 is a valid IPv6 prefix size; qed"),
            ),
            generation: 2,
        };
        rr.write_bytes(&mut buf);
        assert_eq!(buf.len(), 11);
        assert_eq!(buf[..11], [2, 2, 64, 2, 0, 0, 25, 0, 26, 0, 27]);
        let mut buf = bytes::BytesMut::new();
        let rr = super::RouteRequest {
            prefix: Some(
                Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 32)
                    .expect("32 is a valid IPv4 prefix size; qed"),
            ),
            generation: 3,
        };
        rr.write_bytes(&mut buf);
        assert_eq!(buf.len(), 7);
        assert_eq!(buf[..7], [3, 1, 32, 10, 101, 4, 1]);
        let mut buf = bytes::BytesMut::new();
        let rr = super::RouteRequest {
            prefix: None,
            generation: 0,
        };
        rr.write_bytes(&mut buf);
        assert_eq!(buf.len(), 3);
        assert_eq!(buf[..3], [0, 0, 0]);
    }
    // Decode wildcard, IPv4, IPv6 and link-local encoded route requests.
    #[test]
    fn decoding() {
        let mut buf = bytes::BytesMut::from(&[12, 0, 0][..]);
        let rr = super::RouteRequest {
            prefix: None,
            generation: 12,
        };
        let buf_len = buf.len();
        assert_eq!(
            super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
            Some(rr)
        );
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[24, 1, 24, 10, 15, 19][..]);
        let rr = super::RouteRequest {
            prefix: Some(
                Subnet::new(Ipv4Addr::new(10, 15, 19, 0).into(), 24)
                    .expect("24 is a valid IPv4 prefix size; qed"),
            ),
            generation: 24,
        };
        let buf_len = buf.len();
        assert_eq!(
            super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
            Some(rr)
        );
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[7, 2, 64, 0, 10, 0, 20, 0, 30, 0, 40][..]);
        let rr = super::RouteRequest {
            prefix: Some(
                Subnet::new(Ipv6Addr::new(10, 20, 30, 40, 0, 0, 0, 0).into(), 64)
                    .expect("64 is a valid IPv6 prefix size; qed"),
            ),
            generation: 7,
        };
        let buf_len = buf.len();
        assert_eq!(
            super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
            Some(rr)
        );
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[4, 3, 64, 0, 10, 0, 20, 0, 30, 0, 40][..]);
        let rr = super::RouteRequest {
            prefix: Some(
                Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
                    .expect("64 is a valid IPv6 prefix size; qed"),
            ),
            generation: 4,
        };
        let buf_len = buf.len();
        assert_eq!(
            super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
            Some(rr)
        );
        assert_eq!(buf.remaining(), 0);
    }
    #[test]
    fn decode_ignores_invalid_ae_encoding() {
        // AE 4 as it is the first one which should be used in protocol extension, causing this
        // test to fail if we forget to update something
        let mut buf = bytes::BytesMut::from(
            &[
                0, 4, 64, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
            ][..],
        );
        let buf_len = buf.len();
        assert_eq!(
            super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
            None
        );
        // Decode function should still consume the required amount of bytes to leave parser in a
        // good state (assuming the length in the tlv preamble is good).
        assert_eq!(buf.remaining(), 0);
    }
    // Encode then decode a route request and verify the value survives unchanged.
    #[test]
    fn roundtrip() {
        let mut buf = bytes::BytesMut::new();
        let seqno_src = super::RouteRequest::new(
            Some(
                Subnet::new(
                    Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
                    64,
                )
                .expect("64 is a valid IPv6 prefix size; qed"),
            ),
            27,
        );
        seqno_src.write_bytes(&mut buf);
        let buf_len = buf.len();
        let decoded = super::RouteRequest::from_bytes(&mut buf, buf_len as u8);
        assert_eq!(Some(seqno_src), decoded);
        assert_eq!(buf.remaining(), 0);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/ihu.rs | mycelium/src/babel/ihu.rs | //! The babel [IHU TLV](https://datatracker.ietf.org/doc/html/rfc8966#name-ihu).
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::metric::Metric;
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Base wire size of an [`Ihu`] without variable length address encoding.
const IHU_BASE_WIRE_SIZE: u8 = 6;
/// IHU TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-ihu.
/// IHU TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-ihu.
#[derive(Debug, Clone, PartialEq)]
pub struct Ihu {
    /// Rx cost advertised for the link.
    rx_cost: Metric,
    /// Interval in centiseconds until the next IHU, at the latest. Never 0.
    interval: u16,
    /// Address this IHU is directed at; `None` encodes the wildcard AE.
    address: Option<IpAddr>,
}
impl Ihu {
    /// Create a new `Ihu` to be transmitted.
    ///
    /// # Panics
    ///
    /// Panics if `interval` is 0, as that value is illegal per the RFC.
    pub fn new(rx_cost: Metric, interval: u16, address: Option<IpAddr>) -> Self {
        // An interval of 0 is illegal according to the RFC, as this value is used by the receiver
        // to calculate the hold time.
        if interval == 0 {
            panic!("Ihu interval MUST NOT be 0");
        }
        Self {
            rx_cost,
            interval,
            address,
        }
    }
    /// Calculates the size on the wire of this `Ihu`.
    pub fn wire_size(&self) -> u8 {
        IHU_BASE_WIRE_SIZE
            + match self.address {
                None => 0,
                Some(IpAddr::V4(_)) => 4,
                // TODO: link local should be encoded differently
                Some(IpAddr::V6(_)) => 16,
            }
    }
    /// Construct a `Ihu` from wire bytes.
    ///
    /// Returns [`None`] for an unknown AE type; in that case the remaining TLV bytes are still
    /// consumed so the caller can continue parsing.
    ///
    /// # Panics
    ///
    /// This function will panic if there are insufficient bytes present in the provided buffer to
    /// decode a complete `Ihu`.
    pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
        let ae = src.get_u8();
        // read and ignore reserved byte
        let _ = src.get_u8();
        let rx_cost = src.get_u16().into();
        let interval = src.get_u16();
        let address = match ae {
            AE_WILDCARD => None,
            AE_IPV4 => {
                let mut raw_ip = [0; 4];
                raw_ip.copy_from_slice(&src[..4]);
                src.advance(4);
                Some(Ipv4Addr::from(raw_ip).into())
            }
            AE_IPV6 => {
                let mut raw_ip = [0; 16];
                raw_ip.copy_from_slice(&src[..16]);
                src.advance(16);
                Some(Ipv6Addr::from(raw_ip).into())
            }
            AE_IPV6_LL => {
                // Link local addresses have an implied fe80::/64 prefix; only the interface
                // part is on the wire.
                let mut raw_ip = [0; 16];
                raw_ip[0] = 0xfe;
                raw_ip[1] = 0x80;
                raw_ip[8..].copy_from_slice(&src[..8]);
                src.advance(8);
                Some(Ipv6Addr::from(raw_ip).into())
            }
            _ => {
                // Invalid AE type, skip remaining data and ignore
                trace!("Invalid AE type in IHU TLV, drop TLV");
                src.advance(len as usize - 6);
                return None;
            }
        };
        trace!("Read ihu tlv body");
        Some(Self {
            rx_cost,
            interval,
            address,
        })
    }
    /// Encode this `Ihu` tlv as part of a packet.
    pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
        dst.put_u8(match self.address {
            None => AE_WILDCARD,
            Some(IpAddr::V4(_)) => AE_IPV4,
            Some(IpAddr::V6(_)) => AE_IPV6,
        });
        // reserved byte, must be all 0
        dst.put_u8(0);
        dst.put_u16(self.rx_cost.into());
        dst.put_u16(self.interval);
        match self.address {
            None => {}
            Some(IpAddr::V4(ip)) => dst.put_slice(&ip.octets()),
            Some(IpAddr::V6(ip)) => dst.put_slice(&ip.octets()),
        }
    }
}
#[cfg(test)]
mod tests {
    use std::net::{Ipv4Addr, Ipv6Addr};
    use bytes::Buf;
    // Verify the exact wire layout produced for IPv4 and IPv6 addressed IHUs.
    #[test]
    fn encoding() {
        let mut buf = bytes::BytesMut::new();
        let ihu = super::Ihu {
            rx_cost: 25.into(),
            interval: 400,
            address: Some(Ipv4Addr::new(1, 1, 1, 1).into()),
        };
        ihu.write_bytes(&mut buf);
        assert_eq!(buf.len(), 10);
        assert_eq!(buf[..10], [1, 0, 0, 25, 1, 144, 1, 1, 1, 1]);
        let mut buf = bytes::BytesMut::new();
        let ihu = super::Ihu {
            rx_cost: 100.into(),
            interval: 4000,
            address: Some(Ipv6Addr::new(2, 0, 1234, 2345, 3456, 4567, 5678, 1).into()),
        };
        ihu.write_bytes(&mut buf);
        assert_eq!(buf.len(), 22);
        assert_eq!(
            buf[..22],
            [2, 0, 0, 100, 15, 160, 0, 2, 0, 0, 4, 210, 9, 41, 13, 128, 17, 215, 22, 46, 0, 1]
        );
    }
    // Decode wildcard, IPv4, IPv6 and link-local encoded IHUs.
    #[test]
    fn decoding() {
        let mut buf = bytes::BytesMut::from(&[0, 0, 0, 1, 1, 44][..]);
        let ihu = super::Ihu {
            rx_cost: 1.into(),
            interval: 300,
            address: None,
        };
        let buf_len = buf.len();
        assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[1, 0, 0, 2, 0, 44, 3, 4, 5, 6][..]);
        let ihu = super::Ihu {
            rx_cost: 2.into(),
            interval: 44,
            address: Some(Ipv4Addr::new(3, 4, 5, 6).into()),
        };
        let buf_len = buf.len();
        assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(
            &[
                2, 0, 0, 2, 0, 44, 4, 0, 0, 0, 0, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
            ][..],
        );
        let ihu = super::Ihu {
            rx_cost: 2.into(),
            interval: 44,
            address: Some(Ipv6Addr::new(0x400, 0, 5, 6, 0x708, 0x90a, 0xb0c, 0xd0e).into()),
        };
        let buf_len = buf.len();
        assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[3, 0, 1, 2, 0, 42, 7, 8, 9, 10, 11, 12, 13, 14][..]);
        let ihu = super::Ihu {
            rx_cost: 258.into(),
            interval: 42,
            address: Some(Ipv6Addr::new(0xfe80, 0, 0, 0, 0x708, 0x90a, 0xb0c, 0xd0e).into()),
        };
        let buf_len = buf.len();
        assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
        assert_eq!(buf.remaining(), 0);
    }
    #[test]
    fn decode_ignores_invalid_ae_encoding() {
        // AE 4 as it is the first one which should be used in protocol extension, causing this
        // test to fail if we forget to update something
        let mut buf = bytes::BytesMut::from(
            &[
                4, 0, 0, 2, 0, 44, 2, 0, 0, 0, 0, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
            ][..],
        );
        let buf_len = buf.len();
        assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), None);
        // Decode function should still consume the required amount of bytes to leave parser in a
        // good state (assuming the length in the tlv preamble is good).
        assert_eq!(buf.remaining(), 0);
    }
    // Encode then decode an IHU and verify the value survives unchanged.
    #[test]
    fn roundtrip() {
        let mut buf = bytes::BytesMut::new();
        let hello_src = super::Ihu::new(
            16.into(),
            400,
            Some(Ipv6Addr::new(156, 5646, 4164, 1236, 872, 960, 10, 844).into()),
        );
        hello_src.write_bytes(&mut buf);
        let buf_len = buf.len();
        let decoded = super::Ihu::from_bytes(&mut buf, buf_len as u8);
        assert_eq!(Some(hello_src), decoded);
        assert_eq!(buf.remaining(), 0);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/update.rs | mycelium/src/babel/update.rs | //! The babel [Update TLV](https://datatracker.ietf.org/doc/html/rfc8966#name-update).
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
time::Duration,
};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::{metric::Metric, router_id::RouterId, sequence_number::SeqNo, subnet::Subnet};
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Flag bit indicating an [`Update`] TLV establishes a new default prefix.
#[allow(dead_code)]
const UPDATE_FLAG_PREFIX: u8 = 0x80;
/// Flag bit indicating an [`Update`] TLV establishes a new default router-id.
#[allow(dead_code)]
const UPDATE_FLAG_ROUTER_ID: u8 = 0x40;
/// Mask to apply to [`Update`] flags, leaving only valid flags.
const FLAG_MASK: u8 = 0b1100_0000;
/// Base wire size of an [`Update`] without variable length address encoding.
const UPDATE_BASE_WIRE_SIZE: u8 = 10 + RouterId::BYTE_SIZE as u8;
/// Update TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-update.
/// Update TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-update.
#[derive(Debug, Clone, PartialEq)]
pub struct Update {
    /// Flags set in the TLV. Only bits covered by `FLAG_MASK` are ever set.
    flags: u8,
    /// Upper bound in centiseconds after which a new `Update` is sent. Must not be 0.
    interval: u16,
    /// Senders sequence number.
    seqno: SeqNo,
    /// Senders metric for this route.
    metric: Metric,
    /// The [`Subnet`] contained in this update. An update packet itself can contain any allowed
    /// subnet.
    subnet: Subnet,
    /// Router id of the sender. Importantly this is not part of the update itself, though we do
    /// transmit it for now as such.
    router_id: RouterId,
}
impl Update {
/// Create a new `Update`.
pub fn new(
interval: Duration,
seqno: SeqNo,
metric: Metric,
subnet: Subnet,
router_id: RouterId,
) -> Self {
let interval_centiseconds = (interval.as_millis() / 10) as u16;
Self {
// No flags used for now
flags: 0,
interval: interval_centiseconds,
seqno,
metric,
subnet,
router_id,
}
}
/// Returns the [`SeqNo`] of the sender of this `Update`.
pub fn seqno(&self) -> SeqNo {
self.seqno
}
/// Return the [`Metric`] of the sender for this route in the `Update`.
pub fn metric(&self) -> Metric {
self.metric
}
/// Return the [`Subnet`] in this `Update.`
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// Return the [`router-id`](PublicKey) of the router who advertised this [`Prefix`](IpAddr).
pub fn router_id(&self) -> RouterId {
self.router_id
}
/// Calculates the size on the wire of this `Update`.
pub fn wire_size(&self) -> u8 {
let address_bytes = self.subnet.prefix_len().div_ceil(8);
UPDATE_BASE_WIRE_SIZE + address_bytes
}
/// Get the time until a new `Update` for the [`Subnet`] is received at the latest.
pub fn interval(&self) -> Duration {
// Interval is expressed as centiseconds on the wire.
Duration::from_millis(self.interval as u64 * 10)
}
/// Construct an `Update` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `Update`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let ae = src.get_u8();
let flags = src.get_u8() & FLAG_MASK;
let plen = src.get_u8();
// Read "omitted" value, we assume this is 0
let _ = src.get_u8();
let interval = src.get_u16();
let seqno = src.get_u16().into();
let metric = src.get_u16().into();
let prefix_size = plen.div_ceil(8) as usize;
let prefix = match ae {
AE_WILDCARD => {
if prefix_size != 0 {
return None;
}
// TODO: this is a temporary placeholder until we figure out how to handle this
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into()
}
AE_IPV4 => {
if plen > 32 {
return None;
}
let mut raw_ip = [0; 4];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv4Addr::from(raw_ip).into()
}
AE_IPV6 => {
if plen > 128 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv6Addr::from(raw_ip).into()
}
AE_IPV6_LL => {
if plen != 64 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Ipv6Addr::from(raw_ip).into()
}
_ => {
// Invalid AE type, skip reamining data and ignore
trace!("Invalid AE type in update packet, drop packet");
src.advance(len as usize - 10);
return None;
}
};
let subnet = Subnet::new(prefix, plen).ok()?;
let mut router_id_bytes = [0u8; RouterId::BYTE_SIZE];
router_id_bytes.copy_from_slice(&src[..RouterId::BYTE_SIZE]);
src.advance(RouterId::BYTE_SIZE);
let router_id = RouterId::from(router_id_bytes);
trace!("Read update tlv body");
Some(Update {
flags,
interval,
seqno,
metric,
subnet,
router_id,
})
}
/// Encode this `Update` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(match self.subnet.address() {
IpAddr::V4(_) => AE_IPV4,
IpAddr::V6(_) => AE_IPV6,
});
dst.put_u8(self.flags);
dst.put_u8(self.subnet.prefix_len());
// Write "omitted" value, currently not used in our encoding scheme.
dst.put_u8(0);
dst.put_u16(self.interval);
dst.put_u16(self.seqno.into());
dst.put_u16(self.metric.into());
let prefix_len = self.subnet.prefix_len().div_ceil(8) as usize;
match self.subnet.address() {
IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
}
dst.put_slice(&self.router_id.as_bytes()[..])
}
}
#[cfg(test)]
mod tests {
use std::{
net::{Ipv4Addr, Ipv6Addr},
time::Duration,
};
use crate::{router_id::RouterId, subnet::Subnet};
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let ihu = super::Update {
flags: 0b1100_0000,
interval: 400,
seqno: 17.into(),
metric: 25.into(),
subnet: Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([1u8; RouterId::BYTE_SIZE]),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 58);
assert_eq!(
buf[..58],
[
2, 192, 64, 0, 1, 144, 0, 17, 0, 25, 2, 0, 0, 25, 0, 26, 0, 27, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1
]
);
let mut buf = bytes::BytesMut::new();
let ihu = super::Update {
flags: 0b0000_0000,
interval: 600,
seqno: 170.into(),
metric: 256.into(),
subnet: Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 23)
.expect("23 is a valid IPv4 prefix size; qed"),
router_id: RouterId::from([2u8; RouterId::BYTE_SIZE]),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 53);
assert_eq!(
buf[..53],
[
1, 0, 23, 0, 2, 88, 0, 170, 1, 0, 10, 101, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
]
);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(
&[
0, 64, 0, 0, 0, 100, 0, 70, 2, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
][..],
);
let ihu = super::Update {
flags: 0b0100_0000,
interval: 100,
seqno: 70.into(),
metric: 512.into(),
subnet: Subnet::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into(), 0)
.expect("0 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([3u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(
&[
3, 0, 64, 0, 3, 232, 0, 42, 3, 1, 0, 10, 0, 20, 0, 30, 0, 40, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4,
][..],
);
let ihu = super::Update {
flags: 0b0000_0000,
interval: 1000,
seqno: 42.into(),
metric: 769.into(),
subnet: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4 as it is the first one which should be used in protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
4, 0, 64, 0, 0, 44, 2, 0, 0, 10, 10, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
][..],
);
let buf_len = buf.len();
assert_eq!(super::Update::from_bytes(&mut buf, buf_len as u8), None);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_flag_bits() {
// Set all flag bits, only allowed bits should be set on the decoded value
let mut buf = bytes::BytesMut::from(
&[
3, 255, 64, 0, 3, 232, 0, 42, 3, 1, 0, 10, 0, 20, 0, 30, 0, 40, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
][..],
);
let ihu = super::Update {
flags: super::UPDATE_FLAG_PREFIX | super::UPDATE_FLAG_ROUTER_ID,
interval: 1000,
seqno: 42.into(),
metric: 769.into(),
subnet: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let hello_src = super::Update::new(
Duration::from_secs(64),
10.into(),
25.into(),
Subnet::new(
Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed"),
RouterId::from([6; RouterId::BYTE_SIZE]),
);
hello_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::Update::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(hello_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/hello.rs | mycelium/src/babel/hello.rs | //! The babel [Hello TLV](https://datatracker.ietf.org/doc/html/rfc8966#section-4.6.5).
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::sequence_number::SeqNo;
/// Flag bit indicating a [`Hello`] is sent as unicast hello.
const HELLO_FLAG_UNICAST: u16 = 0x8000;
/// Mask to apply to [`Hello`] flags, leaving only valid flags.
const FLAG_MASK: u16 = 0b10000000_00000000;
/// Wire size of a [`Hello`] TLV without TLV header.
const HELLO_WIRE_SIZE: u8 = 6;
/// Hello TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#section-4.6.5.
/// Hello TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#section-4.6.5.
#[derive(Debug, Clone, PartialEq)]
pub struct Hello {
    /// Flags set on the TLV; only bits covered by `FLAG_MASK` are ever set.
    flags: u16,
    /// Sender's hello sequence number.
    seqno: SeqNo,
    /// Interval in centiseconds until the next Hello, at the latest.
    interval: u16,
}
impl Hello {
    /// Create a new unicast hello packet.
    pub fn new_unicast(seqno: SeqNo, interval: u16) -> Self {
        Self {
            flags: HELLO_FLAG_UNICAST,
            seqno,
            interval,
        }
    }
    /// Calculates the size on the wire of this `Hello`.
    pub fn wire_size(&self) -> u8 {
        HELLO_WIRE_SIZE
    }
    /// Construct a `Hello` from wire bytes.
    ///
    /// # Panics
    ///
    /// This function will panic if there are insufficient bytes present in the provided buffer to
    /// decode a complete `Hello`.
    pub fn from_bytes(src: &mut bytes::BytesMut) -> Self {
        // Mask out unknown flag bits; only defined flags are kept.
        let flags = src.get_u16() & FLAG_MASK;
        let seqno = src.get_u16().into();
        let interval = src.get_u16();
        trace!("Read hello tlv body");
        Self {
            flags,
            seqno,
            interval,
        }
    }
    /// Encode this `Hello` tlv as part of a packet.
    pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
        dst.put_u16(self.flags);
        dst.put_u16(self.seqno.into());
        dst.put_u16(self.interval);
    }
}
#[cfg(test)]
mod tests {
    use bytes::Buf;
    // Verify the exact wire layout for multicast and unicast hellos.
    #[test]
    fn encoding() {
        let mut buf = bytes::BytesMut::new();
        let hello = super::Hello {
            flags: 0,
            seqno: 25.into(),
            interval: 400,
        };
        hello.write_bytes(&mut buf);
        assert_eq!(buf.len(), 6);
        assert_eq!(buf[..6], [0, 0, 0, 25, 1, 144]);
        let mut buf = bytes::BytesMut::new();
        let hello = super::Hello {
            flags: super::HELLO_FLAG_UNICAST,
            seqno: 16.into(),
            interval: 4000,
        };
        hello.write_bytes(&mut buf);
        assert_eq!(buf.len(), 6);
        assert_eq!(buf[..6], [128, 0, 0, 16, 15, 160]);
    }
    // Decode hellos with and without the unicast flag set.
    #[test]
    fn decoding() {
        let mut buf = bytes::BytesMut::from(&[0b10000000u8, 0b00000000, 0, 19, 2, 1][..]);
        let hello = super::Hello {
            flags: super::HELLO_FLAG_UNICAST,
            seqno: 19.into(),
            interval: 513,
        };
        assert_eq!(super::Hello::from_bytes(&mut buf), hello);
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[0b00000000u8, 0b00000000, 1, 19, 200, 100][..]);
        let hello = super::Hello {
            flags: 0,
            seqno: 275.into(),
            interval: 51300,
        };
        assert_eq!(super::Hello::from_bytes(&mut buf), hello);
        assert_eq!(buf.remaining(), 0);
    }
    #[test]
    fn decode_ignores_invalid_flag_bits() {
        let mut buf = bytes::BytesMut::from(&[0b10001001u8, 0b00000000, 0, 100, 1, 144][..]);
        let hello = super::Hello {
            flags: super::HELLO_FLAG_UNICAST,
            seqno: 100.into(),
            interval: 400,
        };
        assert_eq!(super::Hello::from_bytes(&mut buf), hello);
        assert_eq!(buf.remaining(), 0);
        let mut buf = bytes::BytesMut::from(&[0b00001001u8, 0b00000000, 0, 100, 1, 144][..]);
        let hello = super::Hello {
            flags: 0,
            seqno: 100.into(),
            interval: 400,
        };
        assert_eq!(super::Hello::from_bytes(&mut buf), hello);
        assert_eq!(buf.remaining(), 0);
    }
    // Encode then decode a hello and verify the value survives unchanged.
    #[test]
    fn roundtrip() {
        let mut buf = bytes::BytesMut::new();
        let hello_src = super::Hello::new_unicast(16.into(), 400);
        hello_src.write_bytes(&mut buf);
        let decoded = super::Hello::from_bytes(&mut buf);
        assert_eq!(hello_src, decoded);
        assert_eq!(buf.remaining(), 0);
    }
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium/src/babel/seqno_request.rs | mycelium/src/babel/seqno_request.rs | use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
num::NonZeroU8,
};
use bytes::{Buf, BufMut};
use tracing::{debug, trace};
use crate::{router_id::RouterId, sequence_number::SeqNo, subnet::Subnet};
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// The default HOP COUNT value used in new SeqNo requests, as per https://datatracker.ietf.org/doc/html/rfc8966#section-3.8.2.1
// SAFETY: value is not zero.
const DEFAULT_HOP_COUNT: NonZeroU8 = NonZeroU8::new(64).unwrap();
/// Base wire size of a [`SeqNoRequest`] without variable length address encoding.
const SEQNO_REQUEST_BASE_WIRE_SIZE: u8 = 6 + RouterId::BYTE_SIZE as u8;
/// Seqno request TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-seqno-request
#[derive(Debug, Clone, PartialEq)]
pub struct SeqNoRequest {
/// The sequence number that is being requested.
seqno: SeqNo,
/// The maximum number of times this TLV may be forwarded, plus 1.
hop_count: NonZeroU8,
/// The router id that is being requested.
router_id: RouterId,
/// The prefix being requested
prefix: Subnet,
}
impl SeqNoRequest {
/// Create a new `SeqNoRequest` for the given [prefix](Subnet) advertised by the [`RouterId`],
/// with the required new [`SeqNo`].
pub fn new(seqno: SeqNo, router_id: RouterId, prefix: Subnet) -> SeqNoRequest {
Self {
seqno,
hop_count: DEFAULT_HOP_COUNT,
router_id,
prefix,
}
}
/// Return the [`prefix`](Subnet) associated with this `SeqNoRequest`.
pub fn prefix(&self) -> Subnet {
self.prefix
}
/// Return the [`RouterId`] associated with this `SeqNoRequest`.
pub fn router_id(&self) -> RouterId {
self.router_id
}
/// Return the requested [`SeqNo`] associated with this `SeqNoRequest`.
pub fn seqno(&self) -> SeqNo {
self.seqno
}
/// Get the hop count for this `SeqNoRequest`.
pub fn hop_count(&self) -> u8 {
self.hop_count.into()
}
/// Decrement the hop count for this `SeqNoRequest`.
///
/// # Panics
///
/// This function will panic if the hop count before calling this function is 1, as that will
/// result in a hop count of 0, which is illegal for a `SeqNoRequest`. It is up to the caller
/// to ensure this condition holds.
pub fn decrement_hop_count(&mut self) {
// SAFETY: The panic from this expect is documented in the function signature.
self.hop_count = NonZeroU8::new(self.hop_count.get() - 1)
.expect("Decrementing a hop count of 1 is not allowed");
}
/// Calculates the size on the wire of this `Update`.
pub fn wire_size(&self) -> u8 {
SEQNO_REQUEST_BASE_WIRE_SIZE + self.prefix.prefix_len().div_ceil(8)
// TODO: Wildcard should be encoded differently
}
/// Construct a `SeqNoRequest` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `SeqNoRequest`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let ae = src.get_u8();
let plen = src.get_u8();
let seqno = src.get_u16().into();
let hop_count = src.get_u8();
// Read "reserved" value, we assume this is 0
let _ = src.get_u8();
let mut router_id_bytes = [0u8; RouterId::BYTE_SIZE];
router_id_bytes.copy_from_slice(&src[..RouterId::BYTE_SIZE]);
src.advance(RouterId::BYTE_SIZE);
let router_id = RouterId::from(router_id_bytes);
let prefix_size = plen.div_ceil(8) as usize;
let prefix = match ae {
AE_WILDCARD => {
if plen != 0 {
return None;
}
// TODO: this is a temporary placeholder until we figure out how to handle this
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into()
}
AE_IPV4 => {
if plen > 32 {
return None;
}
let mut raw_ip = [0; 4];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv4Addr::from(raw_ip).into()
}
AE_IPV6 => {
if plen > 128 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv6Addr::from(raw_ip).into()
}
AE_IPV6_LL => {
if plen != 64 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Ipv6Addr::from(raw_ip).into()
}
_ => {
// Invalid AE type, skip reamining data and ignore
trace!("Invalid AE type in seqno_request packet, drop packet");
src.advance(len as usize - 46);
return None;
}
};
let prefix = Subnet::new(prefix, plen).ok()?;
trace!("Read seqno_request tlv body");
// Make sure hop_count is valid
let hop_count = if let Some(hc) = NonZeroU8::new(hop_count) {
hc
} else {
debug!("Dropping seqno_request as hop_count field is set to 0");
return None;
};
Some(SeqNoRequest {
seqno,
hop_count,
router_id,
prefix,
})
}
/// Encode this `SeqNoRequest` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(match self.prefix.address() {
IpAddr::V4(_) => AE_IPV4,
IpAddr::V6(_) => AE_IPV6,
});
dst.put_u8(self.prefix.prefix_len());
dst.put_u16(self.seqno.into());
dst.put_u8(self.hop_count.into());
// Write "reserved" value.
dst.put_u8(0);
dst.put_slice(&self.router_id.as_bytes()[..]);
let prefix_len = self.prefix.prefix_len().div_ceil(8) as usize;
match self.prefix.address() {
IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
}
}
}
#[cfg(test)]
mod tests {
use std::{
net::{Ipv4Addr, Ipv6Addr},
num::NonZeroU8,
};
use crate::{router_id::RouterId, subnet::Subnet};
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let snr = super::SeqNoRequest {
seqno: 17.into(),
hop_count: NonZeroU8::new(64).unwrap(),
prefix: Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([1u8; RouterId::BYTE_SIZE]),
};
snr.write_bytes(&mut buf);
assert_eq!(buf.len(), 54);
assert_eq!(
buf[..54],
[
2, 64, 0, 17, 64, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 25, 0, 26, 0, 27,
]
);
let mut buf = bytes::BytesMut::new();
let snr = super::SeqNoRequest {
seqno: 170.into(),
hop_count: NonZeroU8::new(111).unwrap(),
prefix: Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 32)
.expect("32 is a valid IPv4 prefix size; qed"),
router_id: RouterId::from([2u8; RouterId::BYTE_SIZE]),
};
snr.write_bytes(&mut buf);
assert_eq!(buf.len(), 50);
assert_eq!(
buf[..50],
[
1, 32, 0, 170, 111, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 10, 101, 4, 1,
]
);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(
&[
0, 0, 0, 0, 1, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
][..],
);
let snr = super::SeqNoRequest {
hop_count: NonZeroU8::new(1).unwrap(),
seqno: 0.into(),
prefix: Subnet::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into(), 0)
.expect("0 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([3u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
Some(snr)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(
&[
3, 64, 0, 42, 232, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 10, 0, 20, 0, 30, 0,
40,
][..],
);
let snr = super::SeqNoRequest {
seqno: 42.into(),
hop_count: NonZeroU8::new(232).unwrap(),
prefix: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
Some(snr)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4 as it is the first one which should be used in protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
4, 64, 0, 0, 44, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21,
][..],
);
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
None
);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_hop_count() {
// Set all flag bits, only allowed bits should be set on the decoded value
let mut buf = bytes::BytesMut::from(
&[
3, 64, 92, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 10, 0, 20, 0, 30, 0,
40,
][..],
);
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
None
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let seqno_src = super::SeqNoRequest::new(
64.into(),
RouterId::from([6; RouterId::BYTE_SIZE]),
Subnet::new(
Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed"),
);
seqno_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(seqno_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/lib.rs | mycelium-api/src/lib.rs | use core::fmt;
use std::{
net::{IpAddr, Ipv6Addr, SocketAddr},
str::FromStr,
sync::Arc,
};
use axum::{
extract::{Path, State},
http::StatusCode,
response::IntoResponse,
routing::{delete, get},
Json, Router,
};
use serde::{de, Deserialize, Deserializer, Serialize};
use tokio::{sync::Mutex, time::Instant};
use tracing::{debug, error};
use mycelium::{
crypto::PublicKey,
endpoint::Endpoint,
metrics::Metrics,
peer_manager::{PeerExists, PeerNotFound, PeerStats},
};
const INFINITE_STR: &str = "infinite";
#[cfg(feature = "message")]
mod message;
#[cfg(feature = "message")]
pub use message::{MessageDestination, MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
pub use rpc::JsonRpc;
// JSON-RPC API implementation
pub mod rpc;
/// Http API server handle. The server is spawned in a background task. If this handle is dropped,
/// the server is terminated.
pub struct Http {
/// Channel to send cancellation to the http api server. We just keep a reference to it since
/// dropping it will also cancel the receiver and thus the server.
_cancel_tx: tokio::sync::oneshot::Sender<()>,
}
#[derive(Clone)]
/// Shared state accessible in HTTP endpoint handlers.
pub struct ServerState<M> {
/// Access to the (`node`)(mycelium::Node) state.
pub node: Arc<Mutex<mycelium::Node<M>>>,
}
impl Http {
/// Spawns a new HTTP API server on the provided listening address.
pub fn spawn<M>(node: Arc<Mutex<mycelium::Node<M>>>, listen_addr: SocketAddr) -> Self
where
M: Metrics + Clone + Send + Sync + 'static,
{
let server_state = ServerState { node };
let admin_routes = Router::new()
.route("/admin", get(get_info))
.route("/admin/peers", get(get_peers).post(add_peer))
.route("/admin/peers/{endpoint}", delete(delete_peer))
.route("/admin/routes/selected", get(get_selected_routes))
.route("/admin/routes/fallback", get(get_fallback_routes))
.route("/admin/routes/queried", get(get_queried_routes))
.route("/admin/routes/no_route", get(get_no_route_entries))
.route(
"/admin/proxy",
get(list_proxies)
.post(connect_proxy)
.delete(disconnect_proxy),
)
.route("/admin/proxy/probe", get(start_probe).delete(stop_probe))
.route("/pubkey/{ip}", get(get_pubk_from_ip))
.with_state(server_state.clone());
let app = Router::new().nest("/api/v1", admin_routes);
#[cfg(feature = "message")]
let app = app.nest("/api/v1", message::message_router_v1(server_state));
let (_cancel_tx, cancel_rx) = tokio::sync::oneshot::channel();
tokio::spawn(async move {
let listener = match tokio::net::TcpListener::bind(listen_addr).await {
Ok(listener) => listener,
Err(e) => {
error!(err=%e, "Failed to bind listener for Http Api server");
error!("API disabled");
return;
}
};
let server =
axum::serve(listener, app.into_make_service()).with_graceful_shutdown(async {
cancel_rx.await.ok();
});
if let Err(e) = server.await {
error!(err=%e, "Http API server error");
}
});
Http { _cancel_tx }
}
}
/// Get the stats of the current known peers
async fn get_peers<M>(State(state): State<ServerState<M>>) -> Json<Vec<PeerStats>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Fetching peer stats");
Json(state.node.lock().await.peer_info())
}
/// Payload of an add_peer request
#[derive(Deserialize, Serialize)]
pub struct AddPeer {
/// The endpoint used to connect to the peer
pub endpoint: String,
}
/// Add a new peer to the system
async fn add_peer<M>(
State(state): State<ServerState<M>>,
Json(payload): Json<AddPeer>,
) -> Result<StatusCode, (StatusCode, String)>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(
peer.endpoint = payload.endpoint,
"Attempting to add peer to the system"
);
let endpoint = match Endpoint::from_str(&payload.endpoint) {
Ok(endpoint) => endpoint,
Err(e) => return Err((StatusCode::BAD_REQUEST, e.to_string())),
};
match state.node.lock().await.add_peer(endpoint) {
Ok(()) => Ok(StatusCode::NO_CONTENT),
Err(PeerExists) => Err((
StatusCode::CONFLICT,
"A peer identified by that endpoint already exists".to_string(),
)),
}
}
/// remove an existing peer from the system
async fn delete_peer<M>(
State(state): State<ServerState<M>>,
Path(endpoint): Path<String>,
) -> Result<StatusCode, (StatusCode, String)>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(peer.endpoint=%endpoint, "Attempting to remove peer from the system");
let endpoint = match Endpoint::from_str(&endpoint) {
Ok(endpoint) => endpoint,
Err(e) => return Err((StatusCode::BAD_REQUEST, e.to_string())),
};
match state.node.lock().await.remove_peer(endpoint) {
Ok(()) => Ok(StatusCode::NO_CONTENT),
Err(PeerNotFound) => Err((
StatusCode::NOT_FOUND,
"A peer identified by that endpoint does not exist".to_string(),
)),
}
}
/// Alias to a [`Metric`](crate::metric::Metric) for serialization in the API.
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
pub enum Metric {
/// Finite metric
Value(u16),
/// Infinite metric
Infinite,
}
/// Info about a route. This uses base types only to avoid having to introduce too many Serialize
/// bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct Route {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// Next hop of the route, in the underlay.
pub next_hop: String,
/// Computed metric of the route.
pub metric: Metric,
/// Sequence number of the route.
pub seqno: u16,
}
/// List all currently selected routes.
async fn get_selected_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<Route>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading selected routes");
let routes = state
.node
.lock()
.await
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Json(routes)
}
/// List all active fallback routes.
async fn get_fallback_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<Route>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading fallback routes");
let routes = state
.node
.lock()
.await
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Json(routes)
}
/// Info about a queried subnet. This uses base types only to avoid having to introduce too
/// many Serialize bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct QueriedSubnet {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// The amount of time left before the query expires.
pub expiration: String,
}
async fn get_queried_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<QueriedSubnet>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading queried subnets");
let queries = state
.node
.lock()
.await
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(Instant::now())
.as_secs()
.to_string(),
})
.collect();
Json(queries)
}
/// Info about a subnet with no route. This uses base types only to avoid having to introduce too
/// many Serialize bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct NoRouteSubnet {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// The amount of time left before the query expires.
pub expiration: String,
}
async fn get_no_route_entries<M>(State(state): State<ServerState<M>>) -> Json<Vec<NoRouteSubnet>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading no-route subnets");
let queries = state
.node
.lock()
.await
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(Instant::now())
.as_secs()
.to_string(),
})
.collect();
Json(queries)
}
async fn list_proxies<M>(State(state): State<ServerState<M>>) -> Json<Vec<Ipv6Addr>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Listing known proxies");
Json(state.node.lock().await.known_proxies())
}
#[derive(Deserialize)]
pub struct ConnectProxyInput {
remote: Option<SocketAddr>,
}
async fn connect_proxy<M>(
State(state): State<ServerState<M>>,
Json(ConnectProxyInput { remote }): Json<ConnectProxyInput>,
) -> impl IntoResponse
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Attempting to connect remote proxy");
state
.node
.lock()
.await
.connect_proxy(remote)
.await
.map(Json)
// An error indicates we don't have a valid good auto discovered proxy -> No proxy no
// content
.map_err(|_| StatusCode::NOT_FOUND)
}
async fn disconnect_proxy<M>(State(state): State<ServerState<M>>) -> impl IntoResponse
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Disconnecting from connect remote proxy");
state.node.lock().await.disconnect_proxy();
StatusCode::NO_CONTENT
}
async fn start_probe<M>(State(state): State<ServerState<M>>) -> impl IntoResponse
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Starting proxy probe");
state.node.lock().await.start_proxy_scan();
StatusCode::NO_CONTENT
}
async fn stop_probe<M>(State(state): State<ServerState<M>>) -> impl IntoResponse
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Stopping proxy probe");
state.node.lock().await.stop_proxy_scan();
StatusCode::NO_CONTENT
}
/// General info about a node.
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Info {
/// The overlay subnet in use by the node.
pub node_subnet: String,
/// The public key of the node
pub node_pubkey: PublicKey,
}
/// Get general info about the node.
async fn get_info<M>(State(state): State<ServerState<M>>) -> Json<Info>
where
M: Metrics + Clone + Send + Sync + 'static,
{
let info = state.node.lock().await.info();
Json(Info {
node_subnet: info.node_subnet.to_string(),
node_pubkey: info.node_pubkey,
})
}
/// Public key from a node.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PubKey {
/// The public key from the node
pub public_key: PublicKey,
}
/// Get public key from IP.
async fn get_pubk_from_ip<M>(
State(state): State<ServerState<M>>,
Path(ip): Path<IpAddr>,
) -> Result<Json<PubKey>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
match state.node.lock().await.get_pubkey_from_ip(ip) {
Some(pubkey) => Ok(Json(PubKey { public_key: pubkey })),
None => Err(StatusCode::NOT_FOUND),
}
}
impl Serialize for Metric {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Self::Infinite => serializer.serialize_str(INFINITE_STR),
Self::Value(v) => serializer.serialize_u16(*v),
}
}
}
impl<'de> Deserialize<'de> for Metric {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MetricVisitor;
impl serde::de::Visitor<'_> for MetricVisitor {
type Value = Metric;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a string or a u16")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
match value {
INFINITE_STR => Ok(Metric::Infinite),
_ => Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(value),
&format!("expected '{INFINITE_STR}'").as_str(),
)),
}
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
if value <= u16::MAX as u64 {
Ok(Metric::Value(value as u16))
} else {
Err(E::invalid_value(
de::Unexpected::Unsigned(value),
&"expected a non-negative integer within the range of u16",
))
}
}
}
deserializer.deserialize_any(MetricVisitor)
}
}
impl fmt::Display for Metric {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Value(val) => write!(f, "{val}"),
Self::Infinite => write!(f, "{INFINITE_STR}"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
#[test]
fn finite_metric_serialization() {
let metric = super::Metric::Value(10);
let s = serde_json::to_string(&metric).expect("can encode finite metric");
assert_eq!("10", s);
}
#[test]
fn infinite_metric_serialization() {
let metric = super::Metric::Infinite;
let s = serde_json::to_string(&metric).expect("can encode infinite metric");
assert_eq!(format!("\"{INFINITE_STR}\""), s);
}
#[test]
fn test_deserialize_metric() {
// Test deserialization of a Metric::Value
let json_value = json!(20);
let metric: Metric = serde_json::from_value(json_value).unwrap();
assert_eq!(metric, Metric::Value(20));
// Test deserialization of a Metric::Infinite
let json_infinite = json!(INFINITE_STR);
let metric: Metric = serde_json::from_value(json_infinite).unwrap();
assert_eq!(metric, Metric::Infinite);
// Test deserialization of an invalid metric
let json_invalid = json!("invalid");
let result: Result<Metric, _> = serde_json::from_value(json_invalid);
assert!(result.is_err());
}
#[test]
fn test_deserialize_route() {
let json_data = r#"
[
{"subnet":"406:1d77:2438:aa7c::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":20,"seqno":0},
{"subnet":"407:8458:dbf5:4ed7::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":174,"seqno":0},
{"subnet":"408:7ba3:3a4d:808a::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":"infinite","seqno":0}
]
"#;
let routes: Vec<Route> = serde_json::from_str(json_data).unwrap();
assert_eq!(routes[0], Route {
subnet: "406:1d77:2438:aa7c::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Value(20),
seqno: 0
});
assert_eq!(routes[1], Route {
subnet: "407:8458:dbf5:4ed7::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Value(174),
seqno: 0
});
assert_eq!(routes[2], Route {
subnet: "408:7ba3:3a4d:808a::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Infinite,
seqno: 0
});
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc.rs | mycelium-api/src/rpc.rs | //! JSON-RPC API implementation for Mycelium
mod spec;
use std::net::{Ipv6Addr, SocketAddr};
#[cfg(feature = "message")]
use std::ops::Deref;
use std::str::FromStr;
use std::sync::Arc;
#[cfg(feature = "message")]
use base64::Engine;
use jsonrpsee::core::RpcResult;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{ServerBuilder, ServerHandle};
use jsonrpsee::types::{ErrorCode, ErrorObject};
#[cfg(feature = "message")]
use mycelium::subnet::Subnet;
#[cfg(feature = "message")]
use std::path::PathBuf;
use tokio::sync::Mutex;
#[cfg(feature = "message")]
use tokio::time::Duration;
use tracing::debug;
use crate::{Info, Metric, NoRouteSubnet, QueriedSubnet, Route, ServerState};
use mycelium::crypto::PublicKey;
use mycelium::endpoint::Endpoint;
use mycelium::metrics::Metrics;
use mycelium::peer_manager::{PeerExists, PeerNotFound, PeerStats};
use self::spec::OPENRPC_SPEC;
// Define the base RPC API trait using jsonrpsee macros
#[rpc(server)]
pub trait MyceliumApi {
// Admin methods
#[method(name = "getInfo")]
async fn get_info(&self) -> RpcResult<Info>;
#[method(name = "getPublicKeyFromIp")]
async fn get_pubkey_from_ip(&self, ip: String) -> RpcResult<PublicKey>;
// Peer methods
#[method(name = "getPeers")]
async fn get_peers(&self) -> RpcResult<Vec<PeerStats>>;
#[method(name = "addPeer")]
async fn add_peer(&self, endpoint: String) -> RpcResult<bool>;
#[method(name = "deletePeer")]
async fn delete_peer(&self, endpoint: String) -> RpcResult<bool>;
// Route methods
#[method(name = "getSelectedRoutes")]
async fn get_selected_routes(&self) -> RpcResult<Vec<Route>>;
#[method(name = "getFallbackRoutes")]
async fn get_fallback_routes(&self) -> RpcResult<Vec<Route>>;
#[method(name = "getQueriedSubnets")]
async fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>>;
#[method(name = "getNoRouteEntries")]
async fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>>;
// Proxy methods
#[method(name = "getProxies")]
async fn get_proxies(&self) -> RpcResult<Vec<Ipv6Addr>>;
#[method(name = "connectProxy")]
async fn connect_proxy(&self, remote: Option<SocketAddr>) -> RpcResult<SocketAddr>;
#[method(name = "disconnectProxy")]
async fn disconnect_proxy(&self) -> RpcResult<bool>;
#[method(name = "startProxyProbe")]
async fn start_proxy_probe(&self) -> RpcResult<bool>;
#[method(name = "stopProxyProbe")]
async fn stop_proxy_probe(&self) -> RpcResult<bool>;
// OpenRPC discovery
#[method(name = "rpc.discover")]
async fn discover(&self) -> RpcResult<serde_json::Value>;
}
// Define a separate message API trait that is only compiled when the message feature is enabled
#[cfg(feature = "message")]
#[rpc(server)]
pub trait MyceliumMessageApi {
// Message methods
#[method(name = "popMessage")]
async fn pop_message(
&self,
peek: Option<bool>,
timeout: Option<u64>,
topic: Option<String>,
) -> RpcResult<Option<crate::message::MessageReceiveInfo>>;
#[method(name = "pushMessage")]
async fn push_message(
&self,
message: crate::message::MessageSendInfo,
reply_timeout: Option<u64>,
) -> RpcResult<crate::message::PushMessageResponse>;
#[method(name = "pushMessageReply")]
async fn push_message_reply(
&self,
id: String,
message: crate::message::MessageSendInfo,
) -> RpcResult<bool>;
#[method(name = "getMessageInfo")]
async fn get_message_info(&self, id: String) -> RpcResult<mycelium::message::MessageInfo>;
// Topic configuration methods
#[method(name = "getDefaultTopicAction")]
async fn get_default_topic_action(&self) -> RpcResult<bool>;
#[method(name = "setDefaultTopicAction")]
async fn set_default_topic_action(&self, accept: bool) -> RpcResult<bool>;
#[method(name = "getTopics")]
async fn get_topics(&self) -> RpcResult<Vec<String>>;
#[method(name = "addTopic")]
async fn add_topic(&self, topic: String) -> RpcResult<bool>;
#[method(name = "removeTopic")]
async fn remove_topic(&self, topic: String) -> RpcResult<bool>;
#[method(name = "getTopicSources")]
async fn get_topic_sources(&self, topic: String) -> RpcResult<Vec<String>>;
#[method(name = "addTopicSource")]
async fn add_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool>;
#[method(name = "removeTopicSource")]
async fn remove_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool>;
#[method(name = "getTopicForwardSocket")]
async fn get_topic_forward_socket(&self, topic: String) -> RpcResult<Option<String>>;
#[method(name = "setTopicForwardSocket")]
async fn set_topic_forward_socket(&self, topic: String, socket_path: String)
-> RpcResult<bool>;
#[method(name = "removeTopicForwardSocket")]
async fn remove_topic_forward_socket(&self, topic: String) -> RpcResult<bool>;
}
// Implement the API trait
#[derive(Clone)]
struct RPCApi<M> {
state: Arc<ServerState<M>>,
}
// Implement the base API trait
#[async_trait::async_trait]
impl<M> MyceliumApiServer for RPCApi<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
async fn get_info(&self) -> RpcResult<Info> {
debug!("Getting node info via RPC");
let node_info = self.state.node.lock().await.info();
Ok(Info {
node_subnet: node_info.node_subnet.to_string(),
node_pubkey: node_info.node_pubkey,
})
}
async fn get_pubkey_from_ip(&self, ip_str: String) -> RpcResult<PublicKey> {
debug!(ip = %ip_str, "Getting public key from IP via RPC");
let ip = std::net::IpAddr::from_str(&ip_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32007)))?;
let pubkey = self.state.node.lock().await.get_pubkey_from_ip(ip);
match pubkey {
Some(pk) => Ok(pk),
None => Err(ErrorObject::from(ErrorCode::from(-32008))),
}
}
async fn get_peers(&self) -> RpcResult<Vec<PeerStats>> {
debug!("Fetching peer stats via RPC");
let peers = self.state.node.lock().await.peer_info();
Ok(peers)
}
async fn add_peer(&self, endpoint_str: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint_str,
"Attempting to add peer to the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32009)))?;
match self.state.node.lock().await.add_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerExists) => Err(ErrorObject::from(ErrorCode::from(-32010))),
}
}
async fn delete_peer(&self, endpoint_str: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint_str,
"Attempting to remove peer from the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32012)))?;
match self.state.node.lock().await.remove_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerNotFound) => Err(ErrorObject::from(ErrorCode::from(-32011))),
}
}
async fn get_selected_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading selected routes via RPC");
let routes = self
.state
.node
.lock()
.await
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
async fn get_fallback_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading fallback routes via RPC");
let routes = self
.state
.node
.lock()
.await
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
async fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>> {
debug!("Loading queried subnets via RPC");
let queries = self
.state
.node
.lock()
.await
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(queries)
}
async fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>> {
debug!("Loading no route entries via RPC");
let entries = self
.state
.node
.lock()
.await
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(entries)
}
async fn get_proxies(&self) -> RpcResult<Vec<Ipv6Addr>> {
debug!("Listing known proxies via RPC");
let proxies = self.state.node.lock().await.known_proxies();
Ok(proxies)
}
async fn connect_proxy(&self, remote: Option<SocketAddr>) -> RpcResult<SocketAddr> {
debug!(?remote, "Attempting to connect remote proxy via RPC");
// Attempt to connect; map error to "no proxy available/valid" like HTTP 404 counterpart
let res = self.state.node.lock().await.connect_proxy(remote).await;
match res {
Ok(addr) => Ok(addr),
Err(_) => Err(ErrorObject::from(ErrorCode::from(-32032))),
}
}
async fn disconnect_proxy(&self) -> RpcResult<bool> {
debug!("Disconnecting from remote proxy via RPC");
self.state.node.lock().await.disconnect_proxy();
Ok(true)
}
async fn start_proxy_probe(&self) -> RpcResult<bool> {
debug!("Starting proxy probe via RPC");
self.state.node.lock().await.start_proxy_scan();
Ok(true)
}
async fn stop_proxy_probe(&self) -> RpcResult<bool> {
debug!("Stopping proxy probe via RPC");
self.state.node.lock().await.stop_proxy_scan();
Ok(true)
}
async fn discover(&self) -> RpcResult<serde_json::Value> {
let spec = serde_json::from_str::<serde_json::Value>(OPENRPC_SPEC)
.expect("Failed to parse OpenRPC spec");
Ok(spec)
}
}
// Implement the message API trait only when the message feature is enabled
#[cfg(feature = "message")]
#[async_trait::async_trait]
impl<M> MyceliumMessageApiServer for RPCApi<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
async fn pop_message(
&self,
peek: Option<bool>,
timeout: Option<u64>,
topic: Option<String>,
) -> RpcResult<Option<crate::message::MessageReceiveInfo>> {
debug!(
"Attempt to get message via RPC, peek {}, timeout {} seconds",
peek.unwrap_or(false),
timeout.unwrap_or(0)
);
let topic_bytes = if let Some(topic_str) = topic {
Some(
base64::engine::general_purpose::STANDARD
.decode(topic_str.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32013)))?,
)
} else {
None
};
// A timeout of 0 seconds essentially means get a message if there is one, and return
// immediately if there isn't.
let result = tokio::time::timeout(
Duration::from_secs(timeout.unwrap_or(0)),
self.state
.node
.lock()
.await
.get_message(!peek.unwrap_or(false), topic_bytes),
)
.await;
match result {
Ok(m) => Ok(Some(crate::message::MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() {
None
} else {
Some(m.topic)
},
payload: m.data,
})),
Err(_) => Ok(None),
}
}
async fn push_message(
&self,
message: crate::message::MessageSendInfo,
reply_timeout: Option<u64>,
) -> RpcResult<crate::message::PushMessageResponse> {
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
let result = self.state.node.lock().await.push_message(
dst,
message.payload,
message.topic,
DEFAULT_MESSAGE_TRY_DURATION,
reply_timeout.is_some(),
);
let (id, sub) = match result {
Ok((id, sub)) => (id, sub),
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32015)));
}
};
if reply_timeout.is_none() {
// If we don't wait for the reply just return here.
return Ok(crate::message::PushMessageResponse::Id(
crate::message::MessageIdReply { id },
));
}
let mut sub = sub.unwrap();
// Wait for reply with timeout
tokio::select! {
sub_res = sub.changed() => {
match sub_res {
Ok(_) => {
if let Some(m) = sub.borrow().deref() {
Ok(crate::message::PushMessageResponse::Reply(crate::message::MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
payload: m.data.clone(),
}))
} else {
// This happens if a none value is send, which should not happen.
Err(ErrorObject::from(ErrorCode::from(-32016)))
}
}
Err(_) => {
// This happens if the sender drops, which should not happen.
Err(ErrorObject::from(ErrorCode::from(-32017)))
}
}
},
_ = tokio::time::sleep(Duration::from_secs(reply_timeout.unwrap_or(0))) => {
// Timeout expired while waiting for reply
Ok(crate::message::PushMessageResponse::Id(crate::message::MessageIdReply { id }))
}
}
}
async fn push_message_reply(
&self,
id: String,
message: crate::message::MessageSendInfo,
) -> RpcResult<bool> {
let message_id = match mycelium::message::MessageId::from_hex(id.as_bytes()) {
Ok(id) => id,
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32018)));
}
};
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.id=id,
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new reply to message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
self.state.node.lock().await.reply_message(
message_id,
dst,
message.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
Ok(true)
}
async fn get_message_info(&self, id: String) -> RpcResult<mycelium::message::MessageInfo> {
let message_id = match mycelium::message::MessageId::from_hex(id.as_bytes()) {
Ok(id) => id,
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32020)));
}
};
debug!(message.id=%id, "Fetching message status via RPC");
let result = self.state.node.lock().await.message_status(message_id);
match result {
Some(info) => Ok(info),
None => Err(ErrorObject::from(ErrorCode::from(-32019))),
}
}
// Topic configuration methods implementation
async fn get_default_topic_action(&self) -> RpcResult<bool> {
debug!("Getting default topic action via RPC");
let accept = self.state.node.lock().await.unconfigure_topic_action();
Ok(accept)
}
async fn set_default_topic_action(&self, accept: bool) -> RpcResult<bool> {
debug!(accept=%accept, "Setting default topic action via RPC");
self.state
.node
.lock()
.await
.accept_unconfigured_topic(accept);
Ok(true)
}
async fn get_topics(&self) -> RpcResult<Vec<String>> {
debug!("Getting all whitelisted topics via RPC");
let topics = self
.state
.node
.lock()
.await
.topics()
.into_iter()
.map(|topic| base64::engine::general_purpose::STANDARD.encode(&topic))
.collect();
// For now, we'll return an empty list
Ok(topics)
}
async fn add_topic(&self, topic: String) -> RpcResult<bool> {
debug!("Adding topic to whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.add_topic_whitelist(topic_bytes);
Ok(true)
}
async fn remove_topic(&self, topic: String) -> RpcResult<bool> {
debug!("Removing topic from whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.remove_topic_whitelist(topic_bytes);
Ok(true)
}
async fn get_topic_sources(&self, topic: String) -> RpcResult<Vec<String>> {
debug!("Getting sources for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let subnets = self
.state
.node
.lock()
.await
.topic_allowed_sources(&topic_bytes)
.ok_or(ErrorObject::from(ErrorCode::from(-32030)))?
.into_iter()
.map(|subnet| subnet.to_string())
.collect();
Ok(subnets)
}
async fn add_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool> {
debug!("Adding source to topic whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
// Parse the subnet
let subnet_obj = subnet
.parse::<Subnet>()
.map_err(|_| ErrorObject::from(ErrorCode::from(-32023)))?;
self.state
.node
.lock()
.await
.add_topic_whitelist_src(topic_bytes, subnet_obj);
Ok(true)
}
async fn remove_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool> {
debug!("Removing source from topic whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
// Parse the subnet
let subnet_obj = subnet
.parse::<Subnet>()
.map_err(|_| ErrorObject::from(ErrorCode::from(-32023)))?;
self.state
.node
.lock()
.await
.remove_topic_whitelist_src(topic_bytes, subnet_obj);
Ok(true)
}
async fn get_topic_forward_socket(&self, topic: String) -> RpcResult<Option<String>> {
debug!("Getting forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let node = self.state.node.lock().await;
let socket_path = node
.get_topic_forward_socket(&topic_bytes)
.map(|p| p.to_string_lossy().to_string());
Ok(socket_path)
}
async fn set_topic_forward_socket(
&self,
topic: String,
socket_path: String,
) -> RpcResult<bool> {
debug!("Setting forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let path = PathBuf::from(socket_path);
self.state
.node
.lock()
.await
.set_topic_forward_socket(topic_bytes, path);
Ok(true)
}
async fn remove_topic_forward_socket(&self, topic: String) -> RpcResult<bool> {
debug!("Removing forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.delete_topic_forward_socket(topic_bytes);
Ok(true)
}
}
/// JSON-RPC API server handle. The server is spawned in a background task. If this handle is dropped,
/// the server is terminated.
pub struct JsonRpc {
/// JSON-RPC server handle
_server: ServerHandle,
}
impl JsonRpc {
/// Spawns a new JSON-RPC API server on the provided listening address.
///
/// # Arguments
///
/// * `node` - The Mycelium node to use for the JSON-RPC API
/// * `listen_addr` - The address to listen on for JSON-RPC requests
///
/// # Returns
///
/// A `JsonRpc` instance that will be dropped when the server is terminated
pub async fn spawn<M>(node: Arc<Mutex<mycelium::Node<M>>>, listen_addr: SocketAddr) -> Self
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(%listen_addr, "Starting JSON-RPC server");
let server_state = Arc::new(ServerState { node });
// Create the server builder
let server = ServerBuilder::default()
.build(listen_addr)
.await
.expect("Failed to build JSON-RPC server");
// Create the API implementation
let api = RPCApi {
state: server_state,
};
// Register the API implementation
// Create the RPC module
#[allow(unused_mut)]
let mut methods = MyceliumApiServer::into_rpc(api.clone());
// When the message feature is enabled, merge the message RPC module
#[cfg(feature = "message")]
{
let message_methods = MyceliumMessageApiServer::into_rpc(api);
methods
.merge(message_methods)
.expect("Can merge message API into base API");
}
// Start the server with the appropriate module(s)
let handle = server.start(methods);
debug!(%listen_addr, "JSON-RPC server started successfully");
JsonRpc { _server: handle }
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/message.rs | mycelium-api/src/message.rs | use std::{net::IpAddr, ops::Deref, time::Duration};
use axum::{
extract::{Path, Query, State},
http::StatusCode,
routing::{delete, get, post},
Json, Router,
};
use serde::{Deserialize, Serialize};
use tracing::debug;
use mycelium::{
crypto::PublicKey,
message::{MessageId, MessageInfo},
metrics::Metrics,
subnet::Subnet,
};
use std::path::PathBuf;
use super::ServerState;
/// Default amount of time to try and send a message if it is not explicitly specified.
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
/// Return a router which has message endpoints and their handlers mounted.
pub fn message_router_v1<M>(server_state: ServerState<M>) -> Router
where
M: Metrics + Clone + Send + Sync + 'static,
{
Router::new()
.route("/messages", get(get_message).post(push_message))
.route("/messages/status/{id}", get(message_status))
.route("/messages/reply/{id}", post(reply_message))
// Topic configuration endpoints
.route(
"/messages/topics/default",
get(get_default_topic_action).put(set_default_topic_action),
)
.route("/messages/topics", get(get_topics).post(add_topic))
.route("/messages/topics/{topic}", delete(remove_topic))
.route(
"/messages/topics/{topic}/sources",
get(get_topic_sources).post(add_topic_source),
)
.route(
"/messages/topics/{topic}/sources/{subnet}",
delete(remove_topic_source),
)
.route(
"/messages/topics/{topic}/forward",
get(get_topic_forward_socket)
.put(set_topic_forward_socket)
.delete(remove_topic_forward_socket),
)
.with_state(server_state)
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageSendInfo {
pub dst: MessageDestination,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
pub topic: Option<Vec<u8>>,
#[serde(with = "base64::binary")]
pub payload: Vec<u8>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum MessageDestination {
Ip(IpAddr),
Pk(PublicKey),
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageReceiveInfo {
pub id: MessageId,
pub src_ip: IpAddr,
pub src_pk: PublicKey,
pub dst_ip: IpAddr,
pub dst_pk: PublicKey,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
pub topic: Option<Vec<u8>>,
#[serde(with = "base64::binary")]
pub payload: Vec<u8>,
}
impl MessageDestination {
/// Get the IP address of the destination.
fn ip(self) -> IpAddr {
match self {
MessageDestination::Ip(ip) => ip,
MessageDestination::Pk(pk) => IpAddr::V6(pk.address()),
}
}
}
#[derive(Deserialize)]
struct GetMessageQuery {
peek: Option<bool>,
timeout: Option<u64>,
/// Optional filter for start of the message, base64 encoded.
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
topic: Option<Vec<u8>>,
}
impl GetMessageQuery {
/// Did the query indicate we should peek the message instead of pop?
fn peek(&self) -> bool {
matches!(self.peek, Some(true))
}
/// Amount of seconds to hold and try and get values.
fn timeout_secs(&self) -> u64 {
self.timeout.unwrap_or(0)
}
}
async fn get_message<M>(
State(state): State<ServerState<M>>,
Query(query): Query<GetMessageQuery>,
) -> Result<Json<MessageReceiveInfo>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(
"Attempt to get message, peek {}, timeout {} seconds",
query.peek(),
query.timeout_secs()
);
// A timeout of 0 seconds essentially means get a message if there is one, and return
// immediatly if there isn't. This is the result of the implementation of Timeout, which does a
// poll of the internal future first, before polling the delay.
tokio::time::timeout(
Duration::from_secs(query.timeout_secs()),
state
.node
.lock()
.await
.get_message(!query.peek(), query.topic),
)
.await
.or(Err(StatusCode::NO_CONTENT))
.map(|m| {
Json(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() {
None
} else {
Some(m.topic)
},
payload: m.data,
})
})
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageIdReply {
pub id: MessageId,
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(untagged)]
pub enum PushMessageResponse {
Reply(MessageReceiveInfo),
Id(MessageIdReply),
}
#[derive(Clone, Deserialize)]
struct PushMessageQuery {
reply_timeout: Option<u64>,
}
impl PushMessageQuery {
/// The user requested to wait for the reply or not.
fn await_reply(&self) -> bool {
self.reply_timeout.is_some()
}
/// Amount of seconds to wait for the reply.
fn timeout(&self) -> u64 {
self.reply_timeout.unwrap_or(0)
}
}
async fn push_message<M>(
State(state): State<ServerState<M>>,
Query(query): Query<PushMessageQuery>,
Json(message_info): Json<MessageSendInfo>,
) -> Result<(StatusCode, Json<PushMessageResponse>), StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
let dst = message_info.dst.ip();
debug!(
message.dst=%dst,
message.len=message_info.payload.len(),
"Pushing new message to stack",
);
let (id, sub) = match state.node.lock().await.push_message(
dst,
message_info.payload,
message_info.topic,
DEFAULT_MESSAGE_TRY_DURATION,
query.await_reply(),
) {
Ok((id, sub)) => (id, sub),
Err(_) => {
return Err(StatusCode::BAD_REQUEST);
}
};
if !query.await_reply() {
// If we don't wait for the reply just return here.
return Ok((
StatusCode::CREATED,
Json(PushMessageResponse::Id(MessageIdReply { id })),
));
}
let mut sub = sub.unwrap();
tokio::select! {
sub_res = sub.changed() => {
match sub_res {
Ok(_) => {
if let Some(m) = sub.borrow().deref() {
Ok((StatusCode::OK, Json(PushMessageResponse::Reply(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
payload: m.data.clone(),
}))))
} else {
// This happens if a none value is send, which should not happen.
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
Err(_) => {
// This happens if the sender drops, which should not happen.
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
},
_ = tokio::time::sleep(Duration::from_secs(query.timeout())) => {
// Timeout expired while waiting for reply
Ok((StatusCode::REQUEST_TIMEOUT, Json(PushMessageResponse::Id(MessageIdReply { id }))))
}
}
}
async fn reply_message<M>(
State(state): State<ServerState<M>>,
Path(id): Path<MessageId>,
Json(message_info): Json<MessageSendInfo>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
let dst = message_info.dst.ip();
debug!(
message.id=id.as_hex(),
message.dst=%dst,
message.len=message_info.payload.len(),
"Pushing new reply to message stack",
);
state.node.lock().await.reply_message(
id,
dst,
message_info.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
StatusCode::NO_CONTENT
}
async fn message_status<M>(
State(state): State<ServerState<M>>,
Path(id): Path<MessageId>,
) -> Result<Json<MessageInfo>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(message.id=%id.as_hex(), "Fetching message status");
state
.node
.lock()
.await
.message_status(id)
.ok_or(StatusCode::NOT_FOUND)
.map(Json)
}
/// Module to implement base64 decoding and encoding
/// Sourced from https://users.rust-lang.org/t/serialize-a-vec-u8-to-json-as-base64/57781, with some
/// addaptions to work with the new version of the base64 crate
mod base64 {
use base64::engine::{GeneralPurpose, GeneralPurposeConfig};
use base64::{alphabet, Engine};
const B64ENGINE: GeneralPurpose = base64::engine::general_purpose::GeneralPurpose::new(
&alphabet::STANDARD,
GeneralPurposeConfig::new(),
);
pub fn encode(input: &[u8]) -> String {
B64ENGINE.encode(input)
}
pub fn decode(input: &[u8]) -> Result<Vec<u8>, base64::DecodeError> {
B64ENGINE.decode(input)
}
pub mod binary {
use super::B64ENGINE;
use base64::Engine;
use serde::{Deserialize, Serialize};
use serde::{Deserializer, Serializer};
pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
let base64 = B64ENGINE.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
B64ENGINE
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}
pub mod optional_binary {
use super::B64ENGINE;
use base64::Engine;
use serde::{Deserialize, Serialize};
use serde::{Deserializer, Serializer};
pub fn serialize<S: Serializer>(v: &Option<Vec<u8>>, s: S) -> Result<S::Ok, S::Error> {
if let Some(v) = v {
let base64 = B64ENGINE.encode(v);
String::serialize(&base64, s)
} else {
<Option<String>>::serialize(&None, s)
}
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Vec<u8>>, D::Error> {
if let Some(base64) = <Option<String>>::deserialize(d)? {
B64ENGINE
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
.map(Option::Some)
} else {
Ok(None)
}
}
}
}
// Topic configuration API
/// Response for the default topic action
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DefaultTopicActionResponse {
accept: bool,
}
/// Request to set the default topic action
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DefaultTopicActionRequest {
accept: bool,
}
/// Request to add a source to a topic whitelist
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TopicSourceRequest {
subnet: String,
}
/// Request to set a forward socket for a topic
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TopicForwardSocketRequest {
socket_path: String,
}
/// Get the default topic action (accept or reject)
async fn get_default_topic_action<M>(
State(state): State<ServerState<M>>,
) -> Json<DefaultTopicActionResponse>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting default topic action");
let accept = state.node.lock().await.unconfigure_topic_action();
Json(DefaultTopicActionResponse { accept })
}
/// Set the default topic action (accept or reject)
async fn set_default_topic_action<M>(
State(state): State<ServerState<M>>,
Json(request): Json<DefaultTopicActionRequest>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(accept=%request.accept, "Setting default topic action");
state
.node
.lock()
.await
.accept_unconfigured_topic(request.accept);
StatusCode::NO_CONTENT
}
/// Get all whitelisted topics
async fn get_topics<M>(State(state): State<ServerState<M>>) -> Json<Vec<String>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting all whitelisted topics");
let node = state.node.lock().await;
// Get the whitelist from the node
let topics = node.topics();
// Convert to TopicInfo structs
let topics: Vec<String> = topics.iter().map(|topic| base64::encode(topic)).collect();
Json(topics)
}
/// Add a topic to the whitelist
async fn add_topic<M>(
State(state): State<ServerState<M>>,
Json(topic_info): Json<Vec<u8>>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Adding topic to whitelist");
state.node.lock().await.add_topic_whitelist(topic_info);
StatusCode::CREATED
}
/// Remove a topic from the whitelist
async fn remove_topic<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing topic from whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state.node.lock().await.remove_topic_whitelist(topic_bytes);
Ok(StatusCode::NO_CONTENT)
}
/// Get all sources for a topic
async fn get_topic_sources<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<Json<Vec<String>>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting sources for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let node = state.node.lock().await;
// Get the whitelist from the node
let sources = node.topic_allowed_sources(&topic_bytes);
// Find the topic in the whitelist
if let Some(sources) = sources {
let sources = sources.into_iter().map(|s| s.to_string()).collect();
Ok(Json(sources))
} else {
Err(StatusCode::NOT_FOUND)
}
}
/// Add a source to a topic whitelist
async fn add_topic_source<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
Json(request): Json<TopicSourceRequest>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Adding source to topic whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
// Parse the subnet
let subnet = match request.subnet.parse::<Subnet>() {
Ok(subnet) => subnet,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.add_topic_whitelist_src(topic_bytes, subnet);
Ok(StatusCode::CREATED)
}
/// Remove a source from a topic whitelist
async fn remove_topic_source<M>(
State(state): State<ServerState<M>>,
Path((topic, subnet_str)): Path<(String, String)>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing source from topic whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
// Parse the subnet
let subnet = match subnet_str.parse::<Subnet>() {
Ok(subnet) => subnet,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.remove_topic_whitelist_src(topic_bytes, subnet);
Ok(StatusCode::NO_CONTENT)
}
/// Get the forward socket for a topic
async fn get_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<Json<Option<String>>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let node = state.node.lock().await;
let socket_path = node
.get_topic_forward_socket(&topic_bytes)
.map(|p| p.to_string_lossy().to_string());
Ok(Json(socket_path))
}
/// Set the forward socket for a topic
async fn set_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
Json(request): Json<TopicForwardSocketRequest>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Setting forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let socket_path = PathBuf::from(request.socket_path);
state
.node
.lock()
.await
.set_topic_forward_socket(topic_bytes, socket_path);
Ok(StatusCode::NO_CONTENT)
}
/// Remove the forward socket for a topic
async fn remove_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.delete_topic_forward_socket(topic_bytes);
Ok(StatusCode::NO_CONTENT)
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/admin.rs | mycelium-api/src/rpc/admin.rs | //! Admin-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::net::IpAddr;
use std::str::FromStr;
use tracing::debug;
use mycelium::crypto::PublicKey;
use mycelium::metrics::Metrics;
use crate::HttpServerState;
use crate::Info;
use crate::rpc::models::error_codes;
use crate::rpc::traits::AdminApi;
/// Implementation of Admin-related JSON-RPC methods
pub struct AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new AdminRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> AdminApi for AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_info(&self) -> RpcResult<Info> {
debug!("Getting node info via RPC");
let info = self.state.node.blocking_lock().info();
Ok(Info {
node_subnet: info.node_subnet.to_string(),
node_pubkey: info.node_pubkey,
})
}
fn get_pubkey_from_ip(&self, mycelium_ip: String) -> RpcResult<PublicKey> {
debug!(ip = %mycelium_ip, "Getting public key from IP via RPC");
let ip = IpAddr::from_str(&mycelium_ip).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: format!("Invalid IP address: {}", e),
data: None,
})?;
match self.state.node.blocking_lock().get_pubkey_from_ip(ip) {
Some(pubkey) => Ok(pubkey),
None => Err(Error {
code: ErrorCode::ServerError(error_codes::PUBKEY_NOT_FOUND),
message: "Public key not found".to_string(),
data: None,
}),
}
}
} | rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/route.rs | mycelium-api/src/rpc/route.rs | //! Route-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::Result as RpcResult;
use tracing::debug;
use mycelium::metrics::Metrics;
use crate::HttpServerState;
use crate::Route;
use crate::QueriedSubnet;
use crate::NoRouteSubnet;
use crate::Metric;
use crate::rpc::traits::RouteApi;
/// Implementation of Route-related JSON-RPC methods
pub struct RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new RouteRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> RouteApi for RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_selected_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading selected routes via RPC");
let routes = self.state
.node
.blocking_lock()
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
fn get_fallback_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading fallback routes via RPC");
let routes = self.state
.node
.blocking_lock()
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>> {
debug!("Loading queried subnets via RPC");
let queries = self.state
.node
.blocking_lock()
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(queries)
}
fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>> {
debug!("Loading no route entries via RPC");
let entries = self.state
.node
.blocking_lock()
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(entries)
}
} | rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/traits.rs | mycelium-api/src/rpc/traits.rs | //! RPC trait definitions for the Mycelium JSON-RPC API
use jsonrpc_core::Result as RpcResult;
use jsonrpc_derive::rpc;
use crate::Info;
use crate::Route;
use crate::QueriedSubnet;
use crate::NoRouteSubnet;
use mycelium::crypto::PublicKey;
use mycelium::peer_manager::PeerStats;
use mycelium::message::{MessageId, MessageInfo};
// Admin-related RPC methods.
#[rpc]
pub trait AdminApi {
    /// Get general info about the node (wire method `getInfo`).
    #[rpc(name = "getInfo")]
    fn get_info(&self) -> RpcResult<Info>;
    /// Look up the public key belonging to a node's overlay IP
    /// (wire method `getPublicKeyFromIp`).
    #[rpc(name = "getPublicKeyFromIp")]
    fn get_pubkey_from_ip(&self, mycelium_ip: String) -> RpcResult<PublicKey>;
}
// Peer-related RPC methods.
#[rpc]
pub trait PeerApi {
    /// List known peers (wire method `getPeers`).
    #[rpc(name = "getPeers")]
    fn get_peers(&self) -> RpcResult<Vec<PeerStats>>;
    /// Add a new peer by endpoint string (wire method `addPeer`).
    #[rpc(name = "addPeer")]
    fn add_peer(&self, endpoint: String) -> RpcResult<bool>;
    /// Remove an existing peer by endpoint string (wire method `deletePeer`).
    #[rpc(name = "deletePeer")]
    fn delete_peer(&self, endpoint: String) -> RpcResult<bool>;
}
// Route-related RPC methods.
#[rpc]
pub trait RouteApi {
    /// List all selected routes (wire method `getSelectedRoutes`).
    #[rpc(name = "getSelectedRoutes")]
    fn get_selected_routes(&self) -> RpcResult<Vec<Route>>;
    /// List all active fallback routes (wire method `getFallbackRoutes`).
    #[rpc(name = "getFallbackRoutes")]
    fn get_fallback_routes(&self) -> RpcResult<Vec<Route>>;
    /// List all currently queried subnets (wire method `getQueriedSubnets`).
    #[rpc(name = "getQueriedSubnets")]
    fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>>;
    /// List all subnets which are explicitly marked as no route
    /// (wire method `getNoRouteEntries`).
    #[rpc(name = "getNoRouteEntries")]
    fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>>;
}
// Message-related RPC methods
#[rpc]
pub trait MessageApi {
/// Get a message from the inbound message queue
#[rpc(name = "popMessage")]
fn pop_message(&self, peek: Option<bool>, timeout: Option<u64>, topic: Option<String>) -> RpcResult<crate::message::MessageReceiveInfo>;
/// Submit a new message to the system
#[rpc(name = "pushMessage")]
fn push_message(&self, message: crate::message::MessageSendInfo, reply_timeout: Option<u64>) -> RpcResult<crate::message::PushMessageResponse>;
/// Reply to a message with the given ID
#[rpc(name = "pushMessageReply")]
fn push_message_reply(&self, id: String, message: crate::message::MessageSendInfo) -> RpcResult<bool>;
/// Get the status of an outbound message
#[rpc(name = "getMessageInfo")]
fn get_message_info(&self, id: String) -> RpcResult<MessageInfo>;
} | rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/message.rs | mycelium-api/src/rpc/message.rs | //! Message-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::time::Duration;
use tracing::debug;
use mycelium::message::{MessageId, MessageInfo};
use mycelium::metrics::Metrics;
use crate::message::{MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
use crate::rpc::models::error_codes;
use crate::rpc::traits::MessageApi;
use crate::HttpServerState;
/// Implementation of Message-related JSON-RPC methods.
///
/// Wraps the shared [`HttpServerState`] and serves the message-queue calls
/// declared by the `MessageApi` trait.
pub struct MessageRpc<M>
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    // Shared server state; the node inside is reached through the async lock.
    state: HttpServerState<M>,
}
impl<M> MessageRpc<M>
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    /// Create a new MessageRpc instance wrapping the shared server state.
    pub fn new(state: HttpServerState<M>) -> Self {
        Self { state }
    }
    /// Decode a standard-alphabet base64 string to raw bytes, mapping any
    /// decode failure to a JSON-RPC `InvalidParams` error.
    ///
    /// NOTE(review): the `.decode` method call requires the `base64::Engine`
    /// trait to be in scope at the top of this file — confirm that import
    /// exists, as it is not visible in this excerpt.
    fn decode_base64(&self, s: &str) -> Result<Vec<u8>, Error> {
        base64::engine::general_purpose::STANDARD
            .decode(s.as_bytes())
            .map_err(|e| Error {
                code: ErrorCode::InvalidParams,
                message: format!("Invalid base64 encoding: {}", e),
                data: None,
            })
    }
}
impl<M> MessageApi for MessageRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
    /// Fetch a message from the inbound queue, optionally peeking (leaving it
    /// queued) and optionally filtering on a base64-encoded topic.
    fn pop_message(
        &self,
        peek: Option<bool>,
        timeout: Option<u64>,
        topic: Option<String>,
    ) -> RpcResult<MessageReceiveInfo> {
        debug!(
            "Attempt to get message via RPC, peek {}, timeout {} seconds",
            peek.unwrap_or(false),
            timeout.unwrap_or(0)
        );
        // Decode the topic filter up front; invalid base64 surfaces as an
        // InvalidParams error via `?`.
        let topic_bytes = if let Some(topic_str) = topic {
            Some(self.decode_base64(&topic_str)?)
        } else {
            None
        };
        // A timeout of 0 seconds essentially means get a message if there is one, and return
        // immediately if there isn't.
        let result = tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current().block_on(async {
                tokio::time::timeout(
                    Duration::from_secs(timeout.unwrap_or(0)),
                    self.state
                        .node
                        .lock()
                        .await
                        // Pop semantics by default: the message is removed
                        // from the queue unless the caller asked to peek.
                        .get_message(!peek.unwrap_or(false), topic_bytes),
                )
                .await
            })
        });
        match result {
            Ok(Ok(m)) => Ok(MessageReceiveInfo {
                id: m.id,
                src_ip: m.src_ip,
                src_pk: m.src_pk,
                dst_ip: m.dst_ip,
                dst_pk: m.dst_pk,
                // An empty topic is surfaced as `None` rather than an empty blob.
                topic: if m.topic.is_empty() {
                    None
                } else {
                    Some(m.topic)
                },
                payload: m.data,
            }),
            // Both an elapsed timer (outer Err) and an inner `get_message`
            // error collapse into the single NO_MESSAGE_READY response.
            _ => Err(Error {
                code: ErrorCode::ServerError(error_codes::NO_MESSAGE_READY),
                message: "No message ready".to_string(),
                data: None,
            }),
        }
    }
    /// Submit a new message to the network and, when `reply_timeout` is set,
    /// block until a reply arrives or the timeout expires.
    fn push_message(
        &self,
        message: MessageSendInfo,
        reply_timeout: Option<u64>,
    ) -> RpcResult<PushMessageResponse> {
        // Resolve the destination to an overlay IP address.
        let dst = match message.dst {
            crate::message::MessageDestination::Ip(ip) => ip,
            crate::message::MessageDestination::Pk(pk) => pk.address().into(),
        };
        debug!(
            message.dst=%dst,
            message.len=message.payload.len(),
            "Pushing new message via RPC",
        );
        // Default message try duration
        const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
        // Bridge from this sync RPC handler into the async node API; a reply
        // subscription is only requested when the caller wants to wait.
        let result = tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current().block_on(async {
                self.state.node.lock().await.push_message(
                    dst,
                    message.payload,
                    message.topic,
                    DEFAULT_MESSAGE_TRY_DURATION,
                    reply_timeout.is_some(),
                )
            })
        });
        let (id, sub) = match result {
            Ok((id, sub)) => (id, sub),
            Err(_) => {
                return Err(Error {
                    code: ErrorCode::InvalidParams,
                    message: "Failed to push message".to_string(),
                    data: None,
                });
            }
        };
        if reply_timeout.is_none() {
            // If we don't wait for the reply just return here.
            return Ok(PushMessageResponse::Id(crate::message::MessageIdReply {
                id,
            }));
        }
        // NOTE(review): relies on the node handing back a subscription
        // whenever a reply was requested above — confirm `push_message`
        // guarantees this, otherwise this unwrap can panic.
        let mut sub = sub.unwrap();
        // Wait for reply with timeout. `reply_timeout` is always `Some` at
        // this point (the `is_none` case returned above), so the
        // `unwrap_or(0)` in the sleep below never takes its fallback.
        let reply_result = tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current().block_on(async {
                tokio::select! {
                    sub_res = sub.changed() => {
                        match sub_res {
                            Ok(_) => {
                                if let Some(m) = sub.borrow().deref() {
                                    Ok(PushMessageResponse::Reply(MessageReceiveInfo {
                                        id: m.id,
                                        src_ip: m.src_ip,
                                        src_pk: m.src_pk,
                                        dst_ip: m.dst_ip,
                                        dst_pk: m.dst_pk,
                                        topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
                                        payload: m.data.clone(),
                                    }))
                                } else {
                                    // This happens if a none value is send, which should not happen.
                                    Err(Error {
                                        code: ErrorCode::InternalError,
                                        message: "Internal error while waiting for reply".to_string(),
                                        data: None,
                                    })
                                }
                            }
                            Err(_) => {
                                // This happens if the sender drops, which should not happen.
                                Err(Error {
                                    code: ErrorCode::InternalError,
                                    message: "Internal error while waiting for reply".to_string(),
                                    data: None,
                                })
                            }
                        }
                    },
                    _ = tokio::time::sleep(Duration::from_secs(reply_timeout.unwrap_or(0))) => {
                        // Timeout expired while waiting for reply.
                        // NOTE(review): this returns the message Id as a success
                        // rather than the TIMEOUT_WAITING_FOR_REPLY error defined
                        // in `error_codes` — confirm clients expect this.
                        Ok(PushMessageResponse::Id(crate::message::MessageIdReply { id }))
                    }
                }
            })
        });
        // Pass the select result straight through to the RPC caller.
        match reply_result {
            Ok(response) => Ok(response),
            Err(e) => Err(e),
        }
    }
fn push_message_reply(&self, id: String, message: MessageSendInfo) -> RpcResult<bool> {
let message_id = match MessageId::from_hex(&id) {
Ok(id) => id,
Err(_) => {
return Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid message ID".to_string(),
data: None,
});
}
};
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.id=id,
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new reply to message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
self.state.node.lock().await.reply_message(
message_id,
dst,
message.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
})
});
Ok(true)
}
fn get_message_info(&self, id: String) -> RpcResult<MessageInfo> {
let message_id = match MessageId::from_hex(&id) {
Ok(id) => id,
Err(_) => {
return Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid message ID".to_string(),
data: None,
});
}
};
debug!(message.id=%id, "Fetching message status via RPC");
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current()
.block_on(async { self.state.node.lock().await.message_status(message_id) })
});
match result {
Some(info) => Ok(info),
None => Err(Error {
code: ErrorCode::ServerError(error_codes::MESSAGE_NOT_FOUND),
message: "Message not found".to_string(),
data: None,
}),
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/peer.rs | mycelium-api/src/rpc/peer.rs | //! Peer-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::str::FromStr;
use tracing::debug;
use mycelium::endpoint::Endpoint;
use mycelium::metrics::Metrics;
use mycelium::peer_manager::{PeerExists, PeerNotFound, PeerStats};
use crate::rpc::models::error_codes;
use crate::rpc::traits::PeerApi;
use crate::HttpServerState;
/// Implementation of Peer-related JSON-RPC methods.
///
/// Wraps the shared [`HttpServerState`] and serves the peer management calls
/// declared by the `PeerApi` trait.
pub struct PeerRpc<M>
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    // Shared server state; the node inside is reached via `blocking_lock`.
    state: HttpServerState<M>,
}
impl<M> PeerRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new PeerRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> PeerApi for PeerRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_peers(&self) -> RpcResult<Vec<PeerStats>> {
debug!("Fetching peer stats via RPC");
Ok(self.state.node.blocking_lock().peer_info())
}
fn add_peer(&self, endpoint: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint,
"Attempting to add peer to the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: e.to_string(),
data: None,
})?;
match self.state.node.blocking_lock().add_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerExists) => Err(Error {
code: ErrorCode::ServerError(error_codes::PEER_EXISTS),
message: "A peer identified by that endpoint already exists".to_string(),
data: None,
}),
}
}
fn delete_peer(&self, endpoint: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint,
"Attempting to remove peer from the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: e.to_string(),
data: None,
})?;
match self.state.node.blocking_lock().remove_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerNotFound) => Err(Error {
code: ErrorCode::ServerError(error_codes::PEER_NOT_FOUND),
message: "A peer identified by that endpoint does not exist".to_string(),
data: None,
}),
}
}
}
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/models.rs | mycelium-api/src/rpc/models.rs | //! Models for the Mycelium JSON-RPC API
use serde::{Deserialize, Serialize};
// Define any additional models needed for the JSON-RPC API
// Most models can be reused from the existing REST API
/// Error codes for the JSON-RPC API.
///
/// NOTE(review): JSON-RPC 2.0 reserves -32000..=-32099 for
/// implementation-defined server errors; the positive HTTP-style values
/// below fall outside that range. They are left as-is because existing
/// clients may match on them — confirm before changing any value.
pub mod error_codes {
    /// Invalid parameters error code (mirrors the standard JSON-RPC value).
    pub const INVALID_PARAMS: i64 = -32602;
    /// Peer already exists error code (HTTP 409 Conflict style).
    pub const PEER_EXISTS: i64 = 409;
    /// Peer not found error code (HTTP 404 style).
    pub const PEER_NOT_FOUND: i64 = 404;
    /// Message not found error code (HTTP 404 style).
    pub const MESSAGE_NOT_FOUND: i64 = 404;
    /// Public key not found error code (HTTP 404 style).
    pub const PUBKEY_NOT_FOUND: i64 = 404;
    /// No message ready error code (HTTP 204 No Content style).
    pub const NO_MESSAGE_READY: i64 = 204;
    /// Timeout waiting for reply error code (HTTP 408 style).
    pub const TIMEOUT_WAITING_FOR_REPLY: i64 = 408;
}
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/mycelium-api/src/rpc/spec.rs | mycelium-api/src/rpc/spec.rs | //! OpenRPC specification for the Mycelium JSON-RPC API
/// The OpenRPC specification for the Mycelium JSON-RPC API, embedded into
/// the binary at compile time from `docs/openrpc.json`.
pub const OPENRPC_SPEC: &str = include_str!("../../../docs/openrpc.json");
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | false |
threefoldtech/mycelium | https://github.com/threefoldtech/mycelium/blob/e8653f66a01eab8175acdb06e05cca15b7fae722/myceliumd-private/src/main.rs | myceliumd-private/src/main.rs | use std::io::{self, Read};
use std::net::Ipv4Addr;
use std::path::Path;
use std::sync::Arc;
use std::{
error::Error,
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use std::{fmt::Display, str::FromStr};
use clap::{Args, Parser, Subcommand};
use mycelium::message::TopicConfig;
use serde::{Deserialize, Deserializer};
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[cfg(target_family = "unix")]
use tokio::signal::{self, unix::SignalKind};
use tokio::sync::Mutex;
use tracing::{debug, error, info, warn};
use crypto::PublicKey;
use mycelium::endpoint::Endpoint;
use mycelium::{crypto, Node};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::EnvFilter;
/// The default port on the underlay to listen on for incoming TCP connections.
const DEFAULT_TCP_LISTEN_PORT: u16 = 9651;
/// The default port on the underlay to listen on for incoming Quic connections.
// Intentionally the same numeric value as the TCP default; the two listeners
// use different transport protocols.
const DEFAULT_QUIC_LISTEN_PORT: u16 = 9651;
/// The default port to use for IPv6 link local peer discovery (UDP).
const DEFAULT_PEER_DISCOVERY_PORT: u16 = 9650;
/// The default listening address for the HTTP API.
const DEFAULT_HTTP_API_SERVER_ADDRESS: SocketAddr =
    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8989);
/// The default listening address for the JSON-RPC API.
const DEFAULT_JSONRPC_API_SERVER_ADDRESS: SocketAddr =
    SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8990);
/// Default name of tun interface
#[cfg(not(target_os = "macos"))]
const TUN_NAME: &str = "mycelium";
/// Default name of tun interface
// macOS requires utun-prefixed interface names.
#[cfg(target_os = "macos")]
const TUN_NAME: &str = "utun0";
/// The logging formats that can be selected.
#[derive(Clone, PartialEq, Eq)]
enum LoggingFormat {
    /// Human-friendly compact tracing output.
    Compact,
    /// Key=value (logfmt) output.
    Logfmt,
    /// Same as Logfmt but with color statically disabled
    Plain,
}
impl Display for LoggingFormat {
    /// Render the format's canonical lowercase name (the same strings
    /// accepted by the `FromStr` implementation).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            LoggingFormat::Compact => "compact",
            LoggingFormat::Logfmt => "logfmt",
            LoggingFormat::Plain => "plain",
        };
        f.write_str(name)
    }
}
impl FromStr for LoggingFormat {
    type Err = &'static str;
    /// Parse the canonical lowercase names emitted by `Display`; anything
    /// else is rejected with a static error message.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "compact" => Ok(LoggingFormat::Compact),
            "logfmt" => Ok(LoggingFormat::Logfmt),
            "plain" => Ok(LoggingFormat::Plain),
            _ => Err("invalid logging format"),
        }
    }
}
// Top-level CLI definition. Doc comments (`///`) on fields double as clap
// help text, so they are kept verbatim; review notes use plain `//`.
#[derive(Parser)]
#[command(version)]
struct Cli {
    /// Path to the private key file. This will be created if it does not exist. Default
    /// [priv_key.bin].
    #[arg(short = 'k', long = "key-file", global = true)]
    key_file: Option<PathBuf>,
    // Path to an optional TOML configuration file. NOTE(review): this is a
    // plain comment (not `///`), so clap shows no help text for the flag —
    // confirm that is intentional.
    #[arg(short = 'c', long = "config-file", global = true)]
    config_file: Option<PathBuf>,
    /// Enable debug logging. Does nothing if `--silent` is set.
    #[arg(short = 'd', long = "debug", default_value_t = false)]
    debug: bool,
    /// Disable all logs except error logs.
    #[arg(long = "silent", default_value_t = false)]
    silent: bool,
    /// The logging format to use. `logfmt` and `compact` is supported.
    #[arg(long = "log-format", default_value_t = LoggingFormat::Compact)]
    logging_format: LoggingFormat,
    // Node runtime options, flattened into the top-level argument list.
    #[clap(flatten)]
    node_args: NodeArguments,
    // Optional subcommand; when absent the node itself is started.
    #[command(subcommand)]
    command: Option<Command>,
}
// Top-level subcommands. When no subcommand is given, the daemon itself is
// started (see the `None` arm of the match in `main`).
#[derive(Debug, Subcommand)]
pub enum Command {
    /// Inspect a public key provided in hex format, or export the local public key if no key is
    /// given.
    Inspect {
        /// Output in json format.
        #[arg(long = "json")]
        json: bool,
        /// The key to inspect.
        key: Option<String>,
    },
    /// Generate a set of new keys for the system at the default path, or the path provided by the
    /// --key-file parameter
    GenerateKeys {
        /// Force generating new keys, removing any existing key in the process
        #[arg(long = "force")]
        force: bool,
    },
    /// Actions on the message subsystem
    Message {
        #[command(subcommand)]
        command: MessageCommand,
    },
    /// Actions related to peers (list, remove, add)
    Peers {
        #[command(subcommand)]
        command: PeersCommand,
    },
    /// Actions related to routes (selected, fallback)
    Routes {
        #[command(subcommand)]
        command: RoutesCommand,
    },
    /// Actions related to the SOCKS5 proxy
    Proxy {
        #[command(subcommand)]
        command: ProxyCommand,
    },
}
// Subcommands of `mycelium message`.
#[derive(Debug, Subcommand)]
pub enum MessageCommand {
    // Send a message to a remote node, optionally waiting for a reply.
    Send {
        /// Wait for a reply from the receiver.
        #[arg(short = 'w', long = "wait", default_value_t = false)]
        wait: bool,
        /// An optional timeout to wait for. This does nothing if the `--wait` flag is not set. If
        /// `--wait` is set and this flag isn't, wait forever for a reply.
        #[arg(long = "timeout")]
        timeout: Option<u64>,
        /// Optional topic of the message. Receivers can filter on this to only receive messages
        /// for a chosen topic.
        #[arg(short = 't', long = "topic")]
        topic: Option<String>,
        /// Optional file to use as message body.
        #[arg(long = "msg-path")]
        msg_path: Option<PathBuf>,
        /// Optional message ID to reply to.
        #[arg(long = "reply-to")]
        reply_to: Option<String>,
        /// Destination of the message, either a hex encoded public key, or an IPv6 address in the
        /// 400::/7 range.
        destination: String,
        /// The message to send. This is required if `--msg_path` is not set
        message: Option<String>,
    },
    // Receive a single message from the local node's inbound queue.
    Receive {
        /// An optional timeout to wait for a message. If this is not set, wait forever.
        #[arg(long = "timeout")]
        timeout: Option<u64>,
        /// Optional topic of the message. Only messages with this topic will be received by this
        /// command.
        #[arg(short = 't', long = "topic")]
        topic: Option<String>,
        /// Optional file in which the message body will be saved.
        #[arg(long = "msg-path")]
        msg_path: Option<PathBuf>,
        /// Don't print the metadata
        #[arg(long = "raw")]
        raw: bool,
    },
}
// Subcommands of `mycelium peers`.
#[derive(Debug, Subcommand)]
pub enum PeersCommand {
    /// List the connected peers
    List {
        /// Print the peers list in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Add peer(s)
    Add { peers: Vec<String> },
    /// Remove peer(s)
    Remove { peers: Vec<String> },
}
// Subcommands of `mycelium routes`, mirroring the RouteApi RPC methods.
#[derive(Debug, Subcommand)]
pub enum RoutesCommand {
    /// Print all selected routes
    Selected {
        /// Print selected routes in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Print all fallback routes
    Fallback {
        /// Print fallback routes in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Print the currently queried subnets
    Queried {
        /// Print queried subnets in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Print all subnets which are explicitly marked as not having a route
    NoRoute {
        /// Print subnets in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
}
// Subcommands of `mycelium proxy`.
#[derive(Debug, Subcommand)]
pub enum ProxyCommand {
    /// List known proxies
    List {
        /// Print in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Connect to a proxy, optionally specifying a remote [IPV6]:PORT
    Connect {
        /// Optional remote socket address, e.g. [407:...]:1080
        #[arg(long = "remote")]
        remote: Option<String>,
        /// Print in JSON format
        #[arg(long = "json", default_value_t = false)]
        json: bool,
    },
    /// Disconnect from the current proxy
    Disconnect,
    /// Manage background proxy probing
    Probe {
        #[command(subcommand)]
        command: ProxyProbeCommand,
    },
}
// Subcommands of `mycelium proxy probe`.
#[derive(Debug, Subcommand)]
pub enum ProxyProbeCommand {
    /// Start background proxy probing
    Start,
    /// Stop background proxy probing
    Stop,
}
// Node runtime flags, flattened into the top-level `Cli`. Doc comments
// (`///`) double as clap help text.
#[derive(Debug, Args)]
pub struct NodeArguments {
    /// Peers to connect to.
    #[arg(long = "peers", num_args = 1..)]
    static_peers: Vec<Endpoint>,
    /// Port to listen on for tcp connections.
    #[arg(short = 't', long = "tcp-listen-port", default_value_t = DEFAULT_TCP_LISTEN_PORT)]
    tcp_listen_port: u16,
    /// Disable quic protocol for connecting to peers
    #[arg(long = "disable-quic", default_value_t = false)]
    disable_quic: bool,
    /// Port to listen on for quic connections.
    // Defaults to the same numeric port as the TCP listener (9651).
    #[arg(short = 'q', long = "quic-listen-port", default_value_t = DEFAULT_QUIC_LISTEN_PORT)]
    quic_listen_port: u16,
    /// Port to use for link local peer discovery. This uses the UDP protocol.
    #[arg(long = "peer-discovery-port", default_value_t = DEFAULT_PEER_DISCOVERY_PORT)]
    peer_discovery_port: u16,
    /// Disable peer discovery.
    ///
    /// If this flag is passed, the automatic link local peer discovery will not be enabled, and
    /// peers must be configured manually. If this is disabled on all local peers, communication
    /// between them will go over configured external peers.
    #[arg(long = "disable-peer-discovery", default_value_t = false)]
    disable_peer_discovery: bool,
    /// Address of the HTTP API server.
    #[arg(long = "api-addr", default_value_t = DEFAULT_HTTP_API_SERVER_ADDRESS)]
    api_addr: SocketAddr,
    /// Address of the JSON-RPC API server.
    #[arg(long = "jsonrpc-addr", default_value_t = DEFAULT_JSONRPC_API_SERVER_ADDRESS)]
    jsonrpc_addr: SocketAddr,
    /// Run without creating a TUN interface.
    ///
    /// The system will participate in the network as usual, but won't be able to send out L3
    /// packets. Inbound L3 traffic will be silently discarded. The message subsystem will still
    /// work however.
    #[arg(long = "no-tun", default_value_t = false)]
    no_tun: bool,
    /// Name to use for the TUN interface, if one is created.
    ///
    /// Setting this only matters if a TUN interface is actually created, i.e. if the `--no-tun`
    /// flag is **not** set. The name set here must be valid for the current platform, e.g. on OSX,
    /// the name must start with `utun` and be followed by digits.
    #[arg(long = "tun-name")]
    tun_name: Option<String>,
    /// Enable a private network, with this name.
    ///
    /// If this flag is set, the system will run in "private network mode", and use Tls connections
    /// instead of plain Tcp connections. The name provided here is used as the network name, other
    /// nodes must use the same name or the connection will be rejected. Note that the name is
    /// public, and is communicated when connecting to a remote. Do not put confidential data here.
    // `requires` makes clap enforce that both private-network flags are set together.
    #[arg(long = "network-name", requires = "network_key_file")]
    network_name: Option<String>,
    /// The path to the file with the key to use for the private network.
    ///
    /// The key is expected to be exactly 32 bytes. The key must be shared between all nodes
    /// participating in the network, and is secret. If the key leaks, anyone can then join the
    /// network.
    #[arg(long = "network-key-file", requires = "network_name")]
    network_key_file: Option<PathBuf>,
    /// The address on which to expose prometheus metrics, if desired.
    ///
    /// Setting this flag will attempt to start an HTTP server on the provided address, to serve
    /// prometheus metrics on the /metrics endpoint. If this flag is not set, metrics are also not
    /// collected.
    #[arg(long = "metrics-api-address")]
    metrics_api_address: Option<SocketAddr>,
    /// The firewall mark to set on the mycelium sockets.
    ///
    /// This allows to identify packets that contain encapsulated mycelium packets so that
    /// different routing policies can be applied to them.
    /// This option only has an effect on Linux.
    #[arg(long = "firewall-mark")]
    firewall_mark: Option<u32>,
    /// The amount of worker tasks to spawn to handle updates.
    ///
    /// By default, updates are processed on a single task only. This is sufficient for most use
    /// cases. In case you notice that the node can't keep up with the incoming updates (typically
    /// because you are running a public node with a lot of connections), this value can be
    /// increased to process updates in parallel.
    #[arg(long = "update-workers", default_value_t = 1)]
    update_workers: usize,
    /// The topic configuration.
    ///
    /// A .toml file containing topic configuration. This is a default action in case the topic is
    /// not listed, and an explicit whitelist for allowed subnets/ips which are otherwise allowed
    /// to use a topic.
    #[arg(long = "topic-config")]
    topic_config: Option<PathBuf>,
    /// The cache directory for the mycelium CDN module
    ///
    /// This directory will be used to cache reconstructed content blocks which were loaded through
    /// the CDN functionality for faster access next time.
    #[arg(long = "cdn-cache")]
    cdn_cache: Option<PathBuf>,
    /// Enable the dns resolver
    ///
    /// When the DNS resolver is enabled, it will bind a UDP socket on port 53. If this fails, the
    /// system will not continue starting. All queries sent to this resolver will be forwarded to
    /// the system resolvers.
    #[arg(long = "enable-dns")]
    enable_dns: bool,
}
/// Fully resolved node configuration: the result of merging CLI arguments
/// with the optional TOML configuration file (produced by `merge_config`).
/// Unlike `MyceliumConfig`, every required field here is concrete.
#[derive(Debug, Deserialize)]
pub struct MergedNodeConfig {
    peers: Vec<Endpoint>,
    tcp_listen_port: u16,
    disable_quic: bool,
    quic_listen_port: u16,
    peer_discovery_port: u16,
    disable_peer_discovery: bool,
    api_addr: SocketAddr,
    jsonrpc_addr: SocketAddr,
    no_tun: bool,
    tun_name: String,
    metrics_api_address: Option<SocketAddr>,
    network_key_file: Option<PathBuf>,
    network_name: Option<String>,
    firewall_mark: Option<u32>,
    update_workers: usize,
    topic_config: Option<PathBuf>,
    cdn_cache: Option<PathBuf>,
    enable_dns: bool,
}
/// Raw values parsed from the optional TOML configuration file. Every field
/// is optional so the file may specify any subset; missing values fall back
/// to CLI arguments or defaults when merged into `MergedNodeConfig`.
#[derive(Debug, Deserialize, Default)]
struct MyceliumConfig {
    // NOTE(review): custom deserializer defined elsewhere in this file —
    // presumably it parses endpoints written as plain strings; confirm.
    #[serde(deserialize_with = "deserialize_optional_endpoint_str_from_toml")]
    peers: Option<Vec<Endpoint>>,
    tcp_listen_port: Option<u16>,
    disable_quic: Option<bool>,
    quic_listen_port: Option<u16>,
    no_tun: Option<bool>,
    tun_name: Option<String>,
    disable_peer_discovery: Option<bool>,
    peer_discovery_port: Option<u16>,
    api_addr: Option<SocketAddr>,
    jsonrpc_addr: Option<SocketAddr>,
    metrics_api_address: Option<SocketAddr>,
    network_name: Option<String>,
    network_key_file: Option<PathBuf>,
    firewall_mark: Option<u32>,
    update_workers: Option<usize>,
    topic_config: Option<PathBuf>,
    cdn_cache: Option<PathBuf>,
    enable_dns: Option<bool>,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
let cli = Cli::parse();
// Init default configuration
let mut mycelium_config = MyceliumConfig::default();
// Load configuration file
if let Some(config_file_path) = &cli.config_file {
if Path::new(config_file_path).exists() {
let config = config::Config::builder()
.add_source(config::File::new(
config_file_path.to_str().unwrap(),
config::FileFormat::Toml,
))
.build()?;
mycelium_config = config.try_deserialize()?;
} else {
let error_msg = format!("Config file {config_file_path:?} not found");
return Err(io::Error::new(io::ErrorKind::NotFound, error_msg).into());
}
} else if let Some(mut conf) = dirs::config_dir() {
// Windows: %APPDATA%/ThreeFold Tech/Mycelium/mycelium.conf
#[cfg(target_os = "windows")]
{
conf = conf
.join("ThreeFold Tech")
.join("Mycelium")
.join("mycelium.toml")
};
// Linux: $HOME/.config/mycelium/mycelium.conf
#[allow(clippy::unnecessary_operation)]
#[cfg(target_os = "linux")]
{
conf = conf.join("mycelium").join("mycelium.toml")
};
// MacOS: $HOME/Library/Application Support/ThreeFold Tech/Mycelium/mycelium.conf
#[cfg(target_os = "macos")]
{
conf = conf
.join("ThreeFold Tech")
.join("Mycelium")
.join("mycelium.toml")
};
if conf.exists() {
info!(
conf_dir = conf.to_str().unwrap(),
"Mycelium is starting with configuration file",
);
let config = config::Config::builder()
.add_source(config::File::new(
conf.to_str().unwrap(),
config::FileFormat::Toml,
))
.build()?;
mycelium_config = config.try_deserialize()?;
}
}
let level = if cli.silent {
tracing::Level::ERROR
} else if cli.debug {
tracing::Level::DEBUG
} else {
tracing::Level::INFO
};
tracing_subscriber::registry()
.with(
EnvFilter::builder()
.with_default_directive(level.into())
.from_env()
.expect("invalid RUST_LOG"),
)
.with(
(cli.logging_format == LoggingFormat::Compact)
.then(|| tracing_subscriber::fmt::Layer::new().compact()),
)
.with((cli.logging_format == LoggingFormat::Logfmt).then(tracing_logfmt::layer))
.with((cli.logging_format == LoggingFormat::Plain).then(|| {
tracing_logfmt::builder()
// Explicitly force color off
.with_ansi_color(false)
.layer()
}))
.init();
let key_path = cli.key_file.unwrap_or_else(|| {
let mut key_path = dirs::data_local_dir().unwrap_or_else(|| ".".into());
// Windows: %LOCALAPPDATA%/ThreeFold Tech/Mycelium/priv_key.bin
#[cfg(target_os = "windows")]
{
key_path = key_path.join("ThreeFold Tech").join("Mycelium")
};
// Linux: $HOME/.local/share/mycelium/priv_key.bin
#[allow(clippy::unnecessary_operation)]
#[cfg(target_os = "linux")]
{
key_path = key_path.join("mycelium")
};
// MacOS: $HOME/Library/Application Support/ThreeFold Tech/Mycelium/priv_key.bin
#[cfg(target_os = "macos")]
{
key_path = key_path.join("ThreeFold Tech").join("Mycelium")
};
// If the dir does not exist, create it
if !key_path.exists() {
info!(
data_dir = key_path.to_str().unwrap(),
"Data config dir does not exist, create it"
);
if let Err(err) = std::fs::create_dir_all(&key_path) {
error!(%err, data_dir = key_path.to_str().unwrap(), "Could not create data directory");
std::process::exit(1);
}
}
key_path = key_path.join("priv_key.bin");
if key_path.exists() {
info!(key_path = key_path.to_str().unwrap(), "Using key file",);
}
key_path
});
match cli.command {
None => {
let merged_config = merge_config(cli.node_args, mycelium_config);
let topic_config = merged_config.topic_config.as_ref().and_then(|path| {
let mut content = String::new();
let mut file = std::fs::File::open(path).ok()?;
file.read_to_string(&mut content).ok()?;
toml::from_str::<TopicConfig>(&content).ok()
});
if topic_config.is_some() {
info!(path = ?merged_config.topic_config, "Loaded topic cofig");
}
let private_network_config =
match (merged_config.network_name, merged_config.network_key_file) {
(Some(network_name), Some(network_key_file)) => {
let net_key = load_key_file(&network_key_file).await?;
Some((network_name, net_key))
}
_ => None,
};
let node_keys = get_node_keys(&key_path).await?;
let node_secret_key = if let Some((node_secret_key, _)) = node_keys {
node_secret_key
} else {
warn!("Node key file {key_path:?} not found, generating new keys");
let secret_key = crypto::SecretKey::new();
save_key_file(&secret_key, &key_path).await?;
secret_key
};
let _api = if let Some(metrics_api_addr) = merged_config.metrics_api_address {
let metrics = mycelium_metrics::PrometheusExporter::new();
let config = mycelium::Config {
node_key: node_secret_key,
peers: merged_config.peers,
no_tun: merged_config.no_tun,
tcp_listen_port: merged_config.tcp_listen_port,
quic_listen_port: if merged_config.disable_quic {
None
} else {
Some(merged_config.quic_listen_port)
},
peer_discovery_port: if merged_config.disable_peer_discovery {
None
} else {
Some(merged_config.peer_discovery_port)
},
tun_name: merged_config.tun_name,
private_network_config,
metrics: metrics.clone(),
firewall_mark: merged_config.firewall_mark,
update_workers: merged_config.update_workers,
topic_config,
cdn_cache: merged_config.cdn_cache,
enable_dns: merged_config.enable_dns,
};
metrics.spawn(metrics_api_addr);
let node = Arc::new(Mutex::new(Node::new(config).await?));
let http_api = mycelium_api::Http::spawn(node.clone(), merged_config.api_addr);
// Initialize the JSON-RPC server
let rpc_api =
mycelium_api::rpc::JsonRpc::spawn(node, merged_config.jsonrpc_addr).await;
(http_api, rpc_api)
} else {
let config = mycelium::Config {
node_key: node_secret_key,
peers: merged_config.peers,
no_tun: merged_config.no_tun,
tcp_listen_port: merged_config.tcp_listen_port,
quic_listen_port: if merged_config.disable_quic {
None
} else {
Some(merged_config.quic_listen_port)
},
peer_discovery_port: if merged_config.disable_peer_discovery {
None
} else {
Some(merged_config.peer_discovery_port)
},
tun_name: merged_config.tun_name,
private_network_config,
metrics: mycelium_metrics::NoMetrics,
firewall_mark: merged_config.firewall_mark,
update_workers: merged_config.update_workers,
topic_config,
cdn_cache: merged_config.cdn_cache,
enable_dns: merged_config.enable_dns,
};
let node = Arc::new(Mutex::new(Node::new(config).await?));
let http_api = mycelium_api::Http::spawn(node.clone(), merged_config.api_addr);
// Initialize the JSON-RPC server
let rpc_api =
mycelium_api::rpc::JsonRpc::spawn(node, merged_config.jsonrpc_addr).await;
(http_api, rpc_api)
};
// TODO: put in dedicated file so we can only rely on certain signals on unix platforms
#[cfg(target_family = "unix")]
{
let mut sigint = signal::unix::signal(SignalKind::interrupt())
.expect("Can install SIGINT handler");
let mut sigterm = signal::unix::signal(SignalKind::terminate())
.expect("Can install SIGTERM handler");
tokio::select! {
_ = sigint.recv() => { }
_ = sigterm.recv() => { }
}
}
#[cfg(not(target_family = "unix"))]
{
if let Err(e) = tokio::signal::ctrl_c().await {
error!("Failed to wait for SIGINT: {e}");
}
}
}
Some(cmd) => match cmd {
Command::Inspect { json, key } => {
let node_keys = get_node_keys(&key_path).await?;
let key = if let Some(key) = key {
PublicKey::try_from(key.as_str())?
} else if let Some((_, node_pub_key)) = node_keys {
node_pub_key
} else {
error!("No key to inspect provided and no key found at {key_path:?}");
return Err(io::Error::new(
io::ErrorKind::NotFound,
"no key to inspect and key file not found",
)
.into());
};
mycelium_cli::inspect(key, json)?;
return Ok(());
}
Command::GenerateKeys { force } => {
let node_keys = get_node_keys(&key_path).await?;
if node_keys.is_none() || force {
info!(?key_path, "Generating new node keys");
let secret_key = crypto::SecretKey::new();
save_key_file(&secret_key, &key_path).await?;
} else {
warn!(?key_path, "Refusing to generate new keys as key file already exists, use `--force` to generate them anyway");
}
}
Command::Message { command } => match command {
MessageCommand::Send {
wait,
timeout,
topic,
msg_path,
reply_to,
destination,
message,
} => {
return mycelium_cli::send_msg(
destination,
message,
wait,
timeout,
reply_to,
topic,
msg_path,
cli.node_args.api_addr,
)
.await
}
MessageCommand::Receive {
timeout,
topic,
msg_path,
raw,
} => {
return mycelium_cli::recv_msg(
timeout,
topic,
msg_path,
raw,
cli.node_args.api_addr,
)
.await
}
},
Command::Peers { command } => match command {
PeersCommand::List { json } => {
return mycelium_cli::list_peers(cli.node_args.api_addr, json).await;
}
PeersCommand::Add { peers } => {
return mycelium_cli::add_peers(cli.node_args.api_addr, peers).await;
}
PeersCommand::Remove { peers } => {
return mycelium_cli::remove_peers(cli.node_args.api_addr, peers).await;
}
},
Command::Routes { command } => match command {
RoutesCommand::Selected { json } => {
return mycelium_cli::list_selected_routes(cli.node_args.api_addr, json).await;
}
RoutesCommand::Fallback { json } => {
return mycelium_cli::list_fallback_routes(cli.node_args.api_addr, json).await;
}
RoutesCommand::Queried { json } => {
return mycelium_cli::list_queried_subnets(cli.node_args.api_addr, json).await;
}
RoutesCommand::NoRoute { json } => {
return mycelium_cli::list_no_route_entries(cli.node_args.api_addr, json).await;
}
},
Command::Proxy { command } => match command {
ProxyCommand::List { json } => {
return mycelium_cli::list_proxies(cli.node_args.api_addr, json).await;
}
ProxyCommand::Connect { remote, json } => {
let remote_parsed = if let Some(r) = remote {
match r.parse::<SocketAddr>() {
Ok(addr) => Some(addr),
Err(e) => {
error!("Invalid --remote value '{r}': {e}");
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("invalid --remote socket address: {e}"),
)
.into());
}
}
} else {
None
};
return mycelium_cli::connect_proxy(
cli.node_args.api_addr,
remote_parsed,
json,
)
.await;
}
ProxyCommand::Disconnect => {
return mycelium_cli::disconnect_proxy(cli.node_args.api_addr).await;
}
ProxyCommand::Probe { command } => match command {
ProxyProbeCommand::Start => {
return mycelium_cli::start_proxy_probe(cli.node_args.api_addr).await;
}
ProxyProbeCommand::Stop => {
return mycelium_cli::stop_proxy_probe(cli.node_args.api_addr).await;
}
},
},
},
}
Ok(())
}
async fn get_node_keys(
key_path: &PathBuf,
) -> Result<Option<(crypto::SecretKey, crypto::PublicKey)>, io::Error> {
if key_path.exists() {
let sk = load_key_file(key_path).await?;
let pk = crypto::PublicKey::from(&sk);
debug!("Loaded key file at {key_path:?}");
Ok(Some((sk, pk)))
} else {
Ok(None)
}
}
/// Read a 32-byte key from the file at `path` and convert it into `T`.
///
/// Exactly 32 bytes are read from the start of the file; any trailing bytes
/// are ignored. Fails with an I/O error if the file cannot be opened or
/// holds fewer than 32 bytes.
async fn load_key_file<T>(path: &Path) -> Result<T, io::Error>
where
    T: From<[u8; 32]>,
{
    let mut file = File::open(path).await?;
    let mut secret_bytes = [0u8; 32];
    file.read_exact(&mut secret_bytes).await?;
    Ok(T::from(secret_bytes))
}
/// Save a key to a file at the given path. If the file already exists, it will be overwritten.
///
/// On unix platforms the file is created with mode `0o600` so only the owner
/// can read the private key; other platforms fall back to the default
/// permissions of `File::create`.
async fn save_key_file(key: &crypto::SecretKey, path: &Path) -> io::Result<()> {
    #[cfg(target_family = "unix")]
    {
        use tokio::fs::OpenOptions;
        let mut file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            // 0o600 instead of 0o644: this file holds a private key and must
            // not be readable by other users on the system.
            .mode(0o600)
            .open(path)
            .await?;
        file.write_all(key.as_bytes()).await?;
    }
    #[cfg(not(target_family = "unix"))]
    {
        let mut file = File::create(path).await?;
        file.write_all(key.as_bytes()).await?;
    }
    Ok(())
}
fn merge_config(cli_args: NodeArguments, file_config: MyceliumConfig) -> MergedNodeConfig {
MergedNodeConfig {
peers: if !cli_args.static_peers.is_empty() {
cli_args.static_peers
} else {
file_config.peers.unwrap_or_default()
},
tcp_listen_port: if cli_args.tcp_listen_port != DEFAULT_TCP_LISTEN_PORT {
cli_args.tcp_listen_port
} else {
file_config
.tcp_listen_port
.unwrap_or(DEFAULT_TCP_LISTEN_PORT)
},
| rust | Apache-2.0 | e8653f66a01eab8175acdb06e05cca15b7fae722 | 2026-01-04T20:24:55.945094Z | true |
smart-leds-rs/smart-leds | https://github.com/smart-leds-rs/smart-leds/blob/61a0f6b48cf2e4521a13d1820a128a970035d707/src/lib.rs | src/lib.rs | //! # Smart Leds
//!
//! Smart leds is a collection of crates to use smart leds on embedded devices with rust.
//!
//! Examples of smart leds include the popular WS2812 (also called Neopixel),
//! APA102 (DotStar) and other leds, which can be individually addressed.
//!
//! Other driver crates implement these individual interfaces and should be used in
//! tandem with this crate. This crate provides various convenience utilities
//! for end users.
//!
//! Other crates should depend on the
//! [smart-leds-trait](https://crates.io/crates/smart-leds-trait) crate, which
//! (should) experience fewer breaking changes
#![no_std]
pub mod colors;
pub mod hsv;
pub use smart_leds_trait::*;
/// An iterator adaptor that scales down the brightness of each color.
///
/// Be aware that applying this *after* gamma correction does not produce
/// correct colours; reduce brightness on already gamma-corrected data.
pub struct Brightness<I> {
    iter: I,
    brightness: u8,
}

impl<I> Iterator for Brightness<I>
where
    I: Iterator<Item = RGB8>,
{
    type Item = RGB8;

    fn next(&mut self) -> Option<RGB8> {
        // Scale each channel by (brightness + 1) / 256: brightness == 255
        // leaves values (almost) unchanged, brightness == 0 is near-black.
        let factor = self.brightness as u16 + 1;
        self.iter.next().map(|color| {
            let scale = |channel: u8| (channel as u16 * factor / 256) as u8;
            RGB8 {
                r: scale(color.r),
                g: scale(color.g),
                b: scale(color.b),
            }
        })
    }
}

/// Wrap `iter` so that every yielded color has its brightness reduced.
pub fn brightness<I>(iter: I, brightness: u8) -> Brightness<I>
where
    I: Iterator<Item = RGB8>,
{
    Brightness { iter, brightness }
}
/// An iterator that provides gamma correction.
///
/// Makes the colour distribution non-linear, to match your eyes' perception.
/// In other words, makes orange look orange.
///
/// If using in combination with a brightness reduction, apply the gamma
/// correction first, then the brightness reduction,
/// ie: brightness(gamma(data.iter().cloned()), 32)
pub struct Gamma<I> {
    iter: I,
}

impl<I> Iterator for Gamma<I>
where
    I: Iterator<Item = RGB8>,
{
    type Item = RGB8;

    fn next(&mut self) -> Option<RGB8> {
        // This table remaps linear input values
        // (the numbers we’d like to use; e.g. 127 = half brightness)
        // to nonlinear gamma-corrected output values
        // (numbers producing the desired effect on the LED;
        // e.g. 36 = half brightness).
        //
        // It's generated using the gamma.py script
        const GAMMA8: [u8; 256] = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4,
            4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11,
            12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22,
            22, 23, 24, 24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 35, 35, 36, 37,
            38, 39, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 50, 51, 52, 54, 55, 56, 57, 58,
            59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 72, 73, 74, 75, 77, 78, 79, 81, 82, 83, 85,
            86, 87, 89, 90, 92, 93, 95, 96, 98, 99, 101, 102, 104, 105, 107, 109, 110, 112, 114,
            115, 117, 119, 120, 122, 124, 126, 127, 129, 131, 133, 135, 137, 138, 140, 142, 144,
            146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 167, 169, 171, 173, 175, 177, 180,
            182, 184, 186, 189, 191, 193, 196, 198, 200, 203, 205, 208, 210, 213, 215, 218, 220,
            223, 225, 228, 231, 233, 236, 239, 241, 244, 247, 249, 252, 255,
        ];
        // Each channel is independently remapped through the lookup table.
        self.iter.next().map(|a| RGB8 {
            r: GAMMA8[a.r as usize],
            g: GAMMA8[a.g as usize],
            b: GAMMA8[a.b as usize],
        })
    }
}

/// Pass your iterator into this function to get gamma-corrected colors.
pub fn gamma<I>(iter: I) -> Gamma<I>
where
    I: Iterator<Item = RGB8>,
{
    Gamma { iter }
}
| rust | Apache-2.0 | 61a0f6b48cf2e4521a13d1820a128a970035d707 | 2026-01-04T20:25:05.940189Z | false |
smart-leds-rs/smart-leds | https://github.com/smart-leds-rs/smart-leds/blob/61a0f6b48cf2e4521a13d1820a128a970035d707/src/hsv.rs | src/hsv.rs | use smart_leds_trait::*;
/// A color in HSV (hue, saturation, value) space, each channel an 8-bit value.
#[derive(Copy, Clone, Default)]
pub struct Hsv {
    // Hue: the full u8 range 0..=255 covers one revolution of the color wheel.
    pub hue: u8,
    // Saturation: 0 yields grayscale (all channels equal), 255 is fully saturated.
    pub sat: u8,
    // Value (brightness): 0 is black, 255 is full brightness.
    pub val: u8,
}
/// Converts a hsv value into RGB values. Because the hsv values are integers, the precision of the
/// resulting RGB value is limited to ±4.
///
/// The full `hue` range 0..=255 covers one revolution of the color wheel,
/// split into six sectors handled by the match arms below.
///
/// NOTE: Since most led protocols & their implementations are very timing
/// sensitive, it's advisable to do the conversion before `write`-ing.
///
/// # Example
/// ```
/// use smart_leds::hsv::{hsv2rgb, Hsv};
/// let hsv = Hsv{hue: 89, sat: 230, val: 42};
/// let conv_rgb = hsv2rgb(&hsv);
/// // will return RGB { r: 4, g: 41, b: 8},
/// ```
pub fn hsv2rgb(hsv: &Hsv) -> RGB8 {
    let v: u16 = hsv.val as u16;
    let s: u16 = hsv.sat as u16;
    // Position within the current hue sector, rescaled to 0..=252.
    let f: u16 = (hsv.hue as u16 * 2 % 85) * 3; // relative interval
    // p: the minimum channel value (what everything fades to at full desaturation).
    let p: u16 = v * (255 - s) / 255;
    // q: the channel that fades *out* as `f` grows within the sector.
    let q: u16 = v * (255 - (s * f) / 255) / 255;
    // t: the channel that fades *in* as `f` grows within the sector.
    let t: u16 = v * (255 - (s * (255 - f)) / 255) / 255;
    // Each arm assigns v/p/q/t to r/g/b for one sector of the wheel;
    // hue == 255 wraps around to the same layout as the first sector.
    match hsv.hue {
        0..=42 => RGB {
            r: v as u8,
            g: t as u8,
            b: p as u8,
        },
        43..=84 => RGB {
            r: q as u8,
            g: v as u8,
            b: p as u8,
        },
        85..=127 => RGB {
            r: p as u8,
            g: v as u8,
            b: t as u8,
        },
        128..=169 => RGB {
            r: p as u8,
            g: q as u8,
            b: v as u8,
        },
        170..=212 => RGB {
            r: t as u8,
            g: p as u8,
            b: v as u8,
        },
        213..=254 => RGB {
            r: v as u8,
            g: p as u8,
            b: q as u8,
        },
        255 => RGB {
            r: v as u8,
            g: t as u8,
            b: p as u8,
        },
    }
}
/// Converts a hsv value into RGBW values.
///
/// # Example
/// ```
/// use smart_leds::hsv::{hsv2rgbw, Hsv};
/// let hsv = Hsv{hue: 89, sat: 230, val: 42};
/// let conv_rgb = hsv2rgbw(&hsv, 255);
/// // will return RGBW { r: 4, g: 41, b: 8, a: 255},
/// ```
pub fn hsv2rgbw(hsv: &Hsv, a: u8) -> RGBW<u8> {
let rgb = hsv2rgb(hsv);
RGBW {
r: rgb.r,
g: rgb.g,
b: rgb.b,
a: White(a),
}
}
#[cfg(test)]
mod test {
    use super::*;

    // Absolute difference between two channel values; used below to allow
    // the ±4 integer-precision tolerance documented on `hsv2rgb`.
    fn distance(i: u8, j: u8) -> u8 {
        if i < j {
            j - i
        } else {
            i - j
        }
    }

    // Spot-check hsv2rgb against known reference colors across all hue
    // sectors, plus a few partially saturated/dimmed values.
    #[test]
    fn test_hsv2rgb_1() {
        #[rustfmt::skip]
        let hsv = [
            Hsv{hue: 0, sat: 255, val: 255},
            Hsv{hue: 21, sat: 255, val: 255},
            Hsv{hue: 42, sat: 255, val: 255},
            Hsv{hue: 64, sat: 255, val: 255},
            Hsv{hue: 85, sat: 255, val: 255},
            Hsv{hue: 106, sat: 255, val: 255},
            Hsv{hue: 127, sat: 255, val: 255},
            Hsv{hue: 149, sat: 255, val: 255},
            Hsv{hue: 170, sat: 255, val: 255},
            Hsv{hue: 191, sat: 255, val: 255},
            Hsv{hue: 212, sat: 255, val: 255},
            Hsv{hue: 234, sat: 255, val: 255},
            Hsv{hue: 255, sat: 255, val: 255},
            Hsv{hue: 111, sat: 123, val: 35},
            Hsv{hue: 21, sat: 3, val: 138},
            Hsv{hue: 89, sat: 230, val: 42},
        ];
        #[rustfmt::skip]
        let rgb = [
            RGB { r: 255, g: 0 , b: 0},
            RGB { r: 255, g: 127, b: 0},
            RGB { r: 255, g: 255, b: 0},
            RGB { r: 127, g: 255, b: 0},
            RGB { r: 0, g: 255, b: 0},
            RGB { r: 0, g: 255, b: 127},
            RGB { r: 0, g: 255, b: 255},
            RGB { r: 0, g: 127, b: 255},
            RGB { r: 0, g: 0 , b: 255},
            RGB { r: 127, g: 0 , b: 255},
            RGB { r: 255, g: 0 , b: 255},
            RGB { r: 255, g: 0 , b: 127},
            RGB { r: 255, g: 0 , b: 0},
            RGB { r: 19, g: 35, b: 29},
            RGB { r: 137, g: 137, b: 136},
            RGB { r: 4, g: 41, b: 8},
        ];
        for i in 0..hsv.len() {
            // Compare channel-wise within the ±4 tolerance.
            let new_hsv = hsv2rgb(&hsv[i]);
            assert!(distance(new_hsv.r, rgb[i].r) < 4);
            assert!(distance(new_hsv.g, rgb[i].g) < 4);
            assert!(distance(new_hsv.b, rgb[i].b) < 4);
        }
    }

    #[test]
    // if sat == 0 then all colors are equal
    fn test_hsv2rgb_2() {
        for i in 0..=255 {
            #[rustfmt::skip]
            let rgb = hsv2rgb(&Hsv{hue: i, sat: 0, val: 42});
            assert! {rgb.r == rgb.b};
            assert! {rgb.b == rgb.g};
        }
    }

    // hsv2rgbw must match hsv2rgb on the color channels and pass the white
    // channel through unchanged.
    #[test]
    fn test_hsv2rgbw_1() {
        #[rustfmt::skip]
        let hsv = [
            (Hsv{hue: 0, sat: 255, val: 255}, 255),
            (Hsv{hue: 21, sat: 3, val: 138}, 128),
            (Hsv{hue: 89, sat: 230, val: 42}, 0),
        ];
        #[rustfmt::skip]
        let rgb = [
            RGBW { r: 255, g: 0, b: 0, a: White(255) },
            RGBW { r: 137, g: 137, b: 136, a: White(128)},
            RGBW { r: 4, g: 41, b: 8, a: White(0)},
        ];
        for i in 0..hsv.len() {
            let result = hsv2rgbw(&hsv[i].0, hsv[i].1);
            assert!(distance(result.r, rgb[i].r) < 4);
            assert!(distance(result.g, rgb[i].g) < 4);
            assert!(distance(result.b, rgb[i].b) < 4);
            assert_eq!(result.a, rgb[i].a);
        }
    }
}
| rust | Apache-2.0 | 61a0f6b48cf2e4521a13d1820a128a970035d707 | 2026-01-04T20:25:05.940189Z | false |
smart-leds-rs/smart-leds | https://github.com/smart-leds-rs/smart-leds/blob/61a0f6b48cf2e4521a13d1820a128a970035d707/src/colors.rs | src/colors.rs | use crate::RGB8;
/// See https://en.wikipedia.org/wiki/Web_colors
// 16 Original "Web" Colors
pub const WHITE: RGB8 = RGB8 { r: 0xFF, g: 0xFF, b: 0xFF };
pub const SILVER: RGB8 = RGB8 { r: 0xC0, g: 0xC0, b: 0xC0 };
pub const GRAY: RGB8 = RGB8 { r: 0x80, g: 0x80, b: 0x80 };
pub const BLACK: RGB8 = RGB8 { r: 0x00, g: 0x00, b: 0x00 };
pub const RED: RGB8 = RGB8 { r: 0xFF, g: 0x00, b: 0x00 };
pub const MAROON: RGB8 = RGB8 { r: 0x80, g: 0x00, b: 0x00 };
pub const YELLOW: RGB8 = RGB8 { r: 0xFF, g: 0xFF, b: 0x00 };
pub const OLIVE: RGB8 = RGB8 { r: 0x80, g: 0x80, b: 0x00 };
pub const LIME: RGB8 = RGB8 { r: 0x00, g: 0xFF, b: 0x00 };
pub const GREEN: RGB8 = RGB8 { r: 0x00, g: 0x80, b: 0x00 };
pub const AQUA: RGB8 = RGB8 { r: 0x00, g: 0xFF, b: 0xFF };
pub const TEAL: RGB8 = RGB8 { r: 0x00, g: 0x80, b: 0x80 };
pub const BLUE: RGB8 = RGB8 { r: 0x00, g: 0x00, b: 0xFF };
pub const NAVY: RGB8 = RGB8 { r: 0x00, g: 0x00, b: 0x80 };
pub const FUCHSIA: RGB8 = RGB8 { r: 0xFF, g: 0x00, b: 0xFF };
pub const PURPLE: RGB8 = RGB8 { r: 0x80, g: 0x00, b: 0x80 };
// Extended "X11" Colors
pub const PINK: RGB8 = RGB8 { r: 0xFF, g: 0xC0, b: 0xCB };
pub const LIGHT_PINK: RGB8 = RGB8 { r: 0xFF, g: 0xB6, b: 0xC1 };
pub const HOT_PINK: RGB8 = RGB8 { r: 0xFF, g: 0x69, b: 0xB4 };
pub const DEEP_PINK: RGB8 = RGB8 { r: 0xFF, g: 0x14, b: 0x93 };
pub const PALE_VIOLET_RED: RGB8 = RGB8 { r: 0xDB, g: 0x70, b: 0x93 };
pub const MEDIUM_VIOLET_RED: RGB8 = RGB8 { r: 0xC7, g: 0x15, b: 0x85 };
pub const LIGHT_SALMON: RGB8 = RGB8 { r: 0xFF, g: 0xA0, b: 0x7A };
pub const SALMON: RGB8 = RGB8 { r: 0xFA, g: 0x80, b: 0x72 };
pub const DARK_SALMON: RGB8 = RGB8 { r: 0xE9, g: 0x96, b: 0x7A };
pub const LIGHT_CORAL: RGB8 = RGB8 { r: 0xF0, g: 0x80, b: 0x80 };
pub const INDIAN_RED: RGB8 = RGB8 { r: 0xCD, g: 0x5C, b: 0x5C };
pub const CRIMSON: RGB8 = RGB8 { r: 0xDC, g: 0x14, b: 0x3C };
pub const FIREBRICK: RGB8 = RGB8 { r: 0xB2, g: 0x22, b: 0x22 };
pub const DARK_RED: RGB8 = RGB8 { r: 0x8B, g: 0x00, b: 0x00 };
pub const ORANGE_RED: RGB8 = RGB8 { r: 0xFF, g: 0x45, b: 0x00 };
pub const TOMATO: RGB8 = RGB8 { r: 0xFF, g: 0x63, b: 0x47 };
pub const CORAL: RGB8 = RGB8 { r: 0xFF, g: 0x7f, b: 0x50 };
pub const DARK_ORANGE: RGB8 = RGB8 { r: 0xFF, g: 0x8C, b: 0x00 };
pub const ORANGE: RGB8 = RGB8 { r: 0xFF, g: 0xA5, b: 0x00 };
pub const LIGHT_YELLOW: RGB8 = RGB8 { r: 0xFF, g: 0xFF, b: 0xE0 };
pub const LEMON_CHIFFON: RGB8 = RGB8 { r: 0xFF, g: 0xFA, b: 0xCD };
pub const LIGHT_GOLDENROD_YELLOW: RGB8 = RGB8 { r: 0xFA, g: 0xFA, b: 0xD2 };
pub const PAPAYA_WHIP: RGB8 = RGB8 { r: 0xFF, g: 0xEF, b: 0xD5 };
pub const MOCCASIN: RGB8 = RGB8 { r: 0xFF, g: 0xE4, b: 0xB5 };
pub const PEACH_PUFF: RGB8 = RGB8 { r: 0xFF, g: 0xDA, b: 0xB9 };
pub const PALE_GOLDENROD: RGB8 = RGB8 { r: 0xEE, g: 0xE8, b: 0xAA };
pub const KHAKI: RGB8 = RGB8 { r: 0xF0, g: 0xE6, b: 0x8C };
pub const DARK_KHAKI: RGB8 = RGB8 { r: 0xBD, g: 0xB7, b: 0x6B };
pub const GOLD: RGB8 = RGB8 { r: 0xFF, g: 0xD7, b: 0x00 };
pub const CORNSILK: RGB8 = RGB8 { r: 0xFF, g: 0xF8, b: 0xDC };
pub const BLANCHED_ALMOND: RGB8 = RGB8 { r: 0xFF, g: 0xEB, b: 0xCD };
pub const BISQUE: RGB8 = RGB8 { r: 0xFF, g: 0xE4, b: 0xC4 };
pub const NAVAJO_WHITE: RGB8 = RGB8 { r: 0xFF, g: 0xDE, b: 0xAD };
pub const WHEAT: RGB8 = RGB8 { r: 0xF5, g: 0xDE, b: 0xB3 };
pub const BURLYWOOD: RGB8 = RGB8 { r: 0xDE, g: 0xB8, b: 0x87 };
pub const TAN: RGB8 = RGB8 { r: 0xD2, g: 0xB4, b: 0x8C };
pub const ROSY_BROWN: RGB8 = RGB8 { r: 0xBC, g: 0x8F, b: 0x8F };
pub const SANDY_BROWN: RGB8 = RGB8 { r: 0xF4, g: 0xA4, b: 0x60 };
pub const GOLDENROD: RGB8 = RGB8 { r: 0xDA, g: 0xA5, b: 0x20 };
pub const DARK_GOLDENROD: RGB8 = RGB8 { r: 0xB8, g: 0x86, b: 0x0B };
pub const PERU: RGB8 = RGB8 { r: 0xCD, g: 0x85, b: 0x3F };
pub const CHOCOLATE: RGB8 = RGB8 { r: 0xD2, g: 0x69, b: 0x1E };
pub const SADDLE_BROWN: RGB8 = RGB8 { r: 0x8B, g: 0x45, b: 0x13 };
pub const SIENNA: RGB8 = RGB8 { r: 0xA0, g: 0x52, b: 0x2D };
pub const BROWN: RGB8 = RGB8 { r: 0xA5, g: 0x2A, b: 0x2A };
pub const DARK_OLIVE_GREEN: RGB8 = RGB8 { r: 0x55, g: 0x6B, b: 0x2F };
pub const OLIVE_DRAB: RGB8 = RGB8 { r: 0x6B, g: 0x8E, b: 0x23 };
pub const YELLOW_GREEN: RGB8 = RGB8 { r: 0x9A, g: 0xCD, b: 0x32 };
pub const LIME_GREEN: RGB8 = RGB8 { r: 0x32, g: 0xCD, b: 0x32 };
pub const LAWN_GREEN: RGB8 = RGB8 { r: 0x7C, g: 0xFC, b: 0x00 };
pub const CHARTREUSE: RGB8 = RGB8 { r: 0x7F, g: 0xFF, b: 0x00 };
pub const GREEN_YELLOW: RGB8 = RGB8 { r: 0xAD, g: 0xFF, b: 0x2F };
pub const SPRING_GREEN: RGB8 = RGB8 { r: 0x00, g: 0xFF, b: 0x7F };
pub const MEDIUM_SPRING_GREEN: RGB8 = RGB8 { r: 0x00, g: 0xFA, b: 0x9A };
pub const LIGHT_GREEN: RGB8 = RGB8 { r: 0x90, g: 0xEE, b: 0x90 };
pub const PALE_GREEN: RGB8 = RGB8 { r: 0x98, g: 0xFB, b: 0x98 };
pub const DARK_SEA_GREEN: RGB8 = RGB8 { r: 0x8F, g: 0xBC, b: 0x8F };
pub const MEDIUM_AQUAMARINE: RGB8 = RGB8 { r: 0x66, g: 0xCD, b: 0xAA };
pub const MEDIUM_SEA_GREEN: RGB8 = RGB8 { r: 0x3C, g: 0xB3, b: 0x71 };
pub const SEA_GREEN: RGB8 = RGB8 { r: 0x2E, g: 0x8B, b: 0x57 };
pub const FOREST_GREEN: RGB8 = RGB8 { r: 0x22, g: 0x8B, b: 0x22 };
pub const DARK_GREEN: RGB8 = RGB8 { r: 0x00, g: 0x64, b: 0x00 };
pub const CYAN: RGB8 = RGB8 { r: 0x00, g: 0xFF, b: 0xFF };
pub const LIGHT_CYAN: RGB8 = RGB8 { r: 0xE0, g: 0xFF, b: 0xFF };
pub const PALE_TURQUOISE: RGB8 = RGB8 { r: 0xAF, g: 0xEE, b: 0xEE };
pub const AQUAMARINE: RGB8 = RGB8 { r: 0x7F, g: 0xFF, b: 0xD4 };
pub const TURQUOISE: RGB8 = RGB8 { r: 0x40, g: 0xE0, b: 0xD0 };
pub const MEDIUM_TURQUOISE: RGB8 = RGB8 { r: 0x48, g: 0xD1, b: 0xCC };
pub const DARK_TURQUOISE: RGB8 = RGB8 { r: 0x00, g: 0xCE, b: 0xD1 };
pub const LIGHT_SEA_GREEN: RGB8 = RGB8 { r: 0x20, g: 0xB2, b: 0xAA };
pub const CADET_BLUE: RGB8 = RGB8 { r: 0x5F, g: 0x9E, b: 0xA0 };
pub const DARK_CYAN: RGB8 = RGB8 { r: 0x00, g: 0x8B, b: 0x8B };
pub const LIGHT_STEEL_BLUE: RGB8 = RGB8 { r: 0xB0, g: 0xC4, b: 0xDE };
pub const POWDER_BLUE: RGB8 = RGB8 { r: 0xB0, g: 0xE0, b: 0xE6 };
pub const LIGHT_BLUE: RGB8 = RGB8 { r: 0xAD, g: 0xD8, b: 0xE6 };
pub const SKY_BLUE: RGB8 = RGB8 { r: 0x87, g: 0xCE, b: 0xEB };
pub const LIGHT_SKY_BLUE: RGB8 = RGB8 { r: 0x87, g: 0xCE, b: 0xFA };
pub const DEEP_SKY_BLUE: RGB8 = RGB8 { r: 0x00, g: 0xBF, b: 0xFF };
pub const DODGER_BLUE: RGB8 = RGB8 { r: 0x1E, g: 0x90, b: 0xFF };
pub const CORNFLOWER_BLUE: RGB8 = RGB8 { r: 0x64, g: 0x95, b: 0xED };
pub const STEEL_BLUE: RGB8 = RGB8 { r: 0x46, g: 0x82, b: 0xB4 };
pub const ROYAL_BLUE: RGB8 = RGB8 { r: 0x41, g: 0x69, b: 0xE1 };
pub const MEDIUM_BLUE: RGB8 = RGB8 { r: 0x00, g: 0x00, b: 0xCD };
pub const DARK_BLUE: RGB8 = RGB8 { r: 0x00, g: 0x00, b: 0x8B };
pub const MIDNIGHT_BLUE: RGB8 = RGB8 { r: 0x19, g: 0x19, b: 0x70 };
pub const LAVENDER: RGB8 = RGB8 { r: 0xE6, g: 0xE6, b: 0xFA };
pub const THISTLE: RGB8 = RGB8 { r: 0xD8, g: 0xBF, b: 0xD8 };
pub const PLUM: RGB8 = RGB8 { r: 0xDD, g: 0xA0, b: 0xDD };
pub const VIOLET: RGB8 = RGB8 { r: 0xEE, g: 0x82, b: 0xEE };
pub const ORCHID: RGB8 = RGB8 { r: 0xDA, g: 0x70, b: 0xD6 };
pub const MAGENTA: RGB8 = RGB8 { r: 0xFF, g: 0x00, b: 0xFF };
pub const MEDIUM_ORCHID: RGB8 = RGB8 { r: 0xBA, g: 0x55, b: 0xD3 };
pub const MEDIUM_PURPLE: RGB8 = RGB8 { r: 0x93, g: 0x70, b: 0xDB };
pub const BLUE_VIOLET: RGB8 = RGB8 { r: 0x8A, g: 0x2B, b: 0xE2 };
pub const DARK_VIOLET: RGB8 = RGB8 { r: 0x94, g: 0x00, b: 0xD3 };
pub const DARK_ORCHID: RGB8 = RGB8 { r: 0x99, g: 0x32, b: 0xCC };
pub const DARK_MAGENTA: RGB8 = RGB8 { r: 0x8B, g: 0x00, b: 0x8B };
pub const INDIGO: RGB8 = RGB8 { r: 0x4B, g: 0x00, b: 0x82 };
pub const DARK_SLATE_BLUE: RGB8 = RGB8 { r: 0x4B, g: 0x3D, b: 0x8B };
pub const SLATE_BLUE: RGB8 = RGB8 { r: 0x6A, g: 0x5A, b: 0xCD };
pub const MEDIUM_SLATE_BLUE: RGB8 = RGB8 { r: 0x7B, g: 0x68, b: 0xEE };
pub const SNOW: RGB8 = RGB8 { r: 0xFF, g: 0xFA, b: 0xFA };
pub const HONEYDEW: RGB8 = RGB8 { r: 0xF0, g: 0xFF, b: 0xF0 };
pub const MINT_CREAM: RGB8 = RGB8 { r: 0xF5, g: 0xFF, b: 0xFA };
pub const AZURE: RGB8 = RGB8 { r: 0xF0, g: 0xFF, b: 0xFF };
pub const ALICE_BLUE: RGB8 = RGB8 { r: 0xF0, g: 0xF8, b: 0xFF };
pub const GHOST_WHITE: RGB8 = RGB8 { r: 0xF8, g: 0xF8, b: 0xFF };
pub const WHITE_SMOKE: RGB8 = RGB8 { r: 0xF5, g: 0xF5, b: 0xF5 };
pub const SEASHELL: RGB8 = RGB8 { r: 0xFF, g: 0xF5, b: 0xEE };
pub const BEIGE: RGB8 = RGB8 { r: 0xF5, g: 0xF5, b: 0xDC };
pub const OLD_LACE: RGB8 = RGB8 { r: 0xFD, g: 0xF5, b: 0xE6 };
pub const FLORAL_WHITE: RGB8 = RGB8 { r: 0xFF, g: 0xFA, b: 0xF0 };
pub const IVORY: RGB8 = RGB8 { r: 0xFF, g: 0xFF, b: 0xF0 };
pub const ANTINQUE_WHITE: RGB8 = RGB8 { r: 0xFA, g: 0xEB, b: 0xD7 }; // misspelling kept for backward compatibility
/// Correctly spelled alias for [`ANTINQUE_WHITE`] (X11 "AntiqueWhite").
pub const ANTIQUE_WHITE: RGB8 = ANTINQUE_WHITE;
pub const LINEN: RGB8 = RGB8 { r: 0xFA, g: 0xF0, b: 0xE6 };
pub const LAVENDER_BLUSH: RGB8 = RGB8 { r: 0xFF, g: 0xF0, b: 0xF5 };
pub const MISTY_ROSE: RGB8 = RGB8 { r: 0xFF, g: 0xE4, b: 0xE1 };
pub const GAINSBORO: RGB8 = RGB8 { r: 0xDC, g: 0xDC, b: 0xDC };
pub const LIGHT_GRAY: RGB8 = RGB8 { r: 0xD3, g: 0xD3, b: 0xD3 };
pub const DARK_GRAY: RGB8 = RGB8 { r: 0xA9, g: 0xA9, b: 0xA9 };
pub const DIM_GRAY: RGB8 = RGB8 { r: 0x69, g: 0x69, b: 0x69 };
pub const LIGHT_SLATE_GRAY: RGB8 = RGB8 { r: 0x77, g: 0x88, b: 0x99 };
pub const SLATE_GRAY: RGB8 = RGB8 { r: 0x70, g: 0x80, b: 0x90 };
pub const DARK_SLATE_GRAY: RGB8 = RGB8 { r: 0x2F, g: 0x4F, b: 0x4F };
| rust | Apache-2.0 | 61a0f6b48cf2e4521a13d1820a128a970035d707 | 2026-01-04T20:25:05.940189Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/config.rs | src/config.rs | use serde::Deserialize;
use std::fs;
/// Web-server settings: where the HTTP server binds.
#[derive(Deserialize)]
struct AppConfig {
    // Host or IP the server binds to.
    url: String,
    // TCP port the server listens on.
    port: u16,
}

/// Database (DAO) connection settings, combined into a MySQL URL.
#[derive(Deserialize)]
struct DaoConfig {
    user: String,
    password: String,
    // Host (and optionally port) of the MySQL server.
    address: String,
    // Name of the database to connect to.
    database: String,
}

/// Top-level application configuration, deserialized from a JSON file.
#[derive(Deserialize)]
pub struct Config {
    app: AppConfig,
    dao: DaoConfig,
}
impl Config {
    /// Read and parse the configuration from a JSON file at `path`.
    ///
    /// # Panics
    /// Panics with a descriptive message if the file cannot be read or does
    /// not contain valid configuration JSON. The application cannot run
    /// without configuration, so failing fast at startup is intentional —
    /// but unlike a bare `unwrap`, the message now says *which* file and
    /// *why* it failed.
    pub fn from_file(path: &'static str) -> Self {
        let config = fs::read_to_string(path)
            .unwrap_or_else(|e| panic!("failed to read config file {}: {}", path, e));
        serde_json::from_str(&config)
            .unwrap_or_else(|e| panic!("failed to parse config file {}: {}", path, e))
    }

    /// The `host:port` address the web server should bind to.
    pub fn get_app_url(&self) -> String {
        format!("{0}:{1}", self.app.url, self.app.port)
    }

    /// The MySQL connection URL in `mysql://user:password@address/database` form.
    pub fn get_database_url(&self) -> String {
        format!(
            "mysql://{0}:{1}@{2}/{3}",
            self.dao.user, self.dao.password, self.dao.address, self.dao.database
        )
    }
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/lib.rs | src/lib.rs | use crate::dao::Database;
use std::sync::{Arc, Mutex};
pub mod config;
pub mod controller;
pub mod dao;
pub mod model;
/// AppState
///
/// The primary dependency for our application's dependency injection.
/// Each controller function that interacts with the database requires an
/// `AppState` instance in order to communicate with the database.
pub struct AppState<'a> {
    // Shared counter handed to `log_request` by every handler.
    pub connections: Mutex<u32>,
    // Shared database context; the Arc is cloned cheaply per Actix worker.
    pub context: Arc<Database<'a>>,
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/main.rs | src/main.rs | use actix_web::{web, App, HttpServer};
use sqlx_user_crud::config::Config;
use sqlx_user_crud::dao::Database;
use sqlx_user_crud::{controller, AppState};
use std::sync::{Arc, Mutex};
/// Application entry point: load configuration, connect to MySQL, and start
/// the Actix HTTP server with all controllers registered.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    println!("=== SQLX User CRUD ===");
    // Read in the configuration file.
    // In small projects this can be a local configuration, but in more sophisticated systems, it is
    // best practice to keep the configuration file on a remote server where it can be retrieved
    // with an http request.
    let config_file: &'static str = "config.json";
    let config = Config::from_file(config_file);
    println!("Using configuration file from {0}", config_file);
    // Connect to the database.
    let db_context = Database::new(&config.get_database_url()).await;
    println!("Connected to database: {0}", config.get_database_url());
    // Instantiate the app_state. This application state will be cloned for each Actix thread but
    // the Arc of the DbContext will be reused in each Actix thread.
    let app_state = web::Data::new(AppState {
        connections: Mutex::new(0),
        context: Arc::new(db_context),
    });
    // Start the web application.
    // We'll need to transfer ownership of the AppState to the HttpServer via the `move`.
    // Then we can instantiate our controllers.
    let app = HttpServer::new(move || {
        App::new()
            .app_data(app_state.clone())
            .configure(controller::init_index_controller)
            .configure(controller::init_user_controller)
            .configure(controller::init_group_controller)
    })
    .bind(config.get_app_url())?;
    println!("Listening on: {0}", config.get_app_url());
    app.run().await
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/role_controller.rs | src/controller/role_controller.rs | use super::log_request;
use super::AppState;
use actix_web::{delete, get, patch, post, web, HttpResponse, Responder};
use serde::{Deserialize, Serialize};
/// Register all `/role` endpoints with the Actix service configuration.
pub fn init(cfg: &mut web::ServiceConfig) {
    cfg.service(get_role_by_id);
    cfg.service(post_role);
    cfg.service(patch_role_by_name);
    cfg.service(delete_role_by_name);
}
/// GET /role/{id} — fetch a single role by its numeric id.
///
/// Responds 200 with the role, or 404 when the lookup fails.
#[get("/role/{id}")]
async fn get_role_by_id(
    // Renamed from `group_id`: this endpoint handles roles, not groups.
    role_id: web::Path<i32>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    // Fixed log label: this is the /role endpoint, not /group (copy-paste error).
    log_request("GET: /role", &app_state.connections);
    let result = app_state
        .context
        .roles
        .get_role_by_id(role_id.into_inner())
        .await;
    match result {
        Err(_) => HttpResponse::NotFound().finish(),
        Ok(role) => HttpResponse::Ok().json(role),
    }
}
/// Request body for creating a role within an existing realm.
#[derive(Deserialize, Serialize)]
pub struct RoleAdd {
    pub role_name: String,
    // Name of the realm the role belongs to; the realm must already exist.
    pub realm_name: String,
    // Optional maximum — exact semantics defined by the roles DAO;
    // TODO confirm against `roles.add_role`.
    pub max: Option<i32>,
}
/// POST /role — create a new role in an existing realm.
///
/// Responds 404 when the realm does not exist, 202 with the created role on
/// success, and 500 on any database error.
#[post("/role")]
async fn post_role(
    role: web::Json<RoleAdd>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    log_request("POST: /role", &app_state.connections);
    let realm = app_state.context.realms
        .get_realm_by_name(role.realm_name.as_str())
        .await;
    if realm.is_err() {
        return HttpResponse::NotFound().finish();
    }
    let realm = realm.unwrap();
    // Bug fix: create the role under its own name (`role_name`). Previously
    // `realm_name` was passed here, so the role was stored under the realm's
    // name and the `get_role_by_name(role_name)` lookup below could not find it.
    let x = app_state.context.roles.add_role(&realm, role.role_name.as_str(), &role.max).await;
    match x {
        Ok(_) => {
            // Read the role back so the response carries the stored entity.
            let created = app_state
                .context
                .roles
                .get_role_by_name(role.role_name.as_str())
                .await;
            match created {
                Ok(r) => HttpResponse::Accepted().json(r),
                _ => HttpResponse::InternalServerError().finish(),
            }
        }
        _ => HttpResponse::InternalServerError().finish(),
    }
}
/// Request body for renaming a role from `old` to `new`.
#[derive(Deserialize, Serialize)]
pub struct RoleUpdate {
    pub old: String,
    pub new: String,
}
/// PATCH /role — rename a role from `update.old` to `update.new`.
///
/// Responds 202 with the new name on success, 500 on any database error.
#[patch("/role")]
async fn patch_role_by_name(
    update: web::Json<RoleUpdate>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    // Fixed log label: this is the /role endpoint, not /user (copy-paste error).
    log_request("PATCH: /role", &app_state.connections);
    let result = app_state
        .context
        .roles
        .update_role(&update.old, &update.new)
        .await;
    match result {
        Err(e) => HttpResponse::InternalServerError().body(format!("Error: {}", e)),
        Ok(_) => HttpResponse::Accepted().body(update.new.clone()),
    }
}
/// DELETE /role/{name} — delete a role by name.
///
/// Responds 200 with a confirmation message, 500 on any database error.
#[delete("/role/{name}")]
async fn delete_role_by_name(
    name: web::Path<String>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    // Fixed log label and response text: this endpoint deletes roles,
    // not groups (copy-paste error from the group controller).
    log_request("DELETE: /role", &app_state.connections);
    let result = app_state.context.roles.delete_role(name.as_str()).await;
    match result {
        Err(e) => HttpResponse::InternalServerError().body(format!("Error: {}", e)),
        Ok(_) => HttpResponse::Ok().body(format!("Successfully deleted role {}", name)),
    }
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/user_controller.rs | src/controller/user_controller.rs | use super::log_request;
use super::AppState;
use crate::model::User;
use actix_web::{delete, get, patch, post, web, HttpResponse, Responder};
use uuid::Uuid;
/// Register all `/user` endpoints with the Actix service configuration.
pub fn init(cfg: &mut web::ServiceConfig) {
    cfg.service(get_user);
    cfg.service(post_user);
    cfg.service(patch_user);
    cfg.service(delete_user);
}
/// GET /user/{id} — fetch a user and populate their group memberships.
///
/// Responds 404 when the user does not exist, 500 when the group lookup
/// fails after the user was found, otherwise 200 with the full user.
#[get("/user/{id}")]
async fn get_user(
    user_id: web::Path<String>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    log_request("GET: /user", &app_state.connections);
    let user = app_state.context.users.get_user_by_id(&user_id).await;
    match user {
        Err(_) => HttpResponse::NotFound().finish(),
        Ok(mut user) => {
            // The user row alone does not carry group data; fetch the
            // memberships separately and attach them before responding.
            let groups = app_state
                .context
                .users_to_groups
                .get_groups_by_user_id(&user.id)
                .await;
            match groups {
                Err(_) => HttpResponse::InternalServerError().finish(),
                Ok(groups) => {
                    user.groups = groups;
                    HttpResponse::Ok().json(user)
                }
            }
        }
    }
}
/// POST /user — create a user with a server-generated UUID id.
///
/// Responds 202 with the new id on success, 500 on insert failure.
#[post("/user")]
async fn post_user(user: web::Json<User>, app_state: web::Data<AppState<'_>>) -> impl Responder {
    log_request("POST: /user", &app_state.connections);
    let mut user = user.into_inner();
    // Any client-supplied id is overwritten; the server owns id generation.
    user.id = Uuid::new_v4().to_string();
    let x = app_state.context.users.add_user(&user).await;
    match x {
        Ok(_) => {
            if user.groups.len() > 0 {
                // NOTE(review): the result is deliberately discarded, so a
                // failure to link groups still returns 202 — confirm this
                // best-effort behavior is intended.
                let _ = app_state
                    .context
                    .users_to_groups
                    .add_user_groups(&user.id, &user.groups)
                    .await;
            }
            HttpResponse::Accepted().body(user.id)
        }
        Err(_) => HttpResponse::InternalServerError().finish(),
    }
}
/// Updates an existing user and refreshes its group memberships.
///
/// Responds `404 Not Found` when no row was updated, `202 Accepted` with
/// the user JSON on success, `500 Internal Server Error` otherwise.
#[patch("/user")]
async fn patch_user(user: web::Json<User>, app_state: web::Data<AppState<'_>>) -> impl Responder {
    log_request("PATCH: /user", &app_state.connections);
    let user = user.into_inner();
    let outcome = app_state.context.users.update_user(&user).await;
    if let Ok(rows) = outcome {
        if rows == 0 {
            // Zero affected rows means the user does not exist.
            return HttpResponse::NotFound().finish();
        }
        // Group refresh is best-effort; its result is intentionally ignored.
        let _ = app_state
            .context
            .users_to_groups
            .update_user_groups(&user)
            .await;
        return HttpResponse::Accepted().json(user);
    }
    HttpResponse::InternalServerError().finish()
}
/// Removes the user with the given id.
///
/// Responds `404 Not Found` when no row was deleted, `200 OK` on success,
/// `500 Internal Server Error` on a repository failure.
#[delete("/user/{id}")]
async fn delete_user(id: web::Path<String>, app_state: web::Data<AppState<'_>>) -> impl Responder {
    log_request("DELETE: /user", &app_state.connections);
    match app_state.context.users.delete_user(id.as_str()).await {
        Ok(0) => HttpResponse::NotFound().finish(),
        Ok(_) => HttpResponse::Ok().finish(),
        Err(_) => HttpResponse::InternalServerError().finish(),
    }
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/group_controller.rs | src/controller/group_controller.rs | use super::log_request;
use super::AppState;
use actix_web::{delete, get, patch, post, web, HttpResponse, Responder};
use serde::{Deserialize, Serialize};
/// Registers every group route on the actix service configuration.
pub fn init(cfg: &mut web::ServiceConfig) {
    // `service` returns `&mut ServiceConfig`, so the registrations chain.
    cfg.service(get_group_by_id)
        .service(post_group)
        .service(patch_group_by_name)
        .service(delete_group_by_name);
}
/// Fetches a single group by its numeric id.
///
/// Responds `200 OK` with the group as JSON, or `404 Not Found` when the
/// lookup fails.
#[get("/group/{id}")]
async fn get_group_by_id(
    group_id: web::Path<u64>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    log_request("GET: /group", &app_state.connections);
    let id = group_id.into_inner();
    match app_state.context.groups.get_group_by_id(id).await {
        Ok(group) => HttpResponse::Ok().json(group),
        Err(_) => HttpResponse::NotFound().finish(),
    }
}
/// Creates a group with the supplied name, then reads it back so the
/// response carries the stored representation.
///
/// Responds `202 Accepted` with the group as JSON, or `500 Internal
/// Server Error` when either the insert or the read-back fails.
#[post("/group")]
async fn post_group(
    group: web::Json<String>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    log_request("POST: /group", &app_state.connections);
    // Guard clause: the read-back only makes sense after a successful insert.
    if app_state
        .context
        .groups
        .add_group(group.as_str())
        .await
        .is_err()
    {
        return HttpResponse::InternalServerError().finish();
    }
    let stored = app_state
        .context
        .groups
        .get_group_by_name(group.as_str())
        .await;
    match stored {
        Ok(g) => HttpResponse::Accepted().json(g),
        _ => HttpResponse::InternalServerError().finish(),
    }
}
/// JSON payload for `PATCH /group`: renames a group.
#[derive(Deserialize, Serialize)]
pub struct GroupUpdate {
    // Current name of the group to rename.
    pub old: String,
    // New name to assign.
    pub new: String,
}
/// Renames a group from `update.old` to `update.new`.
///
/// Responds `202 Accepted` carrying the new name, or `500 Internal
/// Server Error` with the repository error text on failure.
#[patch("/group")]
async fn patch_group_by_name(
    update: web::Json<GroupUpdate>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    // Log label fixed: this route patches a group, not a user.
    log_request("PATCH: /group", &app_state.connections);
    let x = app_state
        .context
        .groups
        .update_group(&update.old, &update.new)
        .await;
    match x {
        Err(e) => HttpResponse::InternalServerError().body(format!("Error: {}", e)),
        Ok(_) => HttpResponse::Accepted().body(update.new.clone()),
    }
}
/// Deletes the group with the given name.
///
/// Responds `200 OK` with a confirmation message, or `500 Internal
/// Server Error` with the repository error text on failure.
#[delete("/group/{name}")]
async fn delete_group_by_name(
    name: web::Path<String>,
    app_state: web::Data<AppState<'_>>,
) -> impl Responder {
    log_request("DELETE: /group", &app_state.connections);
    match app_state.context.groups.delete_group(name.as_str()).await {
        Ok(_) => HttpResponse::Ok().body(format!("Successfully deleted group {}", name)),
        Err(e) => HttpResponse::InternalServerError().body(format!("Error: {}", e)),
    }
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
jamesjmeyer210/actix_sqlx_mysql_user_crud | https://github.com/jamesjmeyer210/actix_sqlx_mysql_user_crud/blob/c854e966c5d2bc01b07307d070e82ef511c3a900/src/controller/mod.rs | src/controller/mod.rs | use super::AppState;
use std::sync::Mutex;
pub mod group_controller;
pub mod index_controller;
pub mod user_controller;
pub use group_controller::init as init_group_controller;
pub use index_controller::init as init_index_controller;
pub use user_controller::init as init_user_controller;
/// Increments the shared request counter and prints the route label plus
/// the running total to stdout.
///
/// A poisoned mutex (another holder panicked) is recovered from instead of
/// propagating the panic: the guarded value is a plain `u32` counter, so it
/// cannot be left in an invalid state, and panicking in the logger would
/// only cascade the original failure into every subsequent request.
fn log_request(route: &'static str, connections: &Mutex<u32>) {
    let mut con = connections
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner());
    *con += 1;
    println!("{}\n\tconnections: {}", route, con);
}
| rust | Unlicense | c854e966c5d2bc01b07307d070e82ef511c3a900 | 2026-01-04T20:25:06.640301Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.