repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/rtmp_server/actor/tests/rtmp_client.rs | mmids-rtmp/src/rtmp_server/actor/tests/rtmp_client.rs | use bytes::Bytes;
use mmids_core::net::tcp::{
OutboundPacket, RequestFailureReason, TcpSocketRequest, TcpSocketResponse,
};
use mmids_core::net::ConnectionId;
use mmids_core::test_utils;
use rml_rtmp::handshake::{Handshake, HandshakeProcessResult, PeerType};
use rml_rtmp::sessions::{
ClientSession, ClientSessionConfig, ClientSessionError, ClientSessionEvent,
ClientSessionResult, PublishRequestType, StreamMetadata,
};
use rml_rtmp::time::RtmpTimestamp;
use std::net::{SocketAddr, SocketAddrV4};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::time::timeout;
/// Connection identifier used for the single simulated RTMP connection.
pub const CONNECTION_ID: &str = "test-1234";
/// Simulates the TCP socket manager side of the RTMP server endpoint, letting
/// tests drive port-open requests and fake a single inbound RTMP connection.
pub struct RtmpTestClient {
    // Receives `OpenPort` requests issued by the endpoint under test.
    socket_manager_receiver: UnboundedReceiver<TcpSocketRequest>,
    // Set once a port request has been accepted; used to push connection
    // events (e.g. `NewConnection`) back to the endpoint.
    socket_manager_response_sender: Option<UnboundedSender<TcpSocketResponse>>,
    // Port that was successfully registered, if any.
    port: Option<u16>,
    // At most one simulated RTMP connection is supported at a time.
    connection: Option<Connection>,
}
/// Channels and protocol state for one simulated RTMP connection.
struct Connection {
    // Bytes the simulated client sends toward the server endpoint.
    incoming_bytes: UnboundedSender<Bytes>,
    // Packets the server endpoint sends back to the client.
    outgoing_bytes: UnboundedReceiver<OutboundPacket>,
    // rml_rtmp client session driving the RTMP protocol exchange.
    session: ClientSession,
}
impl RtmpTestClient {
pub fn new() -> (Self, UnboundedSender<TcpSocketRequest>) {
let (sender, receiver) = unbounded_channel();
let client = RtmpTestClient {
socket_manager_receiver: receiver,
socket_manager_response_sender: None,
port: None,
connection: None,
};
(client, sender)
}
pub async fn accept_port_request(&mut self, port: u16, use_tls: bool) {
let request = test_utils::expect_mpsc_response(&mut self.socket_manager_receiver).await;
match request {
TcpSocketRequest::OpenPort {
port: requested_port,
use_tls: requested_tls,
response_channel,
} => {
assert_eq!(
requested_port, port,
"Requested port was not the expected port"
);
assert_eq!(
requested_tls, use_tls,
"Requested TLS flag was not expected"
);
if response_channel.is_closed() {
panic!("Response channel was closed");
}
if self.socket_manager_response_sender.is_some() {
panic!("Port already registered");
}
let _ = response_channel.send(TcpSocketResponse::RequestAccepted {});
self.socket_manager_response_sender = Some(response_channel);
self.port = Some(port);
}
}
}
pub async fn deny_port_request(&mut self, port: u16, use_tls: bool) {
let request = test_utils::expect_mpsc_response(&mut self.socket_manager_receiver).await;
match request {
TcpSocketRequest::OpenPort {
port: requested_port,
use_tls: requested_tls,
response_channel,
} => {
assert_eq!(
requested_port, port,
"Requested port was not the expected port"
);
assert_eq!(
requested_tls, use_tls,
"Requested TLS flag was not expected"
);
if response_channel.is_closed() {
panic!("Response channel was closed");
}
if self.socket_manager_response_sender.is_some() {
panic!("Port already registered");
}
let _ = response_channel.send(TcpSocketResponse::RequestDenied {
reason: RequestFailureReason::PortInUse,
});
}
}
}
pub async fn expect_empty_request_channel(&mut self) {
test_utils::expect_mpsc_timeout(&mut self.socket_manager_receiver).await;
}
pub async fn assert_connection_sender_closed(&mut self) {
let connection = self
.connection
.as_mut()
.expect("Connection not established yet");
let wait_time = Duration::from_millis(10);
if timeout(wait_time, connection.incoming_bytes.closed())
.await
.is_err()
{
panic!("Response sender not closed as expected (not disconnected");
}
}
pub async fn perform_handshake(&mut self) {
if self.connection.is_some() {
panic!("Only one connection is supported at a time");
}
let connection_id = ConnectionId(Arc::new(CONNECTION_ID.to_string()));
let (incoming_sender, incoming_receiver) = unbounded_channel();
let (outgoing_sender, mut outgoing_receiver) = unbounded_channel();
self.socket_manager_response_sender
.as_ref()
.unwrap()
.send(TcpSocketResponse::NewConnection {
port: self.port.unwrap(),
connection_id: connection_id.clone(),
incoming_bytes: incoming_receiver,
outgoing_bytes: outgoing_sender,
socket_address: SocketAddr::V4(SocketAddrV4::new([127, 0, 0, 1].into(), 1234)),
})
.expect("Failed to send new connection signal");
let mut handshake = Handshake::new(PeerType::Client);
let p0_and_p1 = handshake
.generate_outbound_p0_and_p1()
.expect("Failed to generate p0 and p1");
incoming_sender
.send(Bytes::from(p0_and_p1))
.expect("incoming bytes channel closed");
let response = test_utils::expect_mpsc_response(&mut outgoing_receiver).await;
let result = handshake
.process_bytes(&response.bytes)
.expect("Failed to process received p0 and p1 packet");
let response_bytes = match result {
HandshakeProcessResult::InProgress { response_bytes } => response_bytes,
HandshakeProcessResult::Completed { .. } => {
panic!("Did not expect to be completed after first packet")
}
};
incoming_sender
.send(Bytes::from(response_bytes))
.expect("Incoming bytes channel closed");
let response = test_utils::expect_mpsc_response(&mut outgoing_receiver).await;
let result = handshake
.process_bytes(&response.bytes)
.expect("Failed to process p2 packet");
match result {
HandshakeProcessResult::InProgress { .. } => {
panic!("Did not expect to still be in progress after 2nd packet")
}
HandshakeProcessResult::Completed {
remaining_bytes, ..
} => {
if !remaining_bytes.is_empty() {
panic!("Expected no leftover bytes after handshake completed");
}
}
}
let (mut session, client_results) = ClientSession::new(ClientSessionConfig::new())
.expect("Failed to generate client session");
for result in client_results {
match result {
ClientSessionResult::OutboundResponse(packet) => {
incoming_sender
.send(Bytes::from(packet.bytes))
.expect("Incoming bytes channel closed");
}
x => panic!("Unexpected session result of {:?}", x),
}
}
// Handle any initial messages the server may send (like chunks size)
loop {
let packet = match timeout(Duration::from_millis(10), outgoing_receiver.recv()).await {
Ok(Some(packet)) => packet,
Ok(None) => panic!("outgoing receiver sender closed"),
Err(_) => break,
};
let results = session
.handle_input(&packet.bytes)
.expect("Error processing bytes");
for result in results {
if let ClientSessionResult::OutboundResponse(packet) = result {
incoming_sender
.send(Bytes::from(packet.bytes))
.expect("Incoming bytes channel closed");
}
}
}
self.connection = Some(Connection {
session,
incoming_bytes: incoming_sender,
outgoing_bytes: outgoing_receiver,
})
}
pub async fn connect_to_app(&mut self, app: String, should_succeed: bool) {
self.execute_session_method_single_result(|session| session.request_connection(app));
if should_succeed {
let connection = self.connection.as_mut().unwrap();
let response = test_utils::expect_mpsc_response(&mut connection.outgoing_bytes).await;
let results = connection
.session
.handle_input(&response.bytes)
.expect("Failed to process results");
// Client will send back an event and a window acknowledgement message
let mut event_raised = false;
for result in results {
if let ClientSessionResult::RaisedEvent(
ClientSessionEvent::ConnectionRequestAccepted,
) = result
{
event_raised = true;
}
}
if !event_raised {
panic!("No connection request accepted event raised");
}
}
}
pub async fn publish_to_stream_key(&mut self, stream_key: String, should_succeed: bool) {
self.execute_session_method_single_result(|session| {
session.request_publishing(stream_key, PublishRequestType::Live)
});
// `createStream` should always succeed
let receiver = &mut self.connection.as_mut().unwrap().outgoing_bytes;
let response = test_utils::expect_mpsc_response(receiver).await;
// handle create stream response
self.execute_session_method_vec_result(|session| session.handle_input(&response.bytes));
if should_succeed {
let connection = self.connection.as_mut().unwrap();
let mut all_results = Vec::new();
loop {
let response = match timeout(
Duration::from_millis(10),
connection.outgoing_bytes.recv(),
)
.await
{
Ok(Some(response)) => response,
Ok(None) => panic!("Outgoing bytes channel closed"),
Err(_) => break, // no more packets coming in
};
let results = connection
.session
.handle_input(&response.bytes)
.expect("Failed to process results");
all_results.extend(results);
}
assert_eq!(all_results.len(), 1, "Only one result expected");
match all_results.remove(0) {
ClientSessionResult::RaisedEvent(ClientSessionEvent::PublishRequestAccepted) => (),
result => panic!("Unexpected result seen: {:?}", result),
}
}
}
pub async fn watch_stream_key(&mut self, stream_key: String, should_succeed: bool) {
self.execute_session_method_single_result(|session| session.request_playback(stream_key));
// `createStream` should always succeed
let receiver = &mut self.connection.as_mut().unwrap().outgoing_bytes;
let response = test_utils::expect_mpsc_response(receiver).await;
self.execute_session_method_vec_result(|session| session.handle_input(&response.bytes));
if should_succeed {
let connection = self.connection.as_mut().unwrap();
let mut all_results = Vec::new();
loop {
let response = match timeout(
Duration::from_millis(10),
connection.outgoing_bytes.recv(),
)
.await
{
Ok(Some(response)) => response,
Ok(None) => panic!("Outgoing bytes channel closed"),
Err(_) => break, // no more packets coming in
};
let results = connection
.session
.handle_input(&response.bytes)
.expect("Failed to process results");
all_results.extend(results);
}
let mut accepted_event_received = false;
for result in all_results {
if let ClientSessionResult::RaisedEvent(
ClientSessionEvent::PlaybackRequestAccepted,
) = result
{
accepted_event_received = true;
}
}
assert!(
accepted_event_received,
"PlaybackRequestAccepted event not raised"
);
}
}
pub async fn stop_watching(&mut self) {
self.execute_session_method_vec_result(|session| session.stop_playback());
}
pub fn disconnect(&mut self) {
self.connection = None;
}
pub async fn stop_publishing(&mut self) {
self.execute_session_method_vec_result(|session| session.stop_publishing());
}
pub fn publish_metadata(&mut self, metadata: StreamMetadata) {
self.execute_session_method_single_result(|session| session.publish_metadata(&metadata));
}
pub fn publish_video(&mut self, data: Bytes, timestamp: RtmpTimestamp) {
self.execute_session_method_single_result(|session| {
session.publish_video_data(data, timestamp, false)
});
}
pub fn publish_audio(&mut self, data: Bytes, timestamp: RtmpTimestamp) {
self.execute_session_method_single_result(|session| {
session.publish_audio_data(data, timestamp, false)
});
}
pub fn execute_session_method_single_result(
&mut self,
function: impl FnOnce(&mut ClientSession) -> Result<ClientSessionResult, ClientSessionError>,
) {
let connection = self
.connection
.as_mut()
.expect("Connection not established yet");
let result = function(&mut connection.session).expect("Client session returned error");
match result {
ClientSessionResult::OutboundResponse(packet) => connection
.incoming_bytes
.send(Bytes::from(packet.bytes))
.expect("Failed to send stop publishing command"),
x => panic!("Unexpected session result: {:?}", x),
}
}
fn execute_session_method_vec_result(
&mut self,
function: impl FnOnce(
&mut ClientSession,
) -> Result<Vec<ClientSessionResult>, ClientSessionError>,
) {
let connection = self
.connection
.as_mut()
.expect("Connection not established yet");
let results = function(&mut connection.session).expect("Client session returned error");
for result in results {
match result {
ClientSessionResult::OutboundResponse(packet) => connection
.incoming_bytes
.send(Bytes::from(packet.bytes))
.expect("Failed to send packet"),
x => panic!("Unexpected session result: {:?}", x),
}
}
}
pub async fn get_next_event(&mut self) -> Option<ClientSessionEvent> {
let connection = self
.connection
.as_mut()
.expect("Connection not established yet");
loop {
let packet =
match timeout(Duration::from_millis(10), connection.outgoing_bytes.recv()).await {
Ok(Some(packet)) => packet,
_ => break,
};
let results = connection
.session
.handle_input(&packet.bytes)
.expect("Failed to handle packet");
for result in results {
if let ClientSessionResult::RaisedEvent(event) = result {
return Some(event);
}
}
}
None
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/rtmp_server/actor/tests/mod.rs | mmids-rtmp/src/rtmp_server/actor/tests/mod.rs | use crate::rtmp_server::actor::tests::rtmp_client::RtmpTestClient;
use crate::rtmp_server::actor::tests::test_context::TestContextBuilder;
use crate::rtmp_server::{
start_rtmp_server_endpoint, IpRestriction, RtmpEndpointMediaData, RtmpEndpointMediaMessage,
RtmpEndpointPublisherMessage, RtmpEndpointRequest, RtmpEndpointWatcherNotification,
StreamKeyRegistration, ValidationResponse,
};
use bytes::Bytes;
use mmids_core::test_utils;
use rml_rtmp::sessions::{ClientSessionEvent, StreamMetadata};
use rml_rtmp::time::RtmpTimestamp;
use std::sync::Arc;
use tokio::sync::mpsc::unbounded_channel;
mod rtmp_client;
mod test_context;
// Registering a publisher on a specific port should succeed once the socket
// manager accepts the matching OpenPort request.
#[tokio::test]
async fn can_register_for_specific_port_for_publishers() {
    let (mut client, socket_sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(socket_sender);
    let (publisher_sender, mut publisher_receiver) = unbounded_channel();

    let request = RtmpEndpointRequest::ListenForPublishers {
        port: 9999,
        use_tls: false,
        requires_registrant_approval: false,
        stream_id: None,
        ip_restrictions: IpRestriction::None,
        rtmp_app: Arc::new("app".to_string()),
        rtmp_stream_key: StreamKeyRegistration::Any,
        message_channel: publisher_sender,
    };

    endpoint
        .send(request)
        .expect("Endpoint request failed to send");

    // The socket manager side approves the port request.
    client.accept_port_request(9999, false).await;

    // The registrant should be notified that registration succeeded.
    match test_utils::expect_mpsc_response(&mut publisher_receiver).await {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Same as the plain registration test, but the OpenPort request must carry
// the TLS flag when the registrant asks for TLS.
#[tokio::test]
async fn can_register_with_tls_enabled() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // Request a publisher registration with TLS enabled.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: true,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    // The socket manager should see use_tls == true; accept it.
    client.accept_port_request(9999, true).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// When the socket manager denies the OpenPort request, the registrant must
// receive a registration-failed notification.
#[tokio::test]
async fn endpoint_publisher_receives_failed_when_port_rejected() {
    let (mut client, socket_sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(socket_sender);
    let (publisher_sender, mut publisher_receiver) = unbounded_channel();

    let request = RtmpEndpointRequest::ListenForPublishers {
        port: 9999,
        use_tls: false,
        requires_registrant_approval: false,
        stream_id: None,
        ip_restrictions: IpRestriction::None,
        rtmp_app: Arc::new("app".to_string()),
        rtmp_stream_key: StreamKeyRegistration::Any,
        message_channel: publisher_sender,
    };

    endpoint
        .send(request)
        .expect("Endpoint request failed to send");

    // The socket manager side rejects the port request.
    client.deny_port_request(9999, false).await;

    // The registrant should be told registration failed.
    match test_utils::expect_mpsc_response(&mut publisher_receiver).await {
        RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// A second registration on an already-open port should be served from the
// endpoint's existing port state without another OpenPort round trip.
#[tokio::test]
async fn multiple_requests_for_same_port_only_sends_one_request_to_socket_manager() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration triggers an OpenPort request, which we accept.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration on the same port but a different app.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app2".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    // No new OpenPort request should reach the socket manager...
    client.expect_empty_request_channel().await;

    // ...yet the second registrant still gets a success notification.
    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Two publisher registrations for the same app with Any stream key conflict;
// the second must be rejected.
#[tokio::test]
async fn second_publisher_rejected_on_same_app_when_both_any_stream_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration: app "app", Any stream key — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration with the identical app + Any key — must fail.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Two publisher registrations for the same app and the same exact stream key
// conflict; the second must be rejected.
#[tokio::test]
async fn second_publisher_rejected_on_same_app_and_same_exact_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration: app "app", exact key "abc" — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration with the same app + same exact key — must fail.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// An existing Any-key registration on an app covers every stream key, so a
// later exact-key registration on that app must be rejected.
#[tokio::test]
async fn second_publisher_rejected_on_same_app_when_first_request_is_for_any_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration: Any stream key — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration: exact key on the same app — must fail.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// An Any-key registration would overlap an existing exact-key registration on
// the same app, so it must be rejected.
#[tokio::test]
async fn second_publisher_rejected_on_same_app_when_first_request_is_for_specific_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration: exact key "abc" — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration: Any key on the same app — must fail.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Two exact-key registrations on the same app with different keys do not
// overlap, so both should be accepted.
#[tokio::test]
async fn second_publisher_accepted_on_same_app_on_different_exact_keys() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();

    // First registration: exact key "abc" — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            message_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration: different exact key "def" — also accepted.
    let (sender2, mut receiver2) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForPublishers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            stream_id: None,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("def".to_string())),
            message_channel: sender2,
        })
        .expect("2nd endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Registering a watcher on a specific port should succeed once the socket
// manager accepts the matching OpenPort request.
#[tokio::test]
async fn can_register_for_specific_port_for_watcher() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    // Media channel is unused here but required by the registration message.
    let (_media_sender, media_receiver) = unbounded_channel();

    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    // Accept the port, then expect a success notification.
    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// When the socket manager denies the OpenPort request, the watcher registrant
// must receive a registration-failed notification.
#[tokio::test]
async fn endpoint_watcher_receives_failed_when_port_rejected() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    // Media channel is unused here but required by the registration message.
    let (_media_sender, media_receiver) = unbounded_channel();

    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    // Deny the port, then expect a failure notification.
    client.deny_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Two watcher registrations for the same app with Any stream key conflict;
// the second must be rejected.
#[tokio::test]
async fn second_watcher_rejected_on_same_app_when_both_any_stream_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();

    // First watcher registration: Any stream key — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration with identical app + Any key — must fail.
    let (sender, mut receiver2) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// Two watcher registrations for the same app and the same exact stream key
// conflict; the second must be rejected.
#[tokio::test]
async fn second_watcher_rejected_on_same_app_and_same_exact_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();

    // First watcher registration: exact key "abc" — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration with the same app + same exact key — must fail.
    let (sender, mut receiver2) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// An existing Any-key watcher registration covers every stream key on the
// app, so a later exact-key registration must be rejected.
#[tokio::test]
async fn second_watcher_rejected_on_same_app_when_first_request_is_for_any_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();

    // First registration: Any stream key — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration: exact key on the same app — must fail.
    let (sender, mut receiver2) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
// An Any-key watcher registration would overlap an existing exact-key
// registration on the same app, so it must be rejected.
#[tokio::test]
async fn second_watcher_rejected_on_same_app_when_first_request_is_for_specific_key() {
    let (mut client, sender) = RtmpTestClient::new();
    let endpoint = start_rtmp_server_endpoint(sender);
    let (sender, mut receiver) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();

    // First registration: exact key "abc" — accepted.
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    client.accept_port_request(9999, false).await;

    let response = test_utils::expect_mpsc_response(&mut receiver).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }

    // Second registration: Any key on the same app — must fail.
    let (sender, mut receiver2) = unbounded_channel();
    let (_media_sender, media_receiver) = unbounded_channel();
    endpoint
        .send(RtmpEndpointRequest::ListenForWatchers {
            port: 9999,
            use_tls: false,
            requires_registrant_approval: false,
            ip_restrictions: IpRestriction::None,
            rtmp_app: Arc::new("app".to_string()),
            rtmp_stream_key: StreamKeyRegistration::Any,
            media_channel: media_receiver,
            notification_channel: sender,
        })
        .expect("Endpoint request failed to send");

    let response = test_utils::expect_mpsc_response(&mut receiver2).await;
    match response {
        RtmpEndpointWatcherNotification::WatcherRegistrationFailed => (),
        x => panic!("Unexpected endpoint response: {:?}", x),
    }
}
#[tokio::test]
async fn second_watcher_accepted_on_same_app_with_different_exact_keys() {
let (mut client, sender) = RtmpTestClient::new();
let endpoint = start_rtmp_server_endpoint(sender);
let (sender, mut receiver) = unbounded_channel();
let (_media_sender, media_receiver) = unbounded_channel();
endpoint
.send(RtmpEndpointRequest::ListenForWatchers {
port: 9999,
use_tls: false,
requires_registrant_approval: false,
ip_restrictions: IpRestriction::None,
rtmp_app: Arc::new("app".to_string()),
rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
media_channel: media_receiver,
notification_channel: sender,
})
.expect("Endpoint request failed to send");
client.accept_port_request(9999, false).await;
let response = test_utils::expect_mpsc_response(&mut receiver).await;
match response {
RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
x => panic!("Unexpected endpoint response: {:?}", x),
}
let (sender, mut receiver2) = unbounded_channel();
let (_media_sender, media_receiver) = unbounded_channel();
endpoint
.send(RtmpEndpointRequest::ListenForWatchers {
port: 9999,
use_tls: false,
requires_registrant_approval: false,
ip_restrictions: IpRestriction::None,
rtmp_app: Arc::new("app".to_string()),
rtmp_stream_key: StreamKeyRegistration::Exact(Arc::new("def".to_string())),
media_channel: media_receiver,
notification_channel: sender,
})
.expect("Endpoint request failed to send");
let response = test_utils::expect_mpsc_response(&mut receiver2).await;
match response {
RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => (),
x => panic!("Unexpected endpoint response: {:?}", x),
}
}
#[tokio::test]
async fn second_request_fails_if_tls_option_differs() {
let (mut client, sender) = RtmpTestClient::new();
let endpoint = start_rtmp_server_endpoint(sender);
let (sender, mut receiver) = unbounded_channel();
endpoint
.send(RtmpEndpointRequest::ListenForPublishers {
port: 9999,
use_tls: false,
requires_registrant_approval: false,
stream_id: None,
ip_restrictions: IpRestriction::None,
rtmp_app: Arc::new("app".to_string()),
rtmp_stream_key: StreamKeyRegistration::Any,
message_channel: sender,
})
.expect("Endpoint request failed to send");
client.accept_port_request(9999, false).await;
let response = test_utils::expect_mpsc_response(&mut receiver).await;
match response {
RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => (),
x => panic!("Unexpected endpoint response: {:?}", x),
}
let (sender2, mut receiver2) = unbounded_channel();
endpoint
.send(RtmpEndpointRequest::ListenForPublishers {
port: 9999,
use_tls: true,
requires_registrant_approval: false,
stream_id: None,
ip_restrictions: IpRestriction::None,
rtmp_app: Arc::new("app2".to_string()),
rtmp_stream_key: StreamKeyRegistration::Any,
message_channel: sender2,
})
.expect("2nd endpoint request failed to send");
let response = test_utils::expect_mpsc_response(&mut receiver2).await;
match response {
RtmpEndpointPublisherMessage::PublisherRegistrationFailed => (),
x => panic!("Unexpected endpoint response: {:?}", x),
}
}
#[tokio::test]
async fn publisher_disconnected_if_connecting_to_wrong_app() {
let mut context = TestContextBuilder::new().into_publisher().await;
context.client.perform_handshake().await;
context
.client
.connect_to_app("adsfasdfadfs".to_string(), false)
.await;
context.client.assert_connection_sender_closed().await;
}
#[tokio::test]
async fn publisher_disconnected_if_connecting_to_wrong_stream_key() {
let mut context = TestContextBuilder::new()
.set_stream_key(StreamKeyRegistration::Exact(Arc::new("key".to_string())))
.into_publisher()
.await;
context.client.perform_handshake().await;
context
.client
.connect_to_app(context.rtmp_app.clone(), true)
.await;
context
.client
.publish_to_stream_key("abc".to_string(), false)
.await;
context.client.assert_connection_sender_closed().await;
}
#[tokio::test]
async fn publisher_can_connect_on_registered_app_and_stream_key() {
let mut context = TestContextBuilder::new()
.set_stream_key(StreamKeyRegistration::Exact(Arc::new("key".to_string())))
.into_publisher()
.await;
context.client.perform_handshake().await;
context
.client
.connect_to_app(context.rtmp_app.clone(), true)
.await;
context
.client
.publish_to_stream_key("key".to_string(), true)
.await;
let receiver = context.publish_receiver.as_mut().unwrap();
let response = test_utils::expect_mpsc_response(receiver).await;
match response {
RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_key,
connection_id,
stream_id: _,
reactor_update_channel: _,
} => {
assert_eq!(
stream_key.as_str(),
"key",
"Unexpected stream key in publisher connected message"
);
assert_eq!(
connection_id.0.as_str(),
rtmp_client::CONNECTION_ID,
"Unexpected connection id"
);
}
message => panic!("Unexpected publisher message: {:?}", message),
};
}
#[tokio::test]
async fn publish_stopped_notification_raised_on_disconnection() {
let mut context = TestContextBuilder::new().into_publisher().await;
context.set_as_active_publisher().await;
context.client.disconnect();
let receiver = context.publish_receiver.as_mut().unwrap();
let response = test_utils::expect_mpsc_response(receiver).await;
match response {
RtmpEndpointPublisherMessage::PublishingStopped { connection_id } => {
assert_eq!(
connection_id.0.as_str(),
rtmp_client::CONNECTION_ID,
"Unexpected connection id"
);
}
message => panic!("Unexpected publisher message: {:?}", message),
};
}
#[tokio::test]
async fn publish_stopped_when_rtmp_client_stops_publishing() {
let mut context = TestContextBuilder::new().into_publisher().await;
context.set_as_active_publisher().await;
context.client.stop_publishing().await;
let receiver = context.publish_receiver.as_mut().unwrap();
let response = test_utils::expect_mpsc_response(receiver).await;
match response {
RtmpEndpointPublisherMessage::PublishingStopped { connection_id } => {
assert_eq!(
connection_id.0.as_str(),
rtmp_client::CONNECTION_ID,
"Unexpected connection id"
);
}
message => panic!("Unexpected publisher message: {:?}", message),
};
}
#[tokio::test]
async fn notification_raised_when_video_published() {
let mut context = TestContextBuilder::new().into_publisher().await;
context.set_as_active_publisher().await;
let data = Bytes::from(vec![0x27, 2, 3, 4, 5, 6, 7]);
let timestamp = RtmpTimestamp::new(5);
context.client.publish_video(data.clone(), timestamp);
let receiver = context.publish_receiver.as_mut().unwrap();
let response = test_utils::expect_mpsc_response(receiver).await;
match response {
RtmpEndpointPublisherMessage::NewVideoData {
publisher,
timestamp: event_timestamp,
data: event_data,
is_sequence_header: _,
is_keyframe: _,
composition_time_offset: _,
} => {
assert_eq!(
publisher.0.as_str(),
rtmp_client::CONNECTION_ID,
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | true |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/external_stream_handler.rs | mmids-rtmp/src/workflow_steps/external_stream_handler.rs | use downcast_rs::{impl_downcast, Downcast};
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::StepFutureResult;
use mmids_core::StreamId;
/// Trait used to handle different external resources for a single stream
pub trait ExternalStreamHandler {
fn prepare_stream(&mut self, stream_name: &str, futures_channel: &WorkflowStepFuturesChannel);
fn stop_stream(&mut self);
fn handle_resolved_future(
&mut self,
future: Box<dyn StreamHandlerFutureResult>,
) -> ResolvedFutureStatus;
}
/// Allows creating a new external stream handler for any stream
pub trait ExternalStreamHandlerGenerator {
fn generate(&self, stream_id: StreamId) -> Box<dyn ExternalStreamHandler + Sync + Send>;
}
pub struct StreamHandlerFutureWrapper {
pub stream_id: StreamId,
pub future: Box<dyn StreamHandlerFutureResult + Sync + Send>,
}
impl StepFutureResult for StreamHandlerFutureWrapper {}
impl_downcast!(StreamHandlerFutureResult);
pub trait StreamHandlerFutureResult: Downcast {}
pub enum ResolvedFutureStatus {
Success,
StreamShouldBeStopped,
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/external_stream_reader.rs | mmids-rtmp/src/workflow_steps/external_stream_reader.rs | use super::external_stream_handler::{ExternalStreamHandler, StreamHandlerFutureWrapper};
use crate::rtmp_server::{
IpRestriction, RegistrationType, RtmpEndpointMediaData, RtmpEndpointMediaMessage,
RtmpEndpointRequest, RtmpEndpointWatcherNotification, StreamKeyRegistration,
};
use crate::workflow_steps::external_stream_handler::{
ExternalStreamHandlerGenerator, ResolvedFutureStatus,
};
use mmids_core::workflows::metadata::MetadataKey;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{StepFutureResult, StepOutputs, StepStatus};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent};
use mmids_core::StreamId;
use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use tracing::{error, info, warn};
/// Represents logic for a basic workflow step that exposes streams to an RTMP endpoint
/// so that an external system can read the video stream. This exposes a read-only interface for
/// media, which means the external system is not expected to push media back into the same workflow
/// as the same identifiable stream. An example of this is providing media for ffmpeg to generate
/// HLS feeds for.
///
/// Since this is a read-only interface all media passed into it will flow as-is to the next
/// workflow step.
pub struct ExternalStreamReader {
pub status: StepStatus,
rtmp_server_endpoint: UnboundedSender<RtmpEndpointRequest>,
watcher_app_name: Arc<String>,
active_streams: HashMap<StreamId, ActiveStream>,
stream_handler_generator: Box<dyn ExternalStreamHandlerGenerator + Sync + Send>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
#[derive(Debug)]
enum WatchRegistrationStatus {
Inactive,
Pending {
media_channel: UnboundedSender<RtmpEndpointMediaMessage>,
},
Active {
media_channel: UnboundedSender<RtmpEndpointMediaMessage>,
},
}
struct ActiveStream {
id: StreamId,
stream_name: Arc<String>,
pending_media: VecDeque<MediaNotificationContent>,
rtmp_output_status: WatchRegistrationStatus,
external_stream_handler: Box<dyn ExternalStreamHandler + Sync + Send>,
}
enum FutureResult {
RtmpEndpointGone,
WatchChannelGone(StreamId),
WatchNotificationReceived(StreamId, RtmpEndpointWatcherNotification),
}
impl StepFutureResult for FutureResult {}
impl ExternalStreamReader {
pub fn new(
watcher_rtmp_app_name: Arc<String>,
rtmp_server: UnboundedSender<RtmpEndpointRequest>,
external_handler_generator: Box<dyn ExternalStreamHandlerGenerator + Sync + Send>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
futures_channel: &WorkflowStepFuturesChannel,
) -> Self {
let step = ExternalStreamReader {
status: StepStatus::Active,
watcher_app_name: watcher_rtmp_app_name,
rtmp_server_endpoint: rtmp_server.clone(),
active_streams: HashMap::new(),
stream_handler_generator: external_handler_generator,
is_keyframe_metadata_key,
pts_offset_metadata_key,
};
futures_channel.send_on_generic_future_completion(async move {
rtmp_server.closed().await;
FutureResult::RtmpEndpointGone
});
step
}
pub fn handle_resolved_future(
&mut self,
notification: Box<dyn StepFutureResult>,
futures_channel: &WorkflowStepFuturesChannel,
) {
let notification = match notification.downcast::<StreamHandlerFutureWrapper>() {
Err(e) => e,
Ok(wrapper) => {
let result = if let Some(stream) = self.active_streams.get_mut(&wrapper.stream_id) {
stream
.external_stream_handler
.handle_resolved_future(wrapper.future)
} else {
ResolvedFutureStatus::Success
};
match result {
ResolvedFutureStatus::Success => {
self.prepare_stream(wrapper.stream_id, futures_channel)
}
ResolvedFutureStatus::StreamShouldBeStopped => {
self.stop_stream(&wrapper.stream_id);
}
}
return;
}
};
let notification = match notification.downcast::<FutureResult>() {
Ok(x) => *x,
Err(_) => return,
};
match notification {
FutureResult::RtmpEndpointGone => {
error!("RTMP endpoint is gone!");
self.status = StepStatus::Error {
message: "RTMP endpoint is gone".to_string(),
};
self.stop_all_streams();
}
FutureResult::WatchChannelGone(stream_id) => {
if self.stop_stream(&stream_id) {
error!(stream_id = ?stream_id, "Rtmp watch channel disappeared for stream id {:?}", stream_id);
}
}
FutureResult::WatchNotificationReceived(stream_id, notification) => {
if !self.active_streams.contains_key(&stream_id) {
// late notification after stopping a stream
return;
}
self.handle_rtmp_watch_notification(stream_id, notification, futures_channel);
}
}
}
pub fn handle_media(
&mut self,
media: MediaNotification,
outputs: &mut StepOutputs,
futures_channel: &WorkflowStepFuturesChannel,
) {
match &media.content {
MediaNotificationContent::NewIncomingStream { stream_name } => {
if let Some(stream) = self.active_streams.get(&media.stream_id) {
if &stream.stream_name != stream_name {
warn!(
stream_id = ?media.stream_id,
new_stream_name = %stream_name,
active_stream_name = %stream.stream_name,
"Unexpected new incoming stream notification received on \
stream id {:?} and stream name '{}', but we already have this stream id active \
for stream name '{}'. Ignoring this notification",
media.stream_id, stream_name, stream.stream_name);
} else {
// Since the stream id / name combination is already set, this is a duplicate
// notification. This is probably a bug somewhere but it's not harmful
// to ignore
}
return;
}
let stream = ActiveStream {
id: media.stream_id.clone(),
stream_name: stream_name.clone(),
pending_media: VecDeque::new(),
rtmp_output_status: WatchRegistrationStatus::Inactive,
external_stream_handler: self
.stream_handler_generator
.generate(media.stream_id.clone()),
};
self.active_streams.insert(media.stream_id.clone(), stream);
self.prepare_stream(media.stream_id.clone(), futures_channel);
}
MediaNotificationContent::StreamDisconnected => {
if self.stop_stream(&media.stream_id) {
info!(
stream_id = ?media.stream_id,
"Stopping stream id {:?} due to stream disconnection notification",
media.stream_id
);
}
}
_ => {
if let Some(stream) = self.active_streams.get_mut(&media.stream_id) {
if let WatchRegistrationStatus::Active { media_channel } =
&stream.rtmp_output_status
{
let media = media.clone();
if let Ok(media_data) =
RtmpEndpointMediaData::from_media_notification_content(
media.content,
self.is_keyframe_metadata_key,
self.pts_offset_metadata_key,
)
{
let _ = media_channel.send(RtmpEndpointMediaMessage {
stream_key: stream.id.0.clone(),
data: media_data,
});
}
} else {
stream.pending_media.push_back(media.content.clone());
}
}
}
}
outputs.media.push(media);
}
pub fn prepare_stream(
&mut self,
stream_id: StreamId,
futures_channel: &WorkflowStepFuturesChannel,
) {
if let Some(stream) = self.active_streams.get_mut(&stream_id) {
let (output_is_active, output_media_channel) = match &stream.rtmp_output_status {
WatchRegistrationStatus::Inactive => {
let (media_sender, media_receiver) = unbounded_channel();
let (watch_sender, watch_receiver) = unbounded_channel();
let _ =
self.rtmp_server_endpoint
.send(RtmpEndpointRequest::ListenForWatchers {
notification_channel: watch_sender,
rtmp_app: self.watcher_app_name.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0.clone()),
port: 1935,
media_channel: media_receiver,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
let stream_id = stream.id.clone();
let closed_stream_id = stream_id.clone();
futures_channel.send_on_generic_unbounded_recv(
watch_receiver,
move |event| {
FutureResult::WatchNotificationReceived(stream_id.clone(), event)
},
move || FutureResult::WatchChannelGone(closed_stream_id),
);
stream.rtmp_output_status = WatchRegistrationStatus::Pending {
media_channel: media_sender,
};
(false, None)
}
WatchRegistrationStatus::Pending { media_channel: _ } => (false, None),
WatchRegistrationStatus::Active { media_channel } => (true, Some(media_channel)),
};
if output_is_active {
// If the output is active, we need to send any pending media out. Most likely this
// will contain sequence headers, and thus we need to get them up to the rtmp endpoint
// so clients don't miss them
if let Some(media_channel) = output_media_channel {
for media in stream.pending_media.drain(..) {
if let Ok(media_data) =
RtmpEndpointMediaData::from_media_notification_content(
media,
self.is_keyframe_metadata_key,
self.pts_offset_metadata_key,
)
{
let _ = media_channel.send(RtmpEndpointMediaMessage {
stream_key: stream.id.0.clone(),
data: media_data,
});
}
}
}
stream
.external_stream_handler
.prepare_stream(&stream.stream_name, futures_channel);
}
}
}
pub fn stop_all_streams(&mut self) {
let ids: Vec<StreamId> = self.active_streams.keys().cloned().collect();
for id in ids {
self.stop_stream(&id);
}
}
fn stop_stream(&mut self, stream_id: &StreamId) -> bool {
if let Some(mut stream) = self.active_streams.remove(stream_id) {
stream.external_stream_handler.stop_stream();
let _ = self
.rtmp_server_endpoint
.send(RtmpEndpointRequest::RemoveRegistration {
registration_type: RegistrationType::Watcher,
port: 1935,
rtmp_app: self.watcher_app_name.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0),
});
return true;
}
false
}
fn handle_rtmp_watch_notification(
&mut self,
stream_id: StreamId,
notification: RtmpEndpointWatcherNotification,
futures_channel: &WorkflowStepFuturesChannel,
) {
if let Some(stream) = self.active_streams.get_mut(&stream_id) {
match notification {
RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => {
let new_status = match &stream.rtmp_output_status {
WatchRegistrationStatus::Pending { media_channel } => {
info!(
stream_id = ?stream.id,
"Watch registration successful for stream id {:?}",
stream.id
);
Some(WatchRegistrationStatus::Active {
media_channel: media_channel.clone(),
})
}
status => {
error!(
stream_id = ?stream.id,
"Received watch registration successful notification for stream id \
{:?}, but this stream's watch status is {:?}", stream.id, status
);
None
}
};
if let Some(new_status) = new_status {
stream.rtmp_output_status = new_status;
}
}
RtmpEndpointWatcherNotification::WatcherRegistrationFailed => {
warn!(
stream_id = ?stream.id,
"Received watch registration failed for stream id {:?}",
stream.id
);
stream.rtmp_output_status = WatchRegistrationStatus::Inactive;
}
RtmpEndpointWatcherNotification::StreamKeyBecameActive { .. } => (),
RtmpEndpointWatcherNotification::StreamKeyBecameInactive { .. } => (),
RtmpEndpointWatcherNotification::WatcherRequiringApproval { .. } => {
error!("Received request for approval but requests should be auto-approved");
self.status = StepStatus::Error {
message:
"Received request for approval but requests should be auto-approved"
.to_string(),
};
}
}
}
self.prepare_stream(stream_id, futures_channel);
}
}
impl Drop for ExternalStreamReader {
fn drop(&mut self) {
self.stop_all_streams();
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::rtmp_server::RtmpEndpointMediaData;
use crate::utils::hash_map_to_stream_metadata;
use crate::workflow_steps::external_stream_handler::StreamHandlerFutureResult;
use bytes::{Bytes, BytesMut};
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::workflows::definitions::WorkflowStepId;
use mmids_core::workflows::metadata::common_metadata::{
get_is_keyframe_metadata_key, get_pts_offset_metadata_key,
};
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKeyMap, MetadataValue,
};
use mmids_core::workflows::steps::futures_channel::{
FuturesChannelInnerResult, FuturesChannelResult,
};
use mmids_core::workflows::MediaType;
use mmids_core::{test_utils, VideoTimestamp};
use rml_rtmp::time::RtmpTimestamp;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::time::timeout;
struct TestContext {
external_stream_reader: ExternalStreamReader,
rtmp_endpoint: UnboundedReceiver<RtmpEndpointRequest>,
prepare_stream_receiver: UnboundedReceiver<String>,
stop_stream_receiver: UnboundedReceiver<()>,
futures_channel_receiver: UnboundedReceiver<FuturesChannelResult>,
futures_channel: WorkflowStepFuturesChannel,
}
struct Handler {
prepare_stream_sender: UnboundedSender<String>,
stop_stream_sender: UnboundedSender<()>,
}
impl ExternalStreamHandler for Handler {
fn prepare_stream(
&mut self,
stream_name: &str,
_futures_channel: &WorkflowStepFuturesChannel,
) {
let _ = self.prepare_stream_sender.send(stream_name.to_string());
}
fn stop_stream(&mut self) {
let _ = self.stop_stream_sender.send(());
}
fn handle_resolved_future(
&mut self,
_future: Box<dyn StreamHandlerFutureResult>,
) -> ResolvedFutureStatus {
ResolvedFutureStatus::Success
}
}
struct Generator {
prepare_stream_sender: UnboundedSender<String>,
stop_stream_sender: UnboundedSender<()>,
}
impl ExternalStreamHandlerGenerator for Generator {
fn generate(&self, _stream_id: StreamId) -> Box<dyn ExternalStreamHandler + Sync + Send> {
Box::new(Handler {
prepare_stream_sender: self.prepare_stream_sender.clone(),
stop_stream_sender: self.stop_stream_sender.clone(),
})
}
}
impl TestContext {
fn new() -> Self {
let (rtmp_sender, rtmp_receiver) = unbounded_channel();
let (prepare_sender, prepare_receiver) = unbounded_channel();
let (stop_sender, stop_receiver) = unbounded_channel();
let generator = Box::new(Generator {
prepare_stream_sender: prepare_sender,
stop_stream_sender: stop_sender,
});
let mut metadata_map = MetadataKeyMap::new();
let is_keyframe_metadata_key = get_is_keyframe_metadata_key(&mut metadata_map);
let pts_offset_metadata_key = get_pts_offset_metadata_key(&mut metadata_map);
let (futures_sender, futures_receiver) = unbounded_channel();
let futures_channel =
WorkflowStepFuturesChannel::new(WorkflowStepId(123), futures_sender);
let reader = ExternalStreamReader::new(
Arc::new("app".to_string()),
rtmp_sender,
generator,
is_keyframe_metadata_key,
pts_offset_metadata_key,
&futures_channel,
);
TestContext {
rtmp_endpoint: rtmp_receiver,
external_stream_reader: reader,
prepare_stream_receiver: prepare_receiver,
stop_stream_receiver: stop_receiver,
futures_channel_receiver: futures_receiver,
futures_channel,
}
}
async fn accept_stream(&mut self) -> UnboundedReceiver<RtmpEndpointMediaMessage> {
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
};
self.external_stream_reader
.handle_media(media, &mut outputs, &self.futures_channel);
let response = test_utils::expect_mpsc_response(&mut self.rtmp_endpoint).await;
let (notification_channel, media_channel) = match response {
RtmpEndpointRequest::ListenForWatchers {
notification_channel,
media_channel,
..
} => (notification_channel, media_channel),
response => panic!("Unexpected request: {:?}", response),
};
notification_channel
.send(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful)
.expect("Failed to send registration success response");
match self.expect_future_resolved().await {
FuturesChannelInnerResult::Generic(result) => {
self.external_stream_reader
.handle_resolved_future(result, &self.futures_channel);
}
FuturesChannelInnerResult::Media(_) => {
panic!("Expected a generic step future result but instead got media packet");
}
}
media_channel
}
/// Gets the first future that was resolved on the workflow step futures channel. If no future
/// is resolved, then a panic will ensue.
pub async fn expect_future_resolved(&mut self) -> FuturesChannelInnerResult {
let future = self.futures_channel_receiver.recv();
match timeout(Duration::from_millis(10), future).await {
Ok(Some(response)) => response.result,
_ => panic!("No future resolved within timeout period"),
}
}
}
#[tokio::test]
async fn watch_request_on_stream_connected_message() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
};
context
.external_stream_reader
.handle_media(media, &mut outputs, &context.futures_channel);
let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match response {
RtmpEndpointRequest::ListenForWatchers {
port,
rtmp_app,
rtmp_stream_key: _,
requires_registrant_approval,
media_channel: _,
use_tls,
ip_restrictions,
notification_channel: _,
} => {
assert_eq!(port, 1935, "Unexpected port");
assert_eq!(rtmp_app.as_str(), "app", "Unexpected rtmp application");
assert!(!use_tls, "Expected use tls to be disabled");
assert!(
!requires_registrant_approval,
"Expected not to require registrant approval"
);
assert_eq!(
ip_restrictions,
IpRestriction::None,
"Expected no ip restrictions"
);
}
response => panic!("Expected ListenForWatchers, instead got {:?}", response),
}
}
#[tokio::test]
async fn stream_connected_message_passed_immediately_as_output() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
};
context
.external_stream_reader
.handle_media(media, &mut outputs, &context.futures_channel);
assert_eq!(outputs.media.len(), 1, "Expected single media output");
assert_eq!(
outputs.media[0].stream_id.0.as_str(),
"abc",
"Unexpected stream id"
);
match &outputs.media[0].content {
MediaNotificationContent::NewIncomingStream { stream_name } => {
assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
}
content => panic!("Expected NewIncomingStream, got {:?}", content),
}
}
#[tokio::test]
async fn stream_disconnected_message_passed_immediately_as_output() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::StreamDisconnected,
};
context
.external_stream_reader
.handle_media(media, &mut outputs, &context.futures_channel);
assert_eq!(outputs.media.len(), 1, "Expected single media output");
assert_eq!(
outputs.media[0].stream_id.0.as_str(),
"abc",
"Unexpected stream id"
);
match &outputs.media[0].content {
MediaNotificationContent::StreamDisconnected => (),
content => panic!("Expected NewIncomingStream, got {:?}", content),
}
}
#[tokio::test]
async fn metadata_message_passed_immediately_as_output() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let mut metadata = HashMap::new();
metadata.insert("width".to_string(), "1920".to_string());
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::Metadata {
data: metadata.clone(),
},
};
context
.external_stream_reader
.handle_media(media, &mut outputs, &context.futures_channel);
assert_eq!(outputs.media.len(), 1, "Expected single media output");
assert_eq!(
outputs.media[0].stream_id.0.as_str(),
"abc",
"Unexpected stream id"
);
match &outputs.media[0].content {
MediaNotificationContent::Metadata { data } => {
assert_eq!(data, &metadata, "Unexpected metadata in output");
}
content => panic!("Expected NewIncomingStream, got {:?}", content),
}
}
#[tokio::test]
async fn video_message_passed_immediately_as_output() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let video_timestamp =
VideoTimestamp::from_durations(Duration::from_millis(5), Duration::from_millis(15));
let mut buffer = BytesMut::new();
let mut metadata_key_map = MetadataKeyMap::new();
let is_keyframe_metadata_key = get_is_keyframe_metadata_key(&mut metadata_key_map);
let pts_offset_metadata_key = get_pts_offset_metadata_key(&mut metadata_key_map);
let is_keyframe_metadata = MetadataEntry::new(
is_keyframe_metadata_key,
MetadataValue::Bool(true),
&mut buffer,
)
.unwrap();
let pts_offset_metadata = MetadataEntry::new(
pts_offset_metadata_key,
MetadataValue::I32(video_timestamp.pts_offset()),
&mut buffer,
)
.unwrap();
let metadata = MediaPayloadMetadataCollection::new(
[is_keyframe_metadata, pts_offset_metadata].into_iter(),
&mut buffer,
);
let media_content = MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
timestamp: video_timestamp.dts(),
is_required_for_decoding: true,
metadata,
data: Bytes::from(vec![1, 2, 3]),
};
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: media_content.clone(),
};
context
.external_stream_reader
.handle_media(media, &mut outputs, &context.futures_channel);
assert_eq!(outputs.media.len(), 1, "Expected single media output");
assert_eq!(
outputs.media[0].stream_id.0.as_str(),
"abc",
"Unexpected stream id"
);
assert_eq!(
outputs.media[0].content, media_content,
"Unexpected media content"
);
}
#[tokio::test]
async fn audio_message_passed_immediately_as_output() {
let mut context = TestContext::new();
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::MediaPayload {
data: Bytes::from(vec![1, 2, 3]),
timestamp: Duration::from_millis(5),
is_required_for_decoding: true,
media_type: MediaType::Audio,
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
},
};
context.external_stream_reader.handle_media(
media.clone(),
&mut outputs,
&context.futures_channel,
);
assert_eq!(outputs.media.len(), 1, "Expected single media output");
assert_eq!(
outputs.media[0].stream_id.0.as_str(),
"abc",
"Unexpected stream id"
);
assert_eq!(
outputs.media[0].content, media.content,
"Unexpected media content"
);
}
#[tokio::test]
async fn successful_watch_registration_calls_prepare_stream() {
    // A `NewIncomingStream` notification should make the reader register for
    // watchers with the RTMP endpoint; once that registration succeeds, the
    // external stream handler should be asked to prepare the stream.
    let mut context = TestContext::new();
    let mut outputs = StepOutputs::new();
    let media = MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    };
    context
        .external_stream_reader
        .handle_media(media, &mut outputs, &context.futures_channel);
    // The reader should have sent a watcher registration request.
    let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    let channel = match response {
        RtmpEndpointRequest::ListenForWatchers {
            notification_channel,
            ..
        } => notification_channel,
        response => panic!("Unexpected request: {:?}", response),
    };
    // Confirm the registration, then drive the step's pending futures so it
    // processes the confirmation.
    channel
        .send(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful)
        .expect("Failed to send registration success response");
    match context.expect_future_resolved().await {
        FuturesChannelInnerResult::Generic(result) => {
            context
                .external_stream_reader
                .handle_resolved_future(result, &context.futures_channel);
        }
        FuturesChannelInnerResult::Media(_) => {
            panic!("Expected a generic step future result but instead got media packet");
        }
    }
    // The stream handler should now be preparing the stream by name.
    let stream_name =
        test_utils::expect_mpsc_response(&mut context.prepare_stream_receiver).await;
    assert_eq!(&stream_name, "def", "Unexpected stream name prepared");
}
#[tokio::test]
async fn stream_disconnection_calls_stop_stream() {
let mut context = TestContext::new();
let _ = context.accept_stream().await;
let mut outputs = StepOutputs::new();
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::StreamDisconnected,
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | true |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/mod.rs | mmids-rtmp/src/workflow_steps/mod.rs | //! RTMP related mmids workflow steps
pub mod external_stream_handler;
pub mod external_stream_reader;
pub mod rtmp_receive;
pub mod rtmp_watch;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/rtmp_watch/tests.rs | mmids-rtmp/src/workflow_steps/rtmp_watch/tests.rs | use super::*;
use crate::rtmp_server::{
RtmpEndpointMediaData, RtmpEndpointMediaMessage, RtmpEndpointWatcherNotification,
};
use anyhow::Result;
use bytes::{Bytes, BytesMut};
use mmids_core::net::ConnectionId;
use mmids_core::test_utils::expect_mpsc_response;
use mmids_core::workflows::definitions::WorkflowStepType;
use mmids_core::workflows::metadata::common_metadata::{
get_is_keyframe_metadata_key, get_pts_offset_metadata_key,
};
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKeyMap,
};
use mmids_core::workflows::steps::test_utils::StepTestContext;
use mmids_core::workflows::{MediaNotification, MediaNotificationContent, MediaType};
use mmids_core::{test_utils, StreamId};
use rml_rtmp::time::RtmpTimestamp;
use std::collections::{HashMap, HashSet};
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::channel;
/// Shared fixture for rtmp_watch step tests: the step under test plus the
/// receiving ends of the channels the step talks to.
struct TestContext {
    step_context: StepTestContext,
    // Receives the step's requests to the RTMP endpoint.
    rtmp_endpoint: UnboundedReceiver<RtmpEndpointRequest>,
    // Receives the step's requests to the reactor manager.
    reactor_manager: UnboundedReceiver<ReactorManagerRequest>,
    is_keyframe_metadata_key: MetadataKey,
    pts_offset_metadata_key: MetadataKey,
}
/// Builder for rtmp_watch workflow step definitions used by the tests below.
/// Unset fields fall back to defaults in `build` (app "app", stream key "*").
struct DefinitionBuilder {
    port: Option<u16>,
    app: Option<String>,
    key: Option<String>,
    reactor: Option<String>,
}
impl DefinitionBuilder {
    /// Starts a builder with every parameter unset.
    fn new() -> Self {
        Self {
            port: None,
            app: None,
            key: None,
            reactor: None,
        }
    }

    /// Sets the port parameter.
    fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// Sets the rtmp_app parameter.
    fn app(mut self, app: &str) -> Self {
        self.app = Some(app.to_string());
        self
    }

    /// Sets the stream_key parameter.
    fn key(mut self, key: &str) -> Self {
        self.key = Some(key.to_string());
        self
    }

    /// Sets the reactor parameter.
    fn reactor_name(mut self, name: &str) -> Self {
        self.reactor = Some(name.to_string());
        self
    }

    /// Produces the workflow step definition. Port and reactor are only
    /// included when explicitly set; app defaults to "app" and the stream key
    /// defaults to the "*" wildcard.
    fn build(self) -> WorkflowStepDefinition {
        let mut definition = WorkflowStepDefinition {
            step_type: WorkflowStepType("rtmp_watch".to_string()),
            parameters: HashMap::new(),
        };

        if let Some(port) = self.port {
            definition
                .parameters
                .insert(PORT_PROPERTY_NAME.to_string(), Some(port.to_string()));
        }

        let app = self.app.unwrap_or_else(|| "app".to_string());
        definition
            .parameters
            .insert(APP_PROPERTY_NAME.to_string(), Some(app));

        let key = self.key.unwrap_or_else(|| "*".to_string());
        definition
            .parameters
            .insert(STREAM_KEY_PROPERTY_NAME.to_string(), Some(key));

        if let Some(reactor) = self.reactor {
            definition
                .parameters
                .insert(REACTOR_NAME.to_string(), Some(reactor));
        }

        definition
    }
}
impl TestContext {
    // Builds the rtmp_watch step from `definition` and wires capture channels
    // onto the RTMP endpoint and reactor manager requests the step emits.
    fn new(definition: WorkflowStepDefinition) -> Result<Self> {
        let (reactor_sender, reactor_receiver) = unbounded_channel();
        let (rtmp_sender, rtmp_receiver) = unbounded_channel();
        let mut metadata_map = MetadataKeyMap::new();
        let is_keyframe_metadata_key = get_is_keyframe_metadata_key(&mut metadata_map);
        let pts_offset_metadata_key = get_pts_offset_metadata_key(&mut metadata_map);
        let generator = RtmpWatchStepGenerator {
            reactor_manager: reactor_sender,
            rtmp_endpoint_sender: rtmp_sender,
            is_keyframe_metadata_key,
            pts_offset_metadata_key,
        };
        let step_context = StepTestContext::new(Box::new(generator), definition)?;
        Ok(TestContext {
            step_context,
            rtmp_endpoint: rtmp_receiver,
            reactor_manager: reactor_receiver,
            is_keyframe_metadata_key,
            pts_offset_metadata_key,
        })
    }
    // Answers the step's `ListenForWatchers` request with a successful
    // registration and hands back the notification sender plus the media
    // receiver for further interaction with the step.
    async fn accept_registration(
        &mut self,
    ) -> (
        UnboundedSender<RtmpEndpointWatcherNotification>,
        UnboundedReceiver<RtmpEndpointMediaMessage>,
    ) {
        let request = test_utils::expect_mpsc_response(&mut self.rtmp_endpoint).await;
        let channel = match request {
            RtmpEndpointRequest::ListenForWatchers {
                media_channel,
                notification_channel,
                ..
            } => {
                notification_channel
                    .send(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful)
                    .expect("Failed to send registration response");
                (notification_channel, media_channel)
            }
            request => panic!("Unexpected rtmp request seen: {:?}", request),
        };
        // Let the step observe the registration result before returning.
        self.step_context.execute_pending_futures().await;
        channel
    }
    // Waits for the step's CreateWorkflowForStreamName request and returns the
    // channel the test can use to push reactor updates back to the step.
    async fn get_reactor_channel(&mut self) -> UnboundedSender<ReactorWorkflowUpdate> {
        let request = test_utils::expect_mpsc_response(&mut self.reactor_manager).await;
        match request {
            ReactorManagerRequest::CreateWorkflowForStreamName {
                response_channel, ..
            } => response_channel,
            request => panic!("Unexpected request: {:?}", request),
        }
    }
}
#[tokio::test]
async fn requests_registration_for_watchers() {
    // The step should immediately request a watcher registration with the
    // port, app, and exact stream key from its definition.
    let definition = DefinitionBuilder::new()
        .port(1234)
        .app("some_app")
        .key("some_key")
        .build();
    let mut context = TestContext::new(definition).unwrap();
    let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    match response {
        RtmpEndpointRequest::ListenForWatchers {
            port,
            rtmp_app,
            rtmp_stream_key,
            ..
        } => {
            assert_eq!(port, 1234, "Unexpected port");
            assert_eq!(rtmp_app.as_str(), "some_app", "Unexpected rtmp app");
            assert_eq!(
                rtmp_stream_key,
                StreamKeyRegistration::Exact(Arc::new("some_key".to_string())),
                "Unexpected stream key"
            );
        }
        response => panic!("Unexpected response: {:?}", response),
    }
}
#[tokio::test]
async fn no_port_specified_defaults_to_1935() {
    // Without an explicit port parameter the step should fall back to RTMP's
    // well-known default port.
    let mut definition = DefinitionBuilder::new().build();
    definition.parameters.remove(PORT_PROPERTY_NAME);

    let mut context = TestContext::new(definition).unwrap();

    let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    let port = match request {
        RtmpEndpointRequest::ListenForWatchers { port, .. } => port,
        response => panic!("Unexpected response: {:?}", response),
    };

    assert_eq!(port, 1935, "Unexpected port");
}
#[tokio::test]
async fn asterisk_stream_key_acts_as_wildcard() {
    // A stream key of "*" should register for any stream key rather than an
    // exact match.
    let mut definition = DefinitionBuilder::new().build();
    definition
        .parameters
        .insert(STREAM_KEY_PROPERTY_NAME.to_string(), Some("*".to_string()));
    let mut context = TestContext::new(definition).unwrap();
    let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    match response {
        RtmpEndpointRequest::ListenForWatchers {
            rtmp_stream_key, ..
        } => {
            assert_eq!(
                rtmp_stream_key,
                StreamKeyRegistration::Any,
                "Unexpected stream key"
            );
        }
        response => panic!("Unexpected response: {:?}", response),
    }
}
#[test]
fn error_if_no_app_provided() {
    // Step creation must fail when the rtmp_app parameter is missing entirely.
    let mut definition = DefinitionBuilder::new().build();
    definition.parameters.remove(APP_PROPERTY_NAME);
    assert!(TestContext::new(definition).is_err(), "Expected failure");
}
#[test]
fn error_if_no_stream_key_provided() {
    // Step creation must fail when the stream_key parameter is missing entirely.
    let mut definition = DefinitionBuilder::new().build();
    definition.parameters.remove(STREAM_KEY_PROPERTY_NAME);
    assert!(TestContext::new(definition).is_err(), "Expected failure");
}
#[tokio::test]
async fn new_step_is_in_created_status() {
    // A freshly generated step reports `Created` until registration completes.
    let context = TestContext::new(DefinitionBuilder::new().build()).unwrap();
    assert_eq!(
        context.step_context.status,
        StepStatus::Created,
        "Unexpected step status"
    );
}
#[tokio::test]
async fn registration_failure_changes_status_to_error() {
    // A failed watcher registration should push the step into the `Error` state.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    let _channel = match response {
        RtmpEndpointRequest::ListenForWatchers {
            notification_channel,
            ..
        } => {
            notification_channel
                .send(RtmpEndpointWatcherNotification::WatcherRegistrationFailed)
                .expect("Failed to send failure response");
            notification_channel
        }
        response => panic!("Unexpected response: {:?}", response),
    };
    // Drive the step so it observes the registration result.
    context.step_context.execute_pending_futures().await;
    let status = context.step_context.status;
    match status {
        StepStatus::Error { message: _ } => (),
        _ => panic!("Unexpected status: {:?}", status),
    }
}
#[tokio::test]
async fn registration_success_changes_status_to_active() {
    // Once the RTMP endpoint confirms the watcher registration, the step
    // should transition from `Created` to `Active`.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    let _channel = match response {
        RtmpEndpointRequest::ListenForWatchers {
            notification_channel,
            ..
        } => {
            notification_channel
                .send(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful)
                // Fixed copy/paste from the failure test: this sends the
                // *success* notification.
                .expect("Failed to send success response");
            notification_channel
        }
        response => panic!("Unexpected response: {:?}", response),
    };
    // Drive the step so it observes the registration result.
    context.step_context.execute_pending_futures().await;
    let status = context.step_context.status;
    match status {
        StepStatus::Active => (),
        _ => panic!("Unexpected status: {:?}", status),
    }
}
#[tokio::test]
async fn video_packet_not_sent_to_media_channel_if_new_stream_message_not_received() {
    // Without a prior `NewIncomingStream` there is no stream-name mapping, so
    // the payload must not reach the endpoint's media channel.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![3, 4]),
            is_required_for_decoding: true,
            timestamp: Duration::new(0, 0),
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    });
    test_utils::expect_mpsc_timeout(&mut media_channel).await;
}
#[tokio::test]
async fn video_packet_sent_to_media_channel_after_new_stream_message_received() {
    // After `NewIncomingStream` maps stream id "abc" to stream name "def", a
    // video payload should be forwarded to the endpoint's media channel with
    // its metadata translated into RTMP flags.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    // Attach keyframe and pts-offset metadata entries to the payload.
    let is_keyframe_metadata = MetadataEntry::new(
        context.is_keyframe_metadata_key,
        MetadataValue::Bool(true),
        &mut BytesMut::new(),
    )
    .unwrap();
    let pts_offset_metadata = MetadataEntry::new(
        context.pts_offset_metadata_key,
        MetadataValue::I32(10),
        &mut BytesMut::new(),
    )
    .unwrap();
    let metadata = MediaPayloadMetadataCollection::new(
        [is_keyframe_metadata, pts_offset_metadata].into_iter(),
        &mut BytesMut::new(),
    );
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![3, 4]),
            is_required_for_decoding: true,
            timestamp: Duration::from_millis(5),
            metadata,
        },
    });
    let media = expect_mpsc_response(&mut media_channel).await;
    assert_eq!(media.stream_key.as_str(), "def");
    match &media.data {
        RtmpEndpointMediaData::NewVideoData {
            data,
            timestamp,
            is_keyframe,
            is_sequence_header,
            composition_time_offset,
        } => {
            assert_eq!(data, &vec![3, 4], "Unexpected video bytes");
            assert_eq!(timestamp, &RtmpTimestamp::new(5), "Unexpected timestamp");
            assert!(is_keyframe, "Expected is_keyframe to be true");
            assert!(is_sequence_header, "Expected is_sequence_header to be true");
            // The pts offset metadata value (10) should surface as the
            // composition time offset.
            assert_eq!(
                composition_time_offset, &10,
                "Unexpected composition time offset"
            );
        }
        _ => panic!("Unexpected media data: {:?}", media.data),
    }
}
#[tokio::test]
async fn video_packet_not_sent_to_media_channel_after_stream_disconnection_message_received() {
    // A `StreamDisconnected` notification should clear the stream mapping, so
    // later payloads for that stream id no longer reach the media channel.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::StreamDisconnected,
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![3, 4]),
            is_required_for_decoding: true,
            timestamp: Duration::from_millis(5),
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    });
    test_utils::expect_mpsc_timeout(&mut media_channel).await;
}
#[tokio::test]
async fn video_packet_not_sent_to_media_channel_when_new_stream_is_for_different_stream_id() {
    // Payloads for a stream id ("def") that never announced itself must not be
    // forwarded, even though another stream id ("abc") is active.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("def".to_string())),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![3, 4]),
            is_required_for_decoding: true,
            timestamp: Duration::from_millis(5),
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    });
    test_utils::expect_mpsc_timeout(&mut media_channel).await;
}
#[tokio::test]
async fn audio_packet_sent_to_media_channel_after_new_stream_message_received() {
    // After `NewIncomingStream` maps the stream id to stream name "def", an
    // audio payload should be forwarded to the endpoint's media channel.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::MediaPayload {
            data: Bytes::from(vec![3, 4]),
            timestamp: Duration::from_millis(1),
            is_required_for_decoding: true,
            media_type: MediaType::Audio,
            payload_type: AUDIO_CODEC_AAC_RAW.clone(),
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    });
    let media = expect_mpsc_response(&mut media_channel).await;
    assert_eq!(media.stream_key.as_str(), "def");
    match &media.data {
        RtmpEndpointMediaData::NewAudioData {
            data,
            timestamp,
            is_sequence_header,
        } => {
            // Fixed copy/paste from the video test: this message now refers to
            // audio bytes.
            assert_eq!(data, &vec![3, 4], "Unexpected audio bytes");
            assert_eq!(timestamp, &RtmpTimestamp::new(1), "Unexpected timestamp");
            assert!(is_sequence_header, "Expected is_sequence_header to be true");
        }
        _ => panic!("Unexpected media data: {:?}", media.data),
    }
}
#[tokio::test]
async fn metadata_packet_sent_to_media_channel_after_new_stream_message_received() {
    // A `Metadata` notification should be converted into RTMP stream metadata;
    // the "width" entry is expected to surface as `video_width`.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let mut metadata = HashMap::new();
    metadata.insert("width".to_string(), "1920".to_string());
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::Metadata { data: metadata },
    });
    let media = expect_mpsc_response(&mut media_channel).await;
    assert_eq!(media.stream_key.as_str(), "def");
    match &media.data {
        RtmpEndpointMediaData::NewStreamMetaData { metadata } => {
            assert_eq!(metadata.video_width, Some(1920), "Unexpected video width");
        }
        _ => panic!("Unexpected media data: {:?}", media.data),
    }
}
#[tokio::test]
async fn media_message_uses_strict_stream_key_when_exact_key_registered() {
    // With an exact stream key registration, outgoing media uses the
    // registered key ("specific_key") instead of the incoming stream's name.
    let definition = DefinitionBuilder::new().key("specific_key").build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, mut media_channel) = context.accept_registration().await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![3, 4]),
            is_required_for_decoding: true,
            timestamp: Duration::from_millis(5),
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    });
    let media = expect_mpsc_response(&mut media_channel).await;
    assert_eq!(
        media.stream_key.as_str(),
        "specific_key",
        "Unexpected stream key"
    );
}
#[tokio::test]
async fn new_stream_message_passed_as_output() {
    // The watch step must forward `NewIncomingStream` notifications untouched
    // to the next step in the workflow.
    let mut context = TestContext::new(DefinitionBuilder::new().build()).unwrap();
    let (_notification_channel, _media_channel) = context.accept_registration().await;

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    };

    context.step_context.assert_media_passed_through(notification);
}
#[tokio::test]
async fn stream_disconnected_message_passed_as_output() {
    // `StreamDisconnected` notifications must be forwarded to the next step.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, _media_channel) = context.accept_registration().await;
    context
        .step_context
        .assert_media_passed_through(MediaNotification {
            stream_id: StreamId(Arc::new("abc".to_string())),
            content: MediaNotificationContent::StreamDisconnected,
        });
}
#[tokio::test]
async fn video_message_passed_as_output() {
    // Video payloads must be forwarded to the next step even while no watcher
    // stream mapping exists.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, _media_channel) = context.accept_registration().await;
    context
        .step_context
        .assert_media_passed_through(MediaNotification {
            stream_id: StreamId(Arc::new("abc".to_string())),
            content: MediaNotificationContent::MediaPayload {
                media_type: MediaType::Video,
                payload_type: VIDEO_CODEC_H264_AVC.clone(),
                data: Bytes::from(vec![3, 4]),
                is_required_for_decoding: true,
                timestamp: Duration::from_millis(5),
                metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
            },
        });
}
#[tokio::test]
async fn audio_message_passed_as_output() {
    // Audio payloads must be forwarded to the next step unchanged.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, _media_channel) = context.accept_registration().await;
    context
        .step_context
        .assert_media_passed_through(MediaNotification {
            stream_id: StreamId(Arc::new("abc".to_string())),
            content: MediaNotificationContent::MediaPayload {
                data: Bytes::from(vec![3, 4]),
                timestamp: Duration::from_millis(1),
                is_required_for_decoding: true,
                media_type: MediaType::Audio,
                payload_type: AUDIO_CODEC_AAC_RAW.clone(),
                metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
            },
        });
}
#[tokio::test]
async fn metadata_message_passed_as_output() {
    // Metadata notifications must be forwarded to the next step unchanged.
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();
    let (_notification_channel, _media_channel) = context.accept_registration().await;
    let mut metadata = HashMap::new();
    metadata.insert("a".to_string(), "b".to_string());
    context
        .step_context
        .assert_media_passed_through(MediaNotification {
            stream_id: StreamId(Arc::new("abc".to_string())),
            content: MediaNotificationContent::Metadata { data: metadata },
        });
}
#[tokio::test]
async fn watchers_requiring_approval_sends_request_to_reactor() {
    // With a reactor configured, a watcher needing approval should produce a
    // CreateWorkflowForStreamName request naming that reactor and the stream.
    let definition = DefinitionBuilder::new()
        .reactor_name("some_reactor")
        .build();
    let mut context = TestContext::new(definition).unwrap();
    let (notification_channel, _media_channel) = context.accept_registration().await;
    let (sender, _receiver) = channel();
    notification_channel
        .send(RtmpEndpointWatcherNotification::WatcherRequiringApproval {
            stream_key: Arc::new("abc".to_string()),
            connection_id: ConnectionId(Arc::new("def".to_string())),
            response_channel: sender,
        })
        .expect("Failed to send approval request");
    context.step_context.execute_pending_futures().await;
    let request = test_utils::expect_mpsc_response(&mut context.reactor_manager).await;
    match request {
        ReactorManagerRequest::CreateWorkflowForStreamName {
            reactor_name,
            stream_name,
            ..
        } => {
            assert_eq!(
                reactor_name.as_str(),
                "some_reactor",
                "Unexpected reactor name"
            );
            assert_eq!(stream_name.as_str(), "abc", "Unexpected stream name");
        }
        request => panic!("Unexpected request: {:?}", request),
    }
}
#[tokio::test]
async fn reactor_responding_with_invalid_sends_rejection_response() {
    // When the reactor reports the workflow as invalid, the watcher approval
    // request should be answered with `Reject`.
    let definition = DefinitionBuilder::new()
        .reactor_name("some_reactor")
        .build();
    let mut context = TestContext::new(definition).unwrap();
    let (notification_channel, _media_channel) = context.accept_registration().await;
    let (sender, receiver) = channel();
    notification_channel
        .send(RtmpEndpointWatcherNotification::WatcherRequiringApproval {
            stream_key: Arc::new("abc".to_string()),
            connection_id: ConnectionId(Arc::new("def".to_string())),
            response_channel: sender,
        })
        .expect("Failed to send approval request")
    context.step_context.execute_pending_futures().await;
    let reactor_channel = context.get_reactor_channel().await;
    reactor_channel
        .send(ReactorWorkflowUpdate {
            is_valid: false,
            routable_workflow_names: HashSet::new(),
        })
        .expect("Failed to send reactor response");
    context.step_context.execute_pending_futures().await;
    let response = test_utils::expect_oneshot_response(receiver).await;
    match response {
        ValidationResponse::Reject => (),
        response => panic!("Unexpected response: {:?}", response),
    }
}
#[tokio::test]
async fn reactor_responding_with_valid_sends_approved_response() {
    // When the reactor reports the workflow as valid, the watcher approval
    // request should be answered with `Approve`.
    let definition = DefinitionBuilder::new()
        .reactor_name("some_reactor")
        .build();
    let mut context = TestContext::new(definition).unwrap();
    let (notification_channel, _media_channel) = context.accept_registration().await;
    let (sender, receiver) = channel();
    notification_channel
        .send(RtmpEndpointWatcherNotification::WatcherRequiringApproval {
            stream_key: Arc::new("abc".to_string()),
            connection_id: ConnectionId(Arc::new("def".to_string())),
            response_channel: sender,
        })
        .expect("Failed to send approval request");
    context.step_context.execute_pending_futures().await;
    let reactor_channel = context.get_reactor_channel().await;
    reactor_channel
        .send(ReactorWorkflowUpdate {
            is_valid: true,
            routable_workflow_names: HashSet::new(),
        })
        .expect("Failed to send reactor response");
    context.step_context.execute_pending_futures().await;
    let response = test_utils::expect_oneshot_response(receiver).await;
    match response {
        ValidationResponse::Approve { .. } => (),
        response => panic!("Unexpected response: {:?}", response),
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/rtmp_watch/mod.rs | mmids-rtmp/src/workflow_steps/rtmp_watch/mod.rs | //! The RTMP watch step registers with the RTMP server endpoint to allow for RTMP clients to connect
//! and watch media streams based on the specified port, application name, and stream key
//! combinations. When the workflow step is passed in media notifications it passes them to
//! the RTMP endpoint for distribution for waiting clients.
//!
//! When a stream key of `*` is specified, this allows for RTMP clients to connect on any stream key
//! for the rtmp application to watch video. Media packets will be routed to clients that connected
//! on a stream key that matches the name of the stream in the pipeline.
//!
//! If an exact stream key is configured, then the first media stream that comes into the step will
//! be surfaced on that stream key.
//!
//! All media notifications that are passed into this step are passed onto the next step.
#[cfg(test)]
mod tests;
use crate::rtmp_server::{
IpRestriction, RegistrationType, RtmpEndpointMediaData, RtmpEndpointMediaMessage,
RtmpEndpointRequest, RtmpEndpointWatcherNotification, StreamKeyRegistration,
ValidationResponse,
};
use crate::utils::hash_map_to_stream_metadata;
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::net::{IpAddress, IpAddressParseError};
use mmids_core::reactors::manager::ReactorManagerRequest;
use mmids_core::reactors::ReactorWorkflowUpdate;
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::{MetadataKey, MetadataValue};
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent};
use mmids_core::StreamId;
use rml_rtmp::time::RtmpTimestamp;
use std::collections::HashMap;
use std::sync::Arc;
use thiserror::Error as ThisError;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::Sender;
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
/// Workflow definition parameter: TCP port to listen on for watchers.
pub const PORT_PROPERTY_NAME: &str = "port";
/// Workflow definition parameter: RTMP application name.
pub const APP_PROPERTY_NAME: &str = "rtmp_app";
/// Workflow definition parameter: stream key, or "*" to accept any key.
pub const STREAM_KEY_PROPERTY_NAME: &str = "stream_key";
/// Workflow definition parameter: comma delimited list of allowed IPs.
pub const IP_ALLOW_PROPERTY_NAME: &str = "allow_ips";
/// Workflow definition parameter: comma delimited list of denied IPs.
pub const IP_DENY_PROPERTY_NAME: &str = "deny_ips";
/// Presence-only flag that enables RTMPS (TLS) and the 443 default port.
pub const RTMPS_FLAG: &str = "rtmps";
/// Name of the reactor used to approve watchers, if any.
pub const REACTOR_NAME: &str = "reactor";
/// Generates new rtmp watch workflow step instances based on a given step definition.
pub struct RtmpWatchStepGenerator {
    // Channel for registering with and sending requests to the RTMP endpoint.
    rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
    // Channel for requesting workflows from the reactor manager.
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
    is_keyframe_metadata_key: MetadataKey,
    pts_offset_metadata_key: MetadataKey,
}
struct StreamWatchers {
    // Token used to cancel the pending reactor-update receive future for this
    // stream; `Drop` takes and cancels it when the watcher record goes away.
    // NOTE(review): an earlier comment here described an `UnboundedSender`
    // based design, which no longer matches this `CancellationToken` field.
    cancellation_token: Option<CancellationToken>,
}
impl Drop for StreamWatchers {
    fn drop(&mut self) {
        // Cancel any outstanding future tied to this stream's token so work
        // for a removed stream stops.
        if let Some(token) = self.cancellation_token.take() {
            token.cancel();
        }
    }
}
/// Workflow step that exposes pipeline media to RTMP watchers.
struct RtmpWatchStep {
    // Port watchers connect to.
    port: u16,
    // RTMP application name this step registered for.
    rtmp_app: Arc<String>,
    // Exact key or wildcard registration used with the endpoint.
    stream_key: StreamKeyRegistration,
    // When set, watchers must be approved via this reactor.
    reactor_name: Option<Arc<String>>,
    status: StepStatus,
    rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
    // Sender half handed to the endpoint for outgoing media packets.
    media_channel: UnboundedSender<RtmpEndpointMediaMessage>,
    // Maps pipeline stream ids to the RTMP stream name they play out on
    // (populated from `NewIncomingStream` notifications).
    stream_id_to_name_map: HashMap<StreamId, Arc<String>>,
    // Per-stream-name watcher bookkeeping, keyed by stream key.
    stream_watchers: HashMap<Arc<String>, StreamWatchers>,
    is_keyframe_metadata_key: MetadataKey,
    pts_offset_metadata_key: MetadataKey,
}
// Marker impl so these results can flow through the step futures channel.
impl StepFutureResult for RtmpWatchStepFutureResult {}
/// Results produced by the async futures this step spawns.
enum RtmpWatchStepFutureResult {
    // The RTMP endpoint's notification channel closed.
    RtmpEndpointGone,
    // The reactor manager's request channel closed.
    ReactorManagerGone,
    ReactorGone,
    // A watcher-related notification arrived from the RTMP endpoint.
    RtmpWatchNotificationReceived(RtmpEndpointWatcherNotification),
    // The reactor answered a watcher approval request.
    ReactorWorkflowResponse {
        is_valid: bool,
        validation_channel: Sender<ValidationResponse>,
        reactor_update_channel: UnboundedReceiver<ReactorWorkflowUpdate>,
    },
    // The reactor pushed an updated workflow validity for a stream.
    ReactorUpdateReceived {
        stream_name: Arc<String>,
        update: ReactorWorkflowUpdate,
    },
    // The per-stream cancellation token fired while awaiting reactor updates.
    ReactorReceiverCanceled {
        stream_name: Arc<String>,
    },
}
/// Errors that can occur while constructing an rtmp_watch step from its
/// workflow definition.
#[derive(ThisError, Debug)]
enum StepStartupError {
    // Fixed: this message previously interpolated PORT_PROPERTY_NAME, telling
    // users the wrong parameter to set.
    #[error(
        "No RTMP app specified. A non-empty parameter of '{}' is required",
        APP_PROPERTY_NAME
    )]
    NoRtmpApp,
    // Fixed: this message previously interpolated APP_PROPERTY_NAME.
    #[error(
        "No stream key specified. A non-empty parameter of '{}' is required",
        STREAM_KEY_PROPERTY_NAME
    )]
    NoStreamKey,
    #[error(
        "Invalid port value of '{0}' specified. A number from 0 to 65535 should be specified"
    )]
    InvalidPort(String),
    #[error("Failed to parse ip address")]
    InvalidIpAddress(#[from] IpAddressParseError),
    #[error(
        "Both {} and {} were specified, but only one is allowed",
        IP_ALLOW_PROPERTY_NAME,
        IP_DENY_PROPERTY_NAME
    )]
    BothDenyAndAllowIpRestrictions,
}
impl RtmpWatchStepGenerator {
    /// Creates a generator that builds rtmp_watch steps wired to the given
    /// RTMP endpoint and reactor manager channels.
    pub fn new(
        rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
        reactor_manager: UnboundedSender<ReactorManagerRequest>,
        is_keyframe_metadata_key: MetadataKey,
        pts_offset_metadata_key: MetadataKey,
    ) -> Self {
        RtmpWatchStepGenerator {
            rtmp_endpoint_sender,
            reactor_manager,
            is_keyframe_metadata_key,
            pts_offset_metadata_key,
        }
    }
}
impl StepGenerator for RtmpWatchStepGenerator {
    // Parses the step definition, registers for watchers with the RTMP
    // endpoint, and wires the endpoint/reactor channels into step futures.
    fn generate(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult {
        // The rtmps flag is presence-only; any value (or none) enables TLS.
        let use_rtmps = definition.parameters.get(RTMPS_FLAG).is_some();
        let port = match definition.parameters.get(PORT_PROPERTY_NAME) {
            Some(Some(value)) => match value.parse::<u16>() {
                Ok(num) => num,
                Err(_) => {
                    return Err(Box::new(StepStartupError::InvalidPort(value.clone())));
                }
            },
            // No explicit port: protocol default (443 for rtmps, 1935 for rtmp).
            _ => {
                if use_rtmps {
                    443
                } else {
                    1935
                }
            }
        };
        let app = match definition.parameters.get(APP_PROPERTY_NAME) {
            Some(Some(x)) => Arc::new(x.trim().to_string()),
            _ => return Err(Box::new(StepStartupError::NoRtmpApp)),
        };
        let stream_key = match definition.parameters.get(STREAM_KEY_PROPERTY_NAME) {
            Some(Some(x)) => Arc::new(x.trim().to_string()),
            _ => return Err(Box::new(StepStartupError::NoStreamKey)),
        };
        // "*" registers for any stream key; anything else is an exact match.
        let stream_key = if stream_key.as_str() == "*" {
            StreamKeyRegistration::Any
        } else {
            StreamKeyRegistration::Exact(stream_key)
        };
        let allowed_ips = match definition.parameters.get(IP_ALLOW_PROPERTY_NAME) {
            Some(Some(value)) => IpAddress::parse_comma_delimited_list(Some(value))?,
            _ => Vec::new(),
        };
        let denied_ips = match definition.parameters.get(IP_DENY_PROPERTY_NAME) {
            Some(Some(value)) => IpAddress::parse_comma_delimited_list(Some(value))?,
            _ => Vec::new(),
        };
        // Allow and deny lists are mutually exclusive.
        let ip_restriction = match (!allowed_ips.is_empty(), !denied_ips.is_empty()) {
            (true, true) => {
                return Err(Box::new(StepStartupError::BothDenyAndAllowIpRestrictions));
            }
            (true, false) => IpRestriction::Allow(allowed_ips),
            (false, true) => IpRestriction::Deny(denied_ips),
            (false, false) => IpRestriction::None,
        };
        let reactor_name = match definition.parameters.get(REACTOR_NAME) {
            Some(Some(value)) => Some(Arc::new(value.clone())),
            _ => None,
        };
        let (media_sender, media_receiver) = unbounded_channel();
        let step = RtmpWatchStep {
            status: StepStatus::Created,
            port,
            rtmp_app: app,
            rtmp_endpoint_sender: self.rtmp_endpoint_sender.clone(),
            reactor_manager: self.reactor_manager.clone(),
            media_channel: media_sender,
            stream_key,
            stream_id_to_name_map: HashMap::new(),
            reactor_name,
            stream_watchers: HashMap::new(),
            is_keyframe_metadata_key: self.is_keyframe_metadata_key,
            pts_offset_metadata_key: self.pts_offset_metadata_key,
        };
        let (notification_sender, notification_receiver) = unbounded_channel();
        // Register with the endpoint; registrant approval is only required when
        // a reactor is configured to vet watchers.
        let _ = step
            .rtmp_endpoint_sender
            .send(RtmpEndpointRequest::ListenForWatchers {
                port: step.port,
                rtmp_app: step.rtmp_app.clone(),
                rtmp_stream_key: step.stream_key.clone(),
                media_channel: media_receiver,
                notification_channel: notification_sender,
                ip_restrictions: ip_restriction,
                use_tls: use_rtmps,
                requires_registrant_approval: step.reactor_name.is_some(),
            });
        // Surface endpoint notifications (and endpoint shutdown) as step futures.
        futures_channel.send_on_generic_unbounded_recv(
            notification_receiver,
            RtmpWatchStepFutureResult::RtmpWatchNotificationReceived,
            || RtmpWatchStepFutureResult::RtmpEndpointGone,
        );
        let reactor_manager = self.reactor_manager.clone();
        // Notice when the reactor manager goes away entirely.
        futures_channel.send_on_generic_future_completion(async move {
            reactor_manager.closed().await;
            RtmpWatchStepFutureResult::ReactorManagerGone
        });
        let status = step.status.clone();
        Ok((Box::new(step), status))
    }
}
impl RtmpWatchStep {
fn handle_endpoint_notification(
&mut self,
notification: RtmpEndpointWatcherNotification,
futures_channel: &WorkflowStepFuturesChannel,
) {
match notification {
RtmpEndpointWatcherNotification::WatcherRegistrationFailed => {
error!("Registration for RTMP watchers was denied");
self.status = StepStatus::Error {
message: "Registration for watchers failed".to_string(),
};
}
RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => {
info!("Registration for RTMP watchers was accepted");
self.status = StepStatus::Active;
}
RtmpEndpointWatcherNotification::StreamKeyBecameActive {
stream_key,
reactor_update_channel,
} => {
info!(
stream_key = %stream_key,
"At least one watcher became active for stream key '{}'", stream_key
);
let cancellation_channel =
if let Some(reactor_update_channel) = reactor_update_channel {
let cancellation_token = CancellationToken::new();
let recv_stream_key = stream_key.clone();
let cancelled_stream_key = stream_key.clone();
futures_channel.send_on_generic_unbounded_recv_cancellable(
reactor_update_channel,
cancellation_token.child_token(),
move |update| RtmpWatchStepFutureResult::ReactorUpdateReceived {
stream_name: recv_stream_key.clone(),
update,
},
|| RtmpWatchStepFutureResult::ReactorGone,
move || RtmpWatchStepFutureResult::ReactorReceiverCanceled {
stream_name: cancelled_stream_key,
},
);
Some(cancellation_token)
} else {
None
};
self.stream_watchers.insert(
stream_key,
StreamWatchers {
cancellation_token: cancellation_channel,
},
);
}
RtmpEndpointWatcherNotification::StreamKeyBecameInactive { stream_key } => {
info!(
stream_key = %stream_key,
"All watchers left stream key '{}'", stream_key
);
self.stream_watchers.remove(&stream_key);
}
RtmpEndpointWatcherNotification::WatcherRequiringApproval {
connection_id,
stream_key,
response_channel,
} => {
if let Some(reactor) = &self.reactor_name {
let (sender, mut receiver) = unbounded_channel();
let _ = self.reactor_manager.send(
ReactorManagerRequest::CreateWorkflowForStreamName {
reactor_name: reactor.clone(),
stream_name: stream_key,
response_channel: sender,
},
);
futures_channel.send_on_generic_future_completion(async move {
let is_valid = match receiver.recv().await {
Some(response) => response.is_valid,
None => false, // Assume not valid if channel closed
};
RtmpWatchStepFutureResult::ReactorWorkflowResponse {
is_valid,
validation_channel: response_channel,
reactor_update_channel: receiver,
}
});
} else {
error!(
connection_id = %connection_id,
stream_key = %stream_key,
"Watcher requires approval for stream key {} but no reactor name was set",
stream_key
);
let _ = response_channel.send(ValidationResponse::Reject);
}
}
}
}
fn handle_media(&mut self, media: MediaNotification, outputs: &mut StepOutputs) {
outputs.media.push(media.clone());
if self.status == StepStatus::Active {
match &media.content {
MediaNotificationContent::NewIncomingStream { stream_name } => {
// If this step was registered with an exact stream name, then we don't care
// what stream name this was originally published as. For watch purposes treat
// it as the configured stream key
let stream_name = match &self.stream_key {
StreamKeyRegistration::Any => stream_name,
StreamKeyRegistration::Exact(configured_stream_name) => {
configured_stream_name
}
};
info!(
stream_id = ?media.stream_id,
stream_name = %stream_name,
"New incoming stream notification found for stream id {:?} and stream name '{}", media.stream_id, stream_name
);
match self.stream_id_to_name_map.get(&media.stream_id) {
None => (),
Some(current_stream_name) => {
if current_stream_name == stream_name {
warn!(
stream_id = ?media.stream_id,
stream_name = %stream_name,
"New incoming stream notification for stream id {:?} is already mapped \
to this same stream name.", media.stream_id
);
} else {
warn!(
stream_id = ?media.stream_id,
new_stream_name = %stream_name,
active_stream_name = %current_stream_name,
"New incoming stream notification for stream id {:?} is already mapped \
to the stream name '{}'", media.stream_id, current_stream_name
);
}
}
}
self.stream_id_to_name_map
.insert(media.stream_id.clone(), stream_name.clone());
}
MediaNotificationContent::StreamDisconnected => {
info!(
stream_id = ?media.stream_id,
"Stream disconnected notification received for stream id {:?}", media.stream_id
);
match self.stream_id_to_name_map.remove(&media.stream_id) {
Some(_) => (),
None => {
warn!(
stream_id = ?media.stream_id,
"Disconnected stream {:?} was not mapped to a stream name", media.stream_id
);
}
}
}
MediaNotificationContent::Metadata { data } => {
let stream_key = match self.stream_id_to_name_map.get(&media.stream_id) {
Some(key) => key,
None => return,
};
let metadata = hash_map_to_stream_metadata(data);
let rtmp_media = RtmpEndpointMediaMessage {
stream_key: stream_key.clone(),
data: RtmpEndpointMediaData::NewStreamMetaData { metadata },
};
let _ = self.media_channel.send(rtmp_media);
}
MediaNotificationContent::MediaPayload {
data,
payload_type,
media_type: _,
timestamp,
metadata,
is_required_for_decoding,
} => {
let stream_key = match self.stream_id_to_name_map.get(&media.stream_id) {
Some(key) => key,
None => return,
};
let rtmp_media_data = match payload_type {
x if *x == *AUDIO_CODEC_AAC_RAW => RtmpEndpointMediaData::NewAudioData {
is_sequence_header: *is_required_for_decoding,
data: data.clone(),
timestamp: RtmpTimestamp::new(timestamp.as_millis() as u32),
},
x if *x == *VIDEO_CODEC_H264_AVC => {
let is_keyframe = metadata
.iter()
.filter(|m| m.key() == self.is_keyframe_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::Bool(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
let pts_offset = metadata
.iter()
.filter(|m| m.key() == self.pts_offset_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::I32(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
RtmpEndpointMediaData::NewVideoData {
is_sequence_header: *is_required_for_decoding,
is_keyframe,
data: data.clone(),
timestamp: RtmpTimestamp::new(timestamp.as_millis() as u32),
composition_time_offset: pts_offset,
}
}
_ => return, // Payload type not supported by RTMP
};
let rtmp_media = RtmpEndpointMediaMessage {
stream_key: stream_key.clone(),
data: rtmp_media_data,
};
let _ = self.media_channel.send(rtmp_media);
}
}
}
}
}
impl WorkflowStep for RtmpWatchStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
for notification in inputs.notifications.drain(..) {
let future_result = match notification.downcast::<RtmpWatchStepFutureResult>() {
Ok(x) => *x,
Err(_) => {
error!("Rtmp receive step received a notification that is not an 'RtmpReceiveFutureResult' type");
return StepStatus::Error {
message: "Received invalid future result type".to_string(),
};
}
};
match future_result {
RtmpWatchStepFutureResult::RtmpEndpointGone => {
error!("Rtmp endpoint gone, shutting step down");
return StepStatus::Error {
message: "Rtmp endpoint gone".to_string(),
};
}
RtmpWatchStepFutureResult::ReactorManagerGone => {
error!("Reactor manager gone");
return StepStatus::Error {
message: "Reactor manager gone".to_string(),
};
}
RtmpWatchStepFutureResult::ReactorGone => {
if let Some(reactor_name) = &self.reactor_name {
error!("The {} reactor is gone", reactor_name);
} else {
error!("Received notice that the reactor is gone, but this step doesn't use one");
}
return StepStatus::Error {
message: "Reactor gone".to_string(),
};
}
RtmpWatchStepFutureResult::RtmpWatchNotificationReceived(notification) => {
self.handle_endpoint_notification(notification, &futures_channel);
}
RtmpWatchStepFutureResult::ReactorWorkflowResponse {
is_valid,
validation_channel,
reactor_update_channel,
} => {
if is_valid {
let _ = validation_channel.send(ValidationResponse::Approve {
reactor_update_channel,
});
} else {
let _ = validation_channel.send(ValidationResponse::Reject);
}
}
RtmpWatchStepFutureResult::ReactorUpdateReceived {
stream_name,
update,
} => {
// If the workflow is valid, then there's nothing to do but wait for the next
// update.
if !update.is_valid {
info!(
stream_key = %stream_name,
"Received update that stream {} is no longer tied to a workflow",
stream_name
);
// TODO: Need some way to disconnect watchers
}
}
RtmpWatchStepFutureResult::ReactorReceiverCanceled { stream_name } => {
if self.stream_watchers.remove(&stream_name).is_some() {
info!(
"Stream {}'s reactor updating has been cancelled",
stream_name
);
}
}
}
}
for media in inputs.media.drain(..) {
self.handle_media(media, outputs);
}
self.status.clone()
}
}
impl Drop for RtmpWatchStep {
fn drop(&mut self) {
let _ = self
.rtmp_endpoint_sender
.send(RtmpEndpointRequest::RemoveRegistration {
registration_type: RegistrationType::Watcher,
port: self.port,
rtmp_app: self.rtmp_app.clone(),
rtmp_stream_key: self.stream_key.clone(),
});
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/rtmp_receive/tests.rs | mmids-rtmp/src/workflow_steps/rtmp_receive/tests.rs | use super::*;
use anyhow::Result;
use bytes::Bytes;
use mmids_core::net::ConnectionId;
use mmids_core::workflows::definitions::WorkflowStepType;
use mmids_core::workflows::metadata::common_metadata::{
get_is_keyframe_metadata_key, get_pts_offset_metadata_key,
};
use mmids_core::workflows::metadata::MetadataKeyMap;
use mmids_core::workflows::steps::test_utils::StepTestContext;
use mmids_core::workflows::MediaNotificationContent::StreamDisconnected;
use mmids_core::workflows::{MediaNotification, MediaNotificationContent};
use mmids_core::{test_utils, StreamId};
use rml_rtmp::sessions::StreamMetadata;
use rml_rtmp::time::RtmpTimestamp;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::oneshot::channel;
struct TestContext {
step_context: StepTestContext,
rtmp_endpoint: UnboundedReceiver<RtmpEndpointRequest>,
reactor_manager: UnboundedReceiver<ReactorManagerRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
struct DefinitionBuilder {
port: Option<u16>,
app: Option<String>,
key: Option<String>,
reactor: Option<String>,
}
impl DefinitionBuilder {
fn new() -> Self {
DefinitionBuilder {
port: None,
app: None,
key: None,
reactor: None,
}
}
fn port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
fn app(mut self, app: &str) -> Self {
self.app = Some(app.to_string());
self
}
fn key(mut self, key: &str) -> Self {
self.key = Some(key.to_string());
self
}
fn reactor_name(mut self, name: &str) -> Self {
self.reactor = Some(name.to_string());
self
}
fn build(self) -> WorkflowStepDefinition {
let mut definition = WorkflowStepDefinition {
step_type: WorkflowStepType("rtmp_receive".to_string()),
parameters: HashMap::new(),
};
if let Some(port) = self.port {
definition
.parameters
.insert(PORT_PROPERTY_NAME.to_string(), Some(port.to_string()));
}
if let Some(app) = self.app {
definition
.parameters
.insert(APP_PROPERTY_NAME.to_string(), Some(app));
} else {
definition
.parameters
.insert(APP_PROPERTY_NAME.to_string(), Some("app".to_string()));
}
if let Some(key) = self.key {
definition
.parameters
.insert(STREAM_KEY_PROPERTY_NAME.to_string(), Some(key));
} else {
definition
.parameters
.insert(STREAM_KEY_PROPERTY_NAME.to_string(), Some("*".to_string()));
}
if let Some(reactor) = self.reactor {
definition
.parameters
.insert(REACTOR_NAME.to_string(), Some(reactor));
}
definition
}
}
impl TestContext {
fn new(definition: WorkflowStepDefinition) -> Result<Self> {
let (reactor_sender, reactor_receiver) = unbounded_channel();
let (rtmp_sender, rtmp_receiver) = unbounded_channel();
let mut metadata_key_map = MetadataKeyMap::new();
let is_keyframe_metadata_key = get_is_keyframe_metadata_key(&mut metadata_key_map);
let pts_offset_metadata_key = get_pts_offset_metadata_key(&mut metadata_key_map);
let generator = RtmpReceiverStepGenerator {
reactor_manager: reactor_sender,
rtmp_endpoint_sender: rtmp_sender,
is_keyframe_metadata_key,
pts_offset_metadata_key,
};
let step_context = StepTestContext::new(Box::new(generator), definition)?;
Ok(TestContext {
step_context,
rtmp_endpoint: rtmp_receiver,
reactor_manager: reactor_receiver,
is_keyframe_metadata_key,
pts_offset_metadata_key,
})
}
async fn accept_registration(&mut self) -> UnboundedSender<RtmpEndpointPublisherMessage> {
let request = test_utils::expect_mpsc_response(&mut self.rtmp_endpoint).await;
let channel = match request {
RtmpEndpointRequest::ListenForPublishers {
message_channel, ..
} => {
message_channel
.send(RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful)
.expect("Failed to send registration response");
message_channel
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
self.step_context.execute_pending_futures().await;
channel
}
async fn get_reactor_channel(&mut self) -> UnboundedSender<ReactorWorkflowUpdate> {
let request = test_utils::expect_mpsc_response(&mut self.reactor_manager).await;
match request {
ReactorManagerRequest::CreateWorkflowForStreamName {
response_channel, ..
} => response_channel,
request => panic!("Unexpected request: {:?}", request),
}
}
}
#[tokio::test]
async fn requests_registration_for_publishers() {
let definition = DefinitionBuilder::new()
.port(1234)
.app("some_app")
.key("some_key")
.build();
let mut context = TestContext::new(definition).unwrap();
let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match response {
RtmpEndpointRequest::ListenForPublishers {
port,
rtmp_app,
rtmp_stream_key,
..
} => {
assert_eq!(port, 1234, "Unexpected port");
assert_eq!(rtmp_app.as_str(), "some_app", "Unexpected rtmp app");
assert_eq!(
rtmp_stream_key,
StreamKeyRegistration::Exact(Arc::new("some_key".to_string())),
"Unexpected stream key"
);
}
response => panic!("Unexpected rtmp request: {:?}", response),
}
}
#[tokio::test]
async fn no_port_specified_defaults_to_1935() {
let mut definition = DefinitionBuilder::new().key("app").key("key").build();
definition.parameters.remove(PORT_PROPERTY_NAME);
let mut context = TestContext::new(definition).unwrap();
let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match response {
RtmpEndpointRequest::ListenForPublishers { port, .. } => {
assert_eq!(port, 1935, "Unexpected port");
}
response => panic!("Unexpected rtmp request: {:?}", response),
}
}
#[tokio::test]
async fn asterisk_stream_key_acts_as_wildcard() {
let definition = DefinitionBuilder::new().key("*").build();
let mut context = TestContext::new(definition).unwrap();
let response = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match response {
RtmpEndpointRequest::ListenForPublishers {
rtmp_stream_key, ..
} => {
assert_eq!(
rtmp_stream_key,
StreamKeyRegistration::Any,
"Unexpected stream key"
);
}
response => panic!("Unexpected rtmp request: {:?}", response),
}
}
#[tokio::test]
async fn error_if_no_app_specified() {
let mut definition = DefinitionBuilder::new().build();
definition.parameters.remove(APP_PROPERTY_NAME);
if TestContext::new(definition).is_ok() {
panic!("Expected failure");
}
}
#[tokio::test]
async fn error_if_no_key_specified() {
let mut definition = DefinitionBuilder::new().build();
definition.parameters.remove(STREAM_KEY_PROPERTY_NAME);
if TestContext::new(definition).is_ok() {
panic!("Expected failure");
}
}
#[tokio::test]
async fn step_starts_in_created_state() {
let definition = DefinitionBuilder::new().build();
let context = TestContext::new(definition).unwrap();
let status = context.step_context.status;
assert_eq!(status, StepStatus::Created, "Unexpected step status");
}
#[tokio::test]
async fn registration_failure_sets_status_to_error() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
let _channel = match request {
RtmpEndpointRequest::ListenForPublishers {
message_channel, ..
} => {
message_channel
.send(RtmpEndpointPublisherMessage::PublisherRegistrationFailed)
.expect("Failed to send registration response");
message_channel
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
context.step_context.execute_pending_futures().await;
let status = context.step_context.status;
match status {
StepStatus::Error { message: _ } => (),
_ => panic!("Unexpected status: {:?}", status),
}
}
#[tokio::test]
async fn registration_success_sets_status_to_active() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
let _channel = match request {
RtmpEndpointRequest::ListenForPublishers {
message_channel, ..
} => {
message_channel
.send(RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful)
.expect("Failed to send registration response");
message_channel
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
context.step_context.execute_pending_futures().await;
let status = context.step_context.status;
match status {
StepStatus::Active => (),
_ => panic!("Unexpected status: {:?}", status),
}
}
#[tokio::test]
async fn stream_started_notification_raised_when_publisher_connects() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let channel = context.accept_registration().await;
channel
.send(RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: StreamId(Arc::new("test".to_string())),
stream_key: Arc::new("abc".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
reactor_update_channel: None,
})
.expect("Failed to send publisher connected message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(media.stream_id.0.as_str(), "test", "Unexpected stream id");
match &media.content {
MediaNotificationContent::NewIncomingStream { stream_name } => {
assert_eq!(stream_name.as_str(), "abc", "Unexpected stream name");
}
content => panic!("Unexpected media content: {:?}", content),
}
}
#[tokio::test]
async fn stream_disconnected_notification_raised_when_publisher_disconnects() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let channel = context.accept_registration().await;
channel
.send(RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: StreamId(Arc::new("test".to_string())),
stream_key: Arc::new("abc".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
reactor_update_channel: None,
})
.expect("Failed to send publisher connected message");
context.step_context.execute_pending_futures().await;
context.step_context.media_outputs.clear();
channel
.send(RtmpEndpointPublisherMessage::PublishingStopped {
connection_id: ConnectionId(Arc::new("connection".to_string())),
})
.expect("Failed to send disconnected message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(media.stream_id.0.as_str(), "test", "Unexpected stream id");
match &media.content {
MediaNotificationContent::StreamDisconnected => (),
content => panic!("Unexpected media content: {:?}", content),
}
}
#[tokio::test]
async fn metadata_notification_raised_when_publisher_sends_one() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let channel = context.accept_registration().await;
channel
.send(RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: StreamId(Arc::new("test".to_string())),
stream_key: Arc::new("abc".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
reactor_update_channel: None,
})
.expect("Failed to send publisher connected message");
context.step_context.execute_pending_futures().await;
let mut metadata = StreamMetadata::new();
metadata.video_width = Some(1920);
channel
.send(RtmpEndpointPublisherMessage::StreamMetadataChanged {
metadata,
publisher: ConnectionId(Arc::new("connection".to_string())),
})
.expect("Failed to send metadata message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(media.stream_id.0.as_str(), "test", "Unexpected stream id");
match &media.content {
MediaNotificationContent::Metadata { data } => {
assert_eq!(
data.get("width"),
Some(&"1920".to_string()),
"Unexpected width"
);
}
content => panic!("Unexpected media content: {:?}", content),
}
}
#[tokio::test]
async fn video_notification_received_when_publisher_sends_video() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let channel = context.accept_registration().await;
channel
.send(RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: StreamId(Arc::new("test".to_string())),
stream_key: Arc::new("abc".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
reactor_update_channel: None,
})
.expect("Failed to send publisher connected message");
context.step_context.execute_pending_futures().await;
channel
.send(RtmpEndpointPublisherMessage::NewVideoData {
publisher: ConnectionId(Arc::new("connection".to_string())),
data: Bytes::from(vec![1, 2, 3]),
timestamp: RtmpTimestamp::new(5),
is_keyframe: true,
is_sequence_header: true,
composition_time_offset: 123,
})
.expect("Failed to send video message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(media.stream_id.0.as_str(), "test", "Unexpected stream id");
match &media.content {
MediaNotificationContent::MediaPayload {
media_type,
payload_type,
data,
timestamp,
is_required_for_decoding,
metadata,
} => {
let is_keyframe = metadata
.iter()
.filter(|m| m.key() == context.is_keyframe_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::Bool(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
let pts_offset = metadata
.iter()
.filter(|m| m.key() == context.pts_offset_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::I32(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
assert_eq!(*media_type, MediaType::Video);
assert_eq!(
*payload_type, *VIDEO_CODEC_H264_AVC,
"Unexpected payload type"
);
assert_eq!(data, &vec![1, 2, 3], "Unexpected bytes");
assert_eq!(timestamp, &Duration::from_millis(5), "Unexpected dts");
assert!(
is_required_for_decoding,
"Expected is_required_for_decoding to be true"
);
assert!(is_keyframe, "Expected is_keyframe to be true");
assert_eq!(pts_offset, 123, "Unexpected pts offset");
}
content => panic!("Unexpected media content: {:?}", content),
}
}
#[tokio::test]
async fn audio_notification_received_when_publisher_sends_audio() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
let channel = context.accept_registration().await;
channel
.send(RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: StreamId(Arc::new("test".to_string())),
stream_key: Arc::new("abc".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
reactor_update_channel: None,
})
.expect("Failed to send publisher connected message");
context.step_context.execute_pending_futures().await;
channel
.send(RtmpEndpointPublisherMessage::NewAudioData {
publisher: ConnectionId(Arc::new("connection".to_string())),
data: Bytes::from(vec![1, 2, 3]),
timestamp: RtmpTimestamp::new(5),
is_sequence_header: true,
})
.expect("Failed to send audio message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(media.stream_id.0.as_str(), "test", "Unexpected stream id");
let expected_content = MediaNotificationContent::MediaPayload {
timestamp: Duration::from_millis(5),
data: Bytes::from_static(&[1, 2, 3]),
is_required_for_decoding: true,
media_type: MediaType::Audio,
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
};
assert_eq!(media.content, expected_content, "Unexpected media content");
}
#[tokio::test]
async fn stream_started_notification_passed_as_input_does_not_get_passed_as_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context
.step_context
.assert_media_not_passed_through(MediaNotification {
stream_id: StreamId(Arc::new("test".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("name".to_string()),
},
});
}
#[tokio::test]
async fn stream_disconnected_notification_passed_as_input_does_not_get_passed_as_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context
.step_context
.assert_media_not_passed_through(MediaNotification {
stream_id: StreamId(Arc::new("test".to_string())),
content: StreamDisconnected,
});
}
#[tokio::test]
async fn metadata_notification_passed_as_input_does_not_get_passed_as_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context
.step_context
.assert_media_not_passed_through(MediaNotification {
stream_id: StreamId(Arc::new("test".to_string())),
content: MediaNotificationContent::Metadata {
data: HashMap::new(),
},
});
}
#[tokio::test]
async fn video_notification_passed_as_input_does_not_get_passed_as_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context
.step_context
.assert_media_not_passed_through(MediaNotification {
stream_id: StreamId(Arc::new("test".to_string())),
content: MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
data: Bytes::from(vec![1, 2]),
timestamp: Duration::new(0, 0),
is_required_for_decoding: true,
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
},
});
}
#[tokio::test]
async fn audio_notification_passed_as_input_does_not_get_passed_as_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context
.step_context
.assert_media_not_passed_through(MediaNotification {
stream_id: StreamId(Arc::new("test".to_string())),
content: MediaNotificationContent::MediaPayload {
data: Bytes::from(vec![1, 2]),
timestamp: Duration::from_millis(5),
is_required_for_decoding: true,
media_type: MediaType::Audio,
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
},
});
}
#[tokio::test]
async fn approval_required_requested_when_reactor_specified() {
let definition = DefinitionBuilder::new().reactor_name("abc").build();
let mut context = TestContext::new(definition).unwrap();
let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match request {
RtmpEndpointRequest::ListenForPublishers {
requires_registrant_approval,
..
} => {
assert!(
requires_registrant_approval,
"Expected requires approval to be true"
);
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
}
#[tokio::test]
async fn reactor_queried_for_stream_key_when_approval_required() {
let definition = DefinitionBuilder::new().reactor_name("abc").build();
let mut context = TestContext::new(definition).unwrap();
let publish_channel = context.accept_registration().await;
let (sender, _receiver) = channel();
publish_channel
.send(RtmpEndpointPublisherMessage::PublisherRequiringApproval {
stream_key: Arc::new("ab123".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
response_channel: sender,
})
.expect("Failed to send publisher message");
context.step_context.execute_pending_futures().await;
let request = test_utils::expect_mpsc_response(&mut context.reactor_manager).await;
match request {
ReactorManagerRequest::CreateWorkflowForStreamName {
reactor_name,
stream_name,
..
} => {
assert_eq!(reactor_name.as_str(), "abc", "Unexpected reactor name");
assert_eq!(stream_name.as_str(), "ab123", "Unexpected stream name");
}
request => panic!("Unexpected request received: {:?}", request),
}
}
#[tokio::test]
async fn rejection_sent_when_reactor_says_stream_is_not_valid() {
let definition = DefinitionBuilder::new().reactor_name("reactor").build();
let mut context = TestContext::new(definition).unwrap();
let publish_channel = context.accept_registration().await;
let (sender, receiver) = channel();
publish_channel
.send(RtmpEndpointPublisherMessage::PublisherRequiringApproval {
stream_key: Arc::new("ab123".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
response_channel: sender,
})
.expect("Failed to send publisher message");
context.step_context.execute_pending_futures().await;
let reactor_channel = context.get_reactor_channel().await;
reactor_channel
.send(ReactorWorkflowUpdate {
is_valid: false,
routable_workflow_names: HashSet::new(),
})
.expect("Failed to send reactor response");
context.step_context.execute_pending_futures().await;
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
ValidationResponse::Reject => (),
response => panic!("Unexpected response: {:?}", response),
}
}
#[tokio::test]
async fn approval_sent_when_reactor_says_stream_is_valid() {
let definition = DefinitionBuilder::new().reactor_name("reactor").build();
let mut context = TestContext::new(definition).unwrap();
let publish_channel = context.accept_registration().await;
let (sender, receiver) = channel();
publish_channel
.send(RtmpEndpointPublisherMessage::PublisherRequiringApproval {
stream_key: Arc::new("ab123".to_string()),
connection_id: ConnectionId(Arc::new("connection".to_string())),
response_channel: sender,
})
.expect("Failed to send publisher message");
context.step_context.execute_pending_futures().await;
let reactor_channel = context.get_reactor_channel().await;
reactor_channel
.send(ReactorWorkflowUpdate {
is_valid: true,
routable_workflow_names: HashSet::new(),
})
.expect("Failed to send reactor response");
context.step_context.execute_pending_futures().await;
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
ValidationResponse::Approve { .. } => (),
response => panic!("Unexpected response: {:?}", response),
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-rtmp/src/workflow_steps/rtmp_receive/mod.rs | mmids-rtmp/src/workflow_steps/rtmp_receive/mod.rs | //! The RTMP Receive step registers with the RTMP server endpoint to allow publishers to connect with
//! the specified port, application name, and stream key combination. Any media packets that
//! RTMP publishers send in will be sent to the next steps.
//!
//! All media packets that come in from previous workflow steps are ignored.
#[cfg(test)]
mod tests;
use crate::rtmp_server::{
IpRestriction, RegistrationType, RtmpEndpointPublisherMessage, RtmpEndpointRequest,
StreamKeyRegistration, ValidationResponse,
};
use bytes::BytesMut;
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::net::{ConnectionId, IpAddress, IpAddressParseError};
use mmids_core::reactors::manager::ReactorManagerRequest;
use mmids_core::reactors::ReactorWorkflowUpdate;
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKey, MetadataValue,
};
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent, MediaType};
use mmids_core::StreamId;
use std::collections::HashMap;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error as ThisError;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::Sender;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
pub const PORT_PROPERTY_NAME: &str = "port";
pub const APP_PROPERTY_NAME: &str = "rtmp_app";
pub const STREAM_KEY_PROPERTY_NAME: &str = "stream_key";
pub const IP_ALLOW_PROPERTY_NAME: &str = "allow_ips";
pub const IP_DENY_PROPERTY_NAME: &str = "deny_ips";
pub const RTMPS_FLAG: &str = "rtmps";
pub const REACTOR_NAME: &str = "reactor";
/// Generates new rtmp receiver workflow step instances based on specified step definitions.
pub struct RtmpReceiverStepGenerator {
    /// Channel for registering with, and receiving messages from, the RTMP server endpoint
    rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
    /// Channel to the reactor manager, used when publishers require approval
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
    /// Metadata key used to attach the keyframe flag to outgoing video payloads
    is_keyframe_metadata_key: MetadataKey,
    /// Metadata key used to attach the pts offset (composition time) to outgoing video payloads
    pts_offset_metadata_key: MetadataKey,
}
/// Per-publisher state tracked by the step while a connection is active.
struct ConnectionDetails {
    /// Stream id the endpoint assigned to this publisher's connection
    stream_id: StreamId,
    // Used to cancel the reactor update future. When a stream disconnects, this cancellation
    // channel will be dropped causing the future waiting for reactor updates to be closed. This
    // will inform the reactor that this step is no longer interested in whatever workflow it was
    // managing for it. Not using a one shot, as the channel needs to live across multiple futures
    // if updates come in.
    cancellation_token: Option<CancellationToken>,
}
impl Drop for ConnectionDetails {
    fn drop(&mut self) {
        // Cancel the token (if any) so the future listening for reactor updates
        // for this connection shuts down along with the connection.
        if let Some(token) = self.cancellation_token.take() {
            token.cancel();
        }
    }
}
/// Workflow step that accepts RTMP publishers and forwards their media to later steps.
struct RtmpReceiverStep {
    rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
    /// Port the RTMP endpoint listens on for this registration
    port: u16,
    /// RTMP application name publishers must connect with
    rtmp_app: Arc<String>,
    /// Stream key(s) this step accepts (a specific key, or any)
    stream_key: StreamKeyRegistration,
    status: StepStatus,
    /// Active publishers, keyed by connection id
    connection_details: HashMap<ConnectionId, ConnectionDetails>,
    /// Reactor used to validate publishers, if one was configured
    reactor_name: Option<Arc<String>>,
    /// Scratch buffer used when serializing media payload metadata
    metadata_buffer: BytesMut,
    is_keyframe_metadata_key: MetadataKey,
    pts_offset_metadata_key: MetadataKey,
}
impl StepFutureResult for FutureResult {}
/// Results of the futures this step waits on, fed back in through `execute`.
enum FutureResult {
    /// The rtmp endpoint dropped the channel backing this step's registration
    RtmpEndpointDroppedRegistration,
    /// The reactor manager's channel closed
    ReactorManagerGone,
    /// The reactor's update channel closed
    ReactorGone,
    /// A message arrived from the rtmp endpoint for this step's registration
    RtmpEndpointResponseReceived(RtmpEndpointPublisherMessage),
    /// The reactor answered a workflow creation request for a pending publisher
    ReactorWorkflowReturned {
        is_valid: bool,
        reactor_receiver: UnboundedReceiver<ReactorWorkflowUpdate>,
        response_channel: Sender<ValidationResponse>,
    },
    /// The reactor sent a follow-up workflow update for an approved connection
    ReactorUpdateReceived {
        connection_id: ConnectionId,
        update: ReactorWorkflowUpdate,
    },
    /// A connection's cancellation token fired, ending its reactor update future
    ReactorCancellationReceived,
}
/// Errors that can prevent the rtmp receive step from being created.
///
/// Note: `NoRtmpApp` previously named the '{port}' parameter and `NoStreamKey` named
/// '{rtmp_app}'; both messages now reference the parameter they actually validate.
#[derive(ThisError, Debug)]
enum StepStartupError {
    /// The `rtmp_app` parameter was missing or empty.
    #[error(
        "No RTMP app specified. A non-empty parameter of '{}' is required",
        APP_PROPERTY_NAME
    )]
    NoRtmpApp,

    /// The `stream_key` parameter was missing or empty.
    #[error(
        "No stream key specified. A non-empty parameter of '{}' is required",
        STREAM_KEY_PROPERTY_NAME
    )]
    NoStreamKey,

    /// The `port` parameter could not be parsed as a u16.
    #[error(
        "Invalid port value of '{0}' specified. A number from 0 to 65535 should be specified"
    )]
    InvalidPort(String),

    /// One of the ip allow/deny lists contained an unparseable address.
    #[error("Failed to parse ip address")]
    InvalidIpAddress(#[from] IpAddressParseError),

    /// `allow_ips` and `deny_ips` are mutually exclusive.
    #[error(
        "Both {} and {} were specified, but only one is allowed",
        IP_ALLOW_PROPERTY_NAME,
        IP_DENY_PROPERTY_NAME
    )]
    BothDenyAndAllowIpRestrictions,
}
impl RtmpReceiverStepGenerator {
    /// Creates a generator wired to the given rtmp endpoint and reactor manager.
    /// Steps it generates tag video payloads using the provided keyframe and
    /// pts offset metadata keys.
    pub fn new(
        rtmp_endpoint_sender: UnboundedSender<RtmpEndpointRequest>,
        reactor_manager: UnboundedSender<ReactorManagerRequest>,
        is_keyframe_metadata_key: MetadataKey,
        pts_offset_metadata_key: MetadataKey,
    ) -> Self {
        RtmpReceiverStepGenerator {
            rtmp_endpoint_sender,
            reactor_manager,
            is_keyframe_metadata_key,
            pts_offset_metadata_key,
        }
    }
}
impl StepGenerator for RtmpReceiverStepGenerator {
    /// Parses the step definition's parameters, registers with the rtmp endpoint for
    /// publishers on the resolved port/app/stream-key combination, and returns the new
    /// step. Errors with a `StepStartupError` when parameters are missing or invalid.
    fn generate(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult {
        // The flag's presence (with or without a value) enables rtmps.
        let use_rtmps = definition.parameters.contains_key(RTMPS_FLAG);

        // When unspecified, default to the scheme's standard port (443 rtmps, 1935 rtmp).
        let port = match definition.parameters.get(PORT_PROPERTY_NAME) {
            Some(Some(value)) => match value.parse::<u16>() {
                Ok(num) => num,
                Err(_) => {
                    return Err(Box::new(StepStartupError::InvalidPort(value.clone())));
                }
            },
            _ => {
                if use_rtmps {
                    443
                } else {
                    1935
                }
            }
        };

        let app = match definition.parameters.get(APP_PROPERTY_NAME) {
            Some(Some(x)) => Arc::new(x.trim().to_string()),
            _ => return Err(Box::new(StepStartupError::NoRtmpApp)),
        };

        let stream_key = match definition.parameters.get(STREAM_KEY_PROPERTY_NAME) {
            Some(Some(x)) => Arc::new(x.trim().to_string()),
            _ => return Err(Box::new(StepStartupError::NoStreamKey)),
        };

        let allowed_ips = match definition.parameters.get(IP_ALLOW_PROPERTY_NAME) {
            Some(Some(value)) => IpAddress::parse_comma_delimited_list(Some(value))?,
            _ => Vec::new(),
        };

        let denied_ips = match definition.parameters.get(IP_DENY_PROPERTY_NAME) {
            Some(Some(value)) => IpAddress::parse_comma_delimited_list(Some(value))?,
            _ => Vec::new(),
        };

        // Allow and deny lists are mutually exclusive.
        let ip_restriction = match (!allowed_ips.is_empty(), !denied_ips.is_empty()) {
            (true, true) => {
                return Err(Box::new(StepStartupError::BothDenyAndAllowIpRestrictions));
            }
            (true, false) => IpRestriction::Allow(allowed_ips),
            (false, true) => IpRestriction::Deny(denied_ips),
            (false, false) => IpRestriction::None,
        };

        let reactor_name = match definition.parameters.get(REACTOR_NAME) {
            Some(Some(value)) => Some(Arc::new(value.clone())),
            _ => None,
        };

        let step = RtmpReceiverStep {
            status: StepStatus::Created,
            rtmp_endpoint_sender: self.rtmp_endpoint_sender.clone(),
            reactor_manager: self.reactor_manager.clone(),
            port,
            rtmp_app: app,
            connection_details: HashMap::new(),
            reactor_name,
            // A stream key of "*" accepts any stream key.
            stream_key: if stream_key.as_str() == "*" {
                StreamKeyRegistration::Any
            } else {
                StreamKeyRegistration::Exact(stream_key)
            },
            metadata_buffer: BytesMut::new(),
            is_keyframe_metadata_key: self.is_keyframe_metadata_key,
            pts_offset_metadata_key: self.pts_offset_metadata_key,
        };

        let (sender, receiver) = unbounded_channel();
        let _ = step
            .rtmp_endpoint_sender
            .send(RtmpEndpointRequest::ListenForPublishers {
                message_channel: sender,
                port: step.port,
                rtmp_app: step.rtmp_app.clone(),
                rtmp_stream_key: step.stream_key.clone(),
                stream_id: None,
                ip_restrictions: ip_restriction,
                use_tls: use_rtmps,
                // Publishers only require approval when a reactor validates them.
                requires_registrant_approval: step.reactor_name.is_some(),
            });

        // Surface endpoint messages (and channel closure) as step notifications.
        futures_channel.send_on_generic_unbounded_recv(
            receiver,
            FutureResult::RtmpEndpointResponseReceived,
            || FutureResult::RtmpEndpointDroppedRegistration,
        );

        // Watch for the reactor manager disappearing entirely.
        let reactor_manager = self.reactor_manager.clone();
        futures_channel.send_on_generic_future_completion(async move {
            reactor_manager.closed().await;
            FutureResult::ReactorManagerGone
        });

        let status = step.status.clone();

        Ok((Box::new(step), status))
    }
}
impl RtmpReceiverStep {
    /// Reacts to a single message from the rtmp endpoint: registration results,
    /// publisher connect/disconnect, metadata, incoming media, and approval requests.
    /// Media from known publishers is translated into `MediaNotification`s pushed
    /// onto the step's outputs; messages for unknown connection ids are ignored.
    fn handle_rtmp_publisher_message(
        &mut self,
        outputs: &mut StepOutputs,
        message: RtmpEndpointPublisherMessage,
        futures_channel: &WorkflowStepFuturesChannel,
    ) {
        match message {
            // Registration failures are fatal for the step.
            RtmpEndpointPublisherMessage::PublisherRegistrationFailed => {
                error!("Rtmp receive step failed to register for publish registration");
                self.status = StepStatus::Error {
                    message: "Rtmp receive step failed to register for publish registration"
                        .to_string(),
                };
            }

            RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => {
                info!("Rtmp receive step successfully registered for publishing");
                self.status = StepStatus::Active;
            }

            RtmpEndpointPublisherMessage::NewPublisherConnected {
                stream_id,
                connection_id,
                stream_key,
                reactor_update_channel,
            } => {
                info!(
                    stream_id = ?stream_id,
                    connection_id = ?connection_id,
                    stream_key = %stream_key,
                    "Rtmp receive step seen new publisher: {:?}, {:?}, {:?}", stream_id, connection_id, stream_key
                );

                // If a reactor validated this publisher, keep listening for its
                // workflow updates until the connection goes away. The token is
                // cancelled when the ConnectionDetails entry is dropped.
                let cancellation_token = if let Some(update_channel) = reactor_update_channel {
                    let cancellation_token = CancellationToken::new();
                    let connection_id = connection_id.clone();
                    futures_channel.send_on_generic_unbounded_recv_cancellable(
                        update_channel,
                        cancellation_token.child_token(),
                        move |update| FutureResult::ReactorUpdateReceived {
                            connection_id: connection_id.clone(),
                            update,
                        },
                        || FutureResult::ReactorGone,
                        || FutureResult::ReactorCancellationReceived,
                    );

                    Some(cancellation_token)
                } else {
                    None
                };

                self.connection_details.insert(
                    connection_id,
                    ConnectionDetails {
                        stream_id: stream_id.clone(),
                        cancellation_token,
                    },
                );

                // Announce the new stream to downstream steps.
                outputs.media.push(MediaNotification {
                    stream_id,
                    content: MediaNotificationContent::NewIncomingStream {
                        stream_name: stream_key,
                    },
                });
            }

            RtmpEndpointPublisherMessage::PublishingStopped { connection_id } => {
                // Removing the entry drops its cancellation token, which stops the
                // reactor update future for this connection.
                match self.connection_details.remove(&connection_id) {
                    None => (),
                    Some(connection) => {
                        info!(
                            stream_id = ?connection.stream_id,
                            connection_id = ?connection_id,
                            "Rtmp receive step notified that connection {:?} is no longer publishing stream {:?}",
                            connection_id, connection.stream_id
                        );

                        outputs.media.push(MediaNotification {
                            stream_id: connection.stream_id.clone(),
                            content: MediaNotificationContent::StreamDisconnected,
                        });
                    }
                }
            }

            RtmpEndpointPublisherMessage::StreamMetadataChanged {
                publisher,
                metadata,
            } => match self.connection_details.get(&publisher) {
                None => (),
                Some(connection) => outputs.media.push(MediaNotification {
                    stream_id: connection.stream_id.clone(),
                    content: MediaNotificationContent::Metadata {
                        data: crate::utils::stream_metadata_to_hash_map(metadata),
                    },
                }),
            },

            RtmpEndpointPublisherMessage::NewVideoData {
                publisher,
                data,
                timestamp,
                is_sequence_header,
                is_keyframe,
                composition_time_offset,
            } => match self.connection_details.get(&publisher) {
                None => (),
                Some(connection) => {
                    // Attach the keyframe flag and pts offset as payload metadata.
                    let is_keyframe_metadata = MetadataEntry::new(
                        self.is_keyframe_metadata_key,
                        MetadataValue::Bool(is_keyframe),
                        &mut self.metadata_buffer,
                    )
                    .unwrap(); // Should only happen if type mismatch occurs

                    let pts_offset_metadata = MetadataEntry::new(
                        self.pts_offset_metadata_key,
                        MetadataValue::I32(composition_time_offset),
                        &mut self.metadata_buffer,
                    )
                    .unwrap(); // Should only happen if type mismatch occurs

                    let metadata = MediaPayloadMetadataCollection::new(
                        [is_keyframe_metadata, pts_offset_metadata].into_iter(),
                        &mut self.metadata_buffer,
                    );

                    outputs.media.push(MediaNotification {
                        stream_id: connection.stream_id.clone(),
                        content: MediaNotificationContent::MediaPayload {
                            media_type: MediaType::Video,
                            payload_type: VIDEO_CODEC_H264_AVC.clone(),
                            is_required_for_decoding: is_sequence_header,
                            timestamp: Duration::from_millis(timestamp.value.into()),
                            metadata,
                            data,
                        },
                    });
                }
            },

            RtmpEndpointPublisherMessage::NewAudioData {
                publisher,
                is_sequence_header,
                data,
                timestamp,
            } => match self.connection_details.get(&publisher) {
                None => (),
                Some(connection) => {
                    outputs.media.push(MediaNotification {
                        stream_id: connection.stream_id.clone(),
                        content: MediaNotificationContent::MediaPayload {
                            payload_type: AUDIO_CODEC_AAC_RAW.clone(),
                            media_type: MediaType::Audio,
                            timestamp: Duration::from_millis(timestamp.value as u64),
                            // Audio payloads carry no extra metadata entries.
                            metadata: MediaPayloadMetadataCollection::new(
                                iter::empty(),
                                &mut self.metadata_buffer,
                            ),
                            is_required_for_decoding: is_sequence_header,
                            data,
                        },
                    });
                }
            },

            RtmpEndpointPublisherMessage::PublisherRequiringApproval {
                connection_id,
                stream_key,
                response_channel,
            } => {
                if let Some(name) = &self.reactor_name {
                    // Ask the reactor to create/find a workflow for this stream name.
                    let (sender, mut receiver) = unbounded_channel();
                    let _ = self.reactor_manager.send(
                        ReactorManagerRequest::CreateWorkflowForStreamName {
                            reactor_name: name.clone(),
                            stream_name: stream_key,
                            response_channel: sender,
                        },
                    );

                    // Start a future waiting for reactor's response
                    futures_channel.send_on_generic_future_completion(async move {
                        let is_valid = match receiver.recv().await {
                            Some(response) => response.is_valid,
                            None => false, // reactor closed, treat it the same as no workflow scenario
                        };

                        FutureResult::ReactorWorkflowReturned {
                            is_valid,
                            reactor_receiver: receiver,
                            response_channel,
                        }
                    });
                } else {
                    // Approval was requested but no reactor is configured; reject.
                    error!(
                        connection_id = %connection_id,
                        stream_key = %stream_key,
                        "Publisher requires approval for stream key {} but no reactor name was set",
                        stream_key
                    );

                    let _ = response_channel.send(ValidationResponse::Reject);
                }
            }
        }
    }
}
impl WorkflowStep for RtmpReceiverStep {
    /// Drains the notifications raised by this step's futures and updates step state
    /// and outputs accordingly. Media inputs from previous steps are not consumed here
    /// (this step only produces media).
    fn execute(
        &mut self,
        inputs: &mut StepInputs,
        outputs: &mut StepOutputs,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepStatus {
        for future_result in inputs.notifications.drain(..) {
            // Every notification should be a FutureResult raised by this step itself.
            let future_result = match future_result.downcast::<FutureResult>() {
                Ok(result) => *result,
                Err(_) => {
                    error!("Rtmp receive step received a notification that is not an 'RtmpReceiveFutureResult' type");
                    return StepStatus::Error {
                        message: "Rtmp receive step received a notification that is not an 'RtmpReceiveFutureResult' type".to_string(),
                    };
                }
            };

            match future_result {
                FutureResult::RtmpEndpointDroppedRegistration => {
                    error!(
                        "Rtmp receive step stopping as the rtmp endpoint dropped the registration"
                    );
                    return StepStatus::Error {
                        message: "Rtmp receive step stopping as the rtmp endpoint dropped the registration"
                            .to_string(),
                    };
                }

                FutureResult::ReactorManagerGone => {
                    error!("Reactor manager gone");
                    return StepStatus::Error {
                        message: "Reactor manager gone".to_string(),
                    };
                }

                FutureResult::ReactorGone => {
                    if let Some(name) = &self.reactor_name {
                        error!("Reactor {} is gone", name);
                    } else {
                        error!("Got reactor gone signal but step is not using a reactor");
                    }

                    return StepStatus::Error {
                        message: "Reactor gone".to_string(),
                    };
                }

                FutureResult::RtmpEndpointResponseReceived(message) => {
                    self.handle_rtmp_publisher_message(outputs, message, &futures_channel);
                }

                FutureResult::ReactorWorkflowReturned {
                    is_valid,
                    reactor_receiver,
                    response_channel,
                } => {
                    // Approval hands the reactor's update channel to the endpoint so
                    // future workflow changes can be tracked for the connection.
                    if is_valid {
                        let _ = response_channel.send(ValidationResponse::Approve {
                            reactor_update_channel: reactor_receiver,
                        });
                    } else {
                        let _ = response_channel.send(ValidationResponse::Reject);
                    }
                }

                FutureResult::ReactorUpdateReceived {
                    connection_id,
                    update,
                } => {
                    // If the update is still valid, then do nothing and wait for the next update
                    if !update.is_valid {
                        info!(
                            connection_id = %connection_id,
                            "Received update that stream {} is no longer tied to a workflow",
                            connection_id
                        );

                        // TODO: Need some way to disconnect publishers
                    }
                }

                // Cancellation is only raised as part of normal connection teardown.
                FutureResult::ReactorCancellationReceived => {}
            }
        }

        self.status.clone()
    }
}
impl Drop for RtmpReceiverStep {
    fn drop(&mut self) {
        // Release this step's publisher registration so the port/app/stream-key
        // combination becomes available again.
        let removal = RtmpEndpointRequest::RemoveRegistration {
            registration_type: RegistrationType::Publisher,
            port: self.port,
            rtmp_app: self.rtmp_app.clone(),
            rtmp_stream_key: self.stream_key.clone(),
        };

        // The endpoint may already be gone during shutdown; ignore send failures.
        let _ = self.rtmp_endpoint_sender.send(removal);
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/lib.rs | mmids-gstreamer/src/lib.rs | //! This crate contains all components for mmids applications to interact with with gstreamer
//! pipelines.
#[macro_use]
extern crate lazy_static;
pub mod encoders;
pub mod endpoints;
pub mod steps;
pub mod utils;
use gstreamer::glib;
use gstreamer::DebugLevel;
use tracing::{error, info, warn};
lazy_static! {
    /// Retrieves the result of initializing gstreamer.
    ///
    /// When this is first called, gstreamer will be initialized and logging handlers will be set
    /// up, so that gstreamer logs are written via normal `tracing` log handlers. This also sets
    /// up default logging at the WARN level.
    ///
    /// All gstreamer initialization should be done by getting the result of this value instead of
    /// manual calls to `gstreamer::init()`.
    pub static ref GSTREAMER_INIT_RESULT: Result<(), glib::Error> = {
        match gstreamer::init() {
            Ok(_) => (),
            Err(error) => {
                error!("Failed to initialize gstreamer: {:?}", error);
                return Err(error);
            }
        }

        // Remove stdout debug logging
        gstreamer::debug_remove_default_log_function();

        // Add custom logging handler that routes each gstreamer message to the
        // matching `tracing` level (error/warn, everything else logged as info).
        gstreamer::debug_add_log_function(|category, level, file, function, _line, object, message| {
            let message = message.get().map(|o| o.to_string()).unwrap_or_else(|| "".to_string());
            let object_name = object.map(|o| o.to_string()).unwrap_or_else(|| "<NO OBJECT>".to_string());
            match &level {
                DebugLevel::Error => error!(
                    category = %category.name(),
                    level = %level.name(),
                    file = %file,
                    function = %function,
                    object = %object_name,
                    message = %message,
                    "Gstreamer error ({}): {}", category.name(), message
                ),

                DebugLevel::Warning => warn!(
                    category = %category.name(),
                    level = %level.name(),
                    file = %file,
                    function = %function,
                    object = %object_name,
                    message = %message,
                    "Gstreamer warning ({}): {}", category.name(), message
                ),

                _ => info!(
                    category = %category.name(),
                    level = %level.name(),
                    file = %file,
                    function = %function,
                    object = %object_name,
                    message = %message,
                    "Gstreamer {} ({}): {}", level.name(), category.name(), message
                ),
            }
        });

        // By default log warning and above
        gstreamer::debug_set_default_threshold(DebugLevel::Warning);

        info!("Gstreamer successfully initialized");
        Ok(())
    };
}
/// Used to update the gstreamer default log level to info, which can be used for getting more
/// information about failures. Should not be used in production due to the amount of logs that
/// will be raised, and it will not be easy to correlate gstreamer info logs to specific mmids
/// streams.
///
/// **Must** be called after the first invocation of `GSTREAMER_INIT_RESULT`, otherwise the default
/// log level will be overridden to warning.
pub fn set_gstreamer_log_level_to_info() {
    // Overrides the WARN threshold that GSTREAMER_INIT_RESULT installs.
    gstreamer::debug_set_default_threshold(DebugLevel::Info);
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/utils.rs | mmids-gstreamer/src/utils.rs | //! Common utility functions that are useful for interacting with gstreamer. These are mostly
//! meant for use by code creating custom encoders.
use anyhow::{anyhow, Context, Result};
use bytes::Bytes;
use gstreamer::prelude::*;
use gstreamer::{Buffer, Caps, ClockTime, Element, ElementFactory};
use gstreamer_app::AppSrc;
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use std::sync::Arc;
use std::time::Duration;
/// Builds a gstreamer `Buffer` holding the given bytes, optionally stamping it with a
/// decoding timestamp (dts) and/or a presentation timestamp (pts).
pub fn set_gst_buffer(data: Bytes, dts: Option<Duration>, pts: Option<Duration>) -> Result<Buffer> {
    let byte_count = data.len();
    let mut buffer = Buffer::with_size(byte_count)
        .with_context(|| format!("Could not create a buffer with size {}", byte_count))?;

    // Scope the mutable borrow so `buffer` can be returned afterwards.
    {
        let buffer_mut = buffer
            .get_mut()
            .with_context(|| "Could not get mutable buffer")?;

        if let Some(decode_time) = dts {
            buffer_mut.set_dts(ClockTime::from_mseconds(decode_time.as_millis() as u64));
        }

        if let Some(presentation_time) = pts {
            buffer_mut.set_pts(ClockTime::from_mseconds(presentation_time.as_millis() as u64));
        }

        // Copy the payload into the buffer's writable memory map.
        let mut writable = buffer_mut
            .map_writable()
            .with_context(|| "Failed to map buffer to writable buffer map")?;

        writable.as_mut_slice().copy_from_slice(&data);
    }

    Ok(buffer)
}
/// Applies a video codec's sequence header to an encoder's `appsrc`. Sequence headers
/// are not valid packets for the codec, so they can't simply be pushed into the
/// appsrc's buffer; each codec has its own mechanism for supplying them (for h264 AVC,
/// the `codec_data` cap), and this function centralizes that logic.
pub fn set_source_video_sequence_header(
    source: &AppSrc,
    payload_type: Arc<String>,
    buffer: Buffer,
) -> Result<()> {
    match payload_type {
        x if x == *VIDEO_CODEC_H264_AVC => {
            let caps = Caps::builder("video/x-h264")
                .field("codec_data", buffer)
                .build();

            source.set_caps(Some(&caps));
            Ok(())
        }

        _ => Err(anyhow!(
            "Video codec is not known, and thus we can't prepare the gstreamer pipeline to \
            accept it."
        )),
    }
}
/// Applies an audio codec's sequence header to an encoder's `appsrc`, mirroring the
/// video variant above (for raw AAC, the header is supplied via the `codec_data` cap).
pub fn set_source_audio_sequence_header(
    source: &AppSrc,
    payload_type: Arc<String>,
    buffer: Buffer,
) -> Result<()> {
    if payload_type == *AUDIO_CODEC_AAC_RAW {
        let caps = Caps::builder("audio/mpeg")
            .field("mpegversion", 4) // I think this is correct? Unsure 2 vs 4
            .field("codec_data", buffer)
            .build();

        source.set_caps(Some(&caps));
        return Ok(());
    }

    Err(anyhow!(
        "audio codec {payload_type} is not known, and thus we can't prepare the gstreamer pipeline \
        to accept it."
    ))
}
/// Quick function to create an un-named gstreamer element, while providing a consumable error
/// if that fails.
pub fn create_gst_element(name: &str) -> Result<Element> {
    // `None` means no explicit element name is assigned.
    ElementFactory::make(name, None).with_context(|| format!("Failed to create element '{}'", name))
}
/// Reads the `codec_data` caps from the provided element. This is usually where sequence header
/// data is contained.
pub fn get_codec_data_from_element(element: &Element) -> Result<Bytes> {
    // Walk from the element's src pad down to its first caps structure...
    let pad = element
        .static_pad("src")
        .with_context(|| format!("Failed to get src pad of the {} element", element.name()))?;

    let caps = pad
        .caps()
        .with_context(|| format!("No caps on src pad of the {} element", element.name()))?;

    let structure = caps
        .structure(0)
        .with_context(|| format!("No structure on the pad of the {} element", element.name()))?;

    // ...then pull the codec_data buffer out of that structure.
    let codec_data = structure.get::<Buffer>("codec_data").with_context(|| {
        format!(
            "The src pad of the {} element did not have a 'codec_data' field",
            element.name()
        )
    })?;

    let map = codec_data.map_readable().with_context(|| {
        format!(
            "Element {}'s codec data's buffer could not be made readable",
            element.name()
        )
    })?;

    // Copy out of the mapped gstreamer memory into an owned Bytes value.
    let bytes = Bytes::copy_from_slice(map.as_slice());

    Ok(bytes)
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/endpoints/mod.rs | mmids-gstreamer/src/endpoints/mod.rs | //! Endpoints that interact with gstreamer.
pub mod gst_transcoder;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/endpoints/gst_transcoder/transcoding_manager.rs | mmids-gstreamer/src/endpoints/gst_transcoder/transcoding_manager.rs | use crate::encoders::{AudioEncoder, VideoEncoder};
use futures::StreamExt;
use gstreamer::bus::BusStream;
use gstreamer::prelude::*;
use gstreamer::{MessageView, Pipeline, State};
use mmids_core::actor_utils::{notify_on_unbounded_closed, notify_on_unbounded_recv};
use mmids_core::workflows::metadata::{MetadataKey, MetadataValue};
use mmids_core::workflows::{MediaNotificationContent, MediaType};
use mmids_core::VideoTimestamp;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tracing::{error, info, instrument};
use uuid::Uuid;
/// Requests that can be sent to a running transcode manager.
pub enum TranscodeManagerRequest {
    /// Asks the manager to stop transcoding and shut down
    StopTranscode,
}
/// Everything required to start a transcode manager.
pub struct TranscoderParams {
    /// Identifier used for logging and tracing this transcode process
    pub process_id: Uuid,
    /// Encoder that incoming video payloads are pushed into
    pub video_encoder: Box<dyn VideoEncoder + Send>,
    /// Encoder that incoming audio payloads are pushed into
    pub audio_encoder: Box<dyn AudioEncoder + Send>,
    /// Channel on which media to be transcoded arrives
    pub inbound_media: UnboundedReceiver<MediaNotificationContent>,
    /// Channel for transcoded output; the manager only watches it for closure here
    pub outbound_media: UnboundedSender<MediaNotificationContent>,
    /// The gstreamer pipeline driving the transcode
    pub pipeline: Pipeline,
}
/// Notifications the manager's run loop reacts to.
enum TranscoderFutureResult {
    /// The endpoint dropped its request channel
    EndpointGone,
    /// All inbound media senders were dropped
    InboundMediaSendersGone,
    /// The consumer of outbound media went away
    OutboundMediaReceiverGone,
    /// A request arrived from the endpoint
    RequestReceived(TranscodeManagerRequest),
    /// Media arrived for transcoding
    MediaReceived(MediaNotificationContent),
    /// The gstreamer bus stream ended
    GstBusClosed,
    /// The pipeline signalled end of stream
    GstEosReceived,
    /// The pipeline raised an error
    GstErrorReceived(GstError),
}
/// Details pulled out of a gstreamer error bus message.
struct GstError {
    /// Path of the element that raised the error
    source_name: String,
    /// Human-readable error text
    error_description: String,
    /// Optional extra debug details from gstreamer
    debug_info: Option<String>,
}
/// Spawns a transcode manager actor for the given parameters and returns the channel
/// used to send it requests.
pub fn start_transcode_manager(
    parameters: TranscoderParams,
    pts_offset_metadata_key: MetadataKey,
) -> UnboundedSender<TranscodeManagerRequest> {
    let (request_sender, request_receiver) = unbounded_channel();
    let (internal_sender, internal_receiver) = unbounded_channel();

    let manager = TranscodeManager::new(
        parameters,
        request_receiver,
        pts_offset_metadata_key,
        internal_sender,
    );

    tokio::spawn(manager.run(internal_receiver));

    request_sender
}
/// Actor that owns a single transcode: the gstreamer pipeline plus its encoders.
struct TranscodeManager {
    /// Sender side of the actor's notification channel (also handed to the bus task)
    internal_sender: UnboundedSender<TranscoderFutureResult>,
    /// Set when a stop request comes in; causes the run loop to exit
    termination_requested: bool,
    /// Identifier used in logging/tracing for this transcode
    id: Uuid,
    video_encoder: Box<dyn VideoEncoder + Send>,
    audio_encoder: Box<dyn AudioEncoder + Send>,
    pipeline: Pipeline,
    /// Metadata key used to read a video payload's pts offset
    pts_offset_metadata_key: MetadataKey,
}
impl TranscodeManager {
fn new(
parameters: TranscoderParams,
receiver: UnboundedReceiver<TranscodeManagerRequest>,
pts_offset_metadata_key: MetadataKey,
actor_sender: UnboundedSender<TranscoderFutureResult>,
) -> TranscodeManager {
notify_on_unbounded_recv(
receiver,
actor_sender.clone(),
TranscoderFutureResult::RequestReceived,
|| TranscoderFutureResult::EndpointGone,
);
notify_on_unbounded_closed(parameters.outbound_media, actor_sender.clone(), || {
TranscoderFutureResult::OutboundMediaReceiverGone
});
notify_on_unbounded_recv(
parameters.inbound_media,
actor_sender.clone(),
TranscoderFutureResult::MediaReceived,
|| TranscoderFutureResult::InboundMediaSendersGone,
);
TranscodeManager {
internal_sender: actor_sender,
termination_requested: false,
id: parameters.process_id,
video_encoder: parameters.video_encoder,
audio_encoder: parameters.audio_encoder,
pipeline: parameters.pipeline,
pts_offset_metadata_key,
}
}
#[instrument(
name = "Transcode Manager Execution",
skip_all,
fields(transcoding_process_id = %self.id),
)]
async fn run(mut self, mut actor_receiver: UnboundedReceiver<TranscoderFutureResult>) {
info!("Starting transcoding process");
match self.pipeline.set_state(State::Playing) {
Ok(_) => (),
Err(error) => {
error!("Failed to set gstreamer pipeline to playing: {}", error);
return;
}
}
let bus = match self.pipeline.bus() {
Some(bus) => bus,
None => {
error!("Failed to get pipeline bus. Shouldn't happen!");
return;
}
};
notify_bus_message(bus.stream(), self.internal_sender.clone());
while let Some(result) = actor_receiver.recv().await {
match result {
TranscoderFutureResult::EndpointGone => {
info!("Endpoint gone");
break;
}
TranscoderFutureResult::InboundMediaSendersGone => {
info!("No more media senders");
break;
}
TranscoderFutureResult::OutboundMediaReceiverGone => {
info!("Outbound media receiver gone");
break;
}
TranscoderFutureResult::MediaReceived(media) => {
self.handle_media(media);
}
TranscoderFutureResult::RequestReceived(request) => {
self.handle_request(request);
}
TranscoderFutureResult::GstBusClosed => {
info!("Gstreamer bus closed");
break;
}
TranscoderFutureResult::GstEosReceived => {
info!("Gstreamer pipeline sent end of stream signal");
break;
}
TranscoderFutureResult::GstErrorReceived(error) => {
error!(
gst_src = %error.source_name,
gst_error = %error.error_description,
"GStreamer threw an error from element '{}': {} (debug: {})",
error.source_name, error.error_description,
error.debug_info.as_ref().unwrap_or(&("".to_string())),
);
break;
}
}
if self.termination_requested {
info!("Termination requested");
let _ = self.pipeline.set_state(State::Null);
break;
}
}
info!("Stopping transcoding process");
}
fn handle_media(&mut self, media: MediaNotificationContent) {
if let MediaNotificationContent::MediaPayload {
timestamp,
payload_type,
media_type,
data,
metadata,
is_required_for_decoding,
} = media
{
match media_type {
MediaType::Audio => {
let result = self.audio_encoder.push_data(
payload_type,
data,
timestamp,
is_required_for_decoding,
);
if let Err(error) = result {
error!("Failed to push media to audio encoder: {}", error);
self.termination_requested = true;
}
}
MediaType::Video => {
let pts_offset = metadata
.iter()
.filter(|m| m.key() == self.pts_offset_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::I32(num) => Some(num),
_ => None,
})
.next()
.unwrap_or_default();
let pts_duration =
Duration::from_millis(timestamp.as_millis() as u64 + pts_offset as u64);
let video_timestamp = VideoTimestamp::from_durations(timestamp, pts_duration);
let result = self.video_encoder.push_data(
payload_type,
data,
video_timestamp,
is_required_for_decoding,
);
if let Err(error) = result {
error!("Failed to push media to video encoder: {}", error);
self.termination_requested = true;
}
}
MediaType::Other => (), // ignore non audio/video types
}
}
}
fn handle_request(&mut self, request: TranscodeManagerRequest) {
match request {
TranscodeManagerRequest::StopTranscode => {
self.termination_requested = true;
}
}
}
}
/// Spawns a task that forwards the relevant gstreamer bus messages (end of stream and
/// errors) into the manager's notification channel. The task exits when the bus stream
/// ends or when the manager's channel closes.
fn notify_bus_message(mut bus: BusStream, actor_sender: UnboundedSender<TranscoderFutureResult>) {
    tokio::spawn(async move {
        loop {
            tokio::select! {
                result = bus.next() => {
                    match result {
                        Some(message) => {
                            match message.view() {
                                MessageView::Eos(..) => {
                                    let _ = actor_sender.send(TranscoderFutureResult::GstEosReceived);
                                },
                                MessageView::Error(error) => {
                                    // Extract the raising element's path and descriptions
                                    // into our own error type before forwarding.
                                    let result = TranscoderFutureResult::GstErrorReceived(GstError {
                                        source_name: error
                                            .src()
                                            .map(|s| s.path_string().to_string())
                                            .unwrap_or_else(|| "<none>".to_string()),
                                        error_description: error.error().to_string(),
                                        debug_info: error.debug(),
                                    });

                                    let _ = actor_sender.send(result);
                                }

                                // Other bus messages are not relevant to the manager.
                                _ => (),
                            }
                        }

                        None => {
                            // Bus stream ended; tell the manager and stop the task.
                            let _ = actor_sender.send(TranscoderFutureResult::GstBusClosed);
                            break;
                        }
                    }
                }

                // Manager is gone, so there is no one left to notify.
                _ = actor_sender.closed() => {
                    break;
                }
            }
        }
    });
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/endpoints/gst_transcoder/mod.rs | mmids-gstreamer/src/endpoints/gst_transcoder/mod.rs | mod transcoding_manager;
use crate::encoders::EncoderFactory;
use crate::endpoints::gst_transcoder::transcoding_manager::{
start_transcode_manager, TranscodeManagerRequest, TranscoderParams,
};
use crate::GSTREAMER_INIT_RESULT;
use gstreamer::{glib, Pipeline};
use mmids_core::actor_utils::{notify_on_unbounded_closed, notify_on_unbounded_recv};
use mmids_core::workflows::metadata::MetadataKey;
use mmids_core::workflows::MediaNotificationContent;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tracing::{error, info, instrument, warn};
use uuid::Uuid;
/// Requests that can be made to the gstreamer transcoding endpoint
pub enum GstTranscoderRequest {
    /// Makes a request for the endpoint to start transcoding
    StartTranscoding {
        /// A unique identifier that is associated with this transcoding request. Used for logging
        /// and to associate stop transcoding requests.
        id: Uuid,

        /// The channel in which audio and video data will come in for the transcoding process
        input_media: UnboundedReceiver<MediaNotificationContent>,

        /// The name of the video encoder to use for transcoding. Must match a valid name
        /// registered with the encoder factory
        video_encoder_name: String,

        /// The name of the audio encoder to use for transcoding. Must match a valid name
        /// registered with the encoder factory
        audio_encoder_name: String,

        /// Parameters to pass to the audio encoder
        audio_parameters: HashMap<String, Option<String>>,

        /// Parameters to pass to the video encoder
        video_parameters: HashMap<String, Option<String>>,

        /// Channel to send responses and notifications to
        notification_channel: UnboundedSender<GstTranscoderNotification>,
    },

    /// Makes a request for the endpoint to stop transcoding
    StopTranscoding {
        /// The identifier of the transcoding process to stop.
        id: Uuid,
    },
}
/// Notifications the transcoding endpoint can raise
pub enum GstTranscoderNotification {
    /// Notification that transcoding has started
    TranscodingStarted {
        /// Channel in which resulting audio and video data will be sent to
        output_media: UnboundedReceiver<MediaNotificationContent>,
    },

    /// Notification that transcoding stopped
    TranscodingStopped(GstTranscoderStoppedCause),
}

/// Identifies which encoder a failure relates to.
#[derive(Debug, PartialEq, Eq)]
pub enum EncoderType {
    /// The failure relates to the video encoder
    Video,

    /// The failure relates to the audio encoder
    Audio,
}

/// Reasons transcoding have stopped
#[derive(Debug, PartialEq, Eq)]
pub enum GstTranscoderStoppedCause {
    /// No encoder generator has been registered with the encoder factory with the specified name
    InvalidEncoderName {
        encoder_type: EncoderType,
        name: String,
    },

    /// An error occurred when the encoder was attempted to be created, either due to an error
    /// with gstreamer or with invalid parameters
    EncoderCreationFailure {
        /// What type of encoder that failed
        encoder_type: EncoderType,

        /// Error description of why a failure occurred.
        details: String,
    },

    /// Transcoding was requested to be started with an id that is already active
    IdAlreadyActive(Uuid),

    /// Transcoding stopped because a request was made for it to stop.
    StopRequested,

    /// The transcoding process was unexpectedly terminated without an explicit error being raised.
    /// Will probably need to look in logs to get more info on why. This should be rare.
    UnexpectedlyTerminated,
}

/// Errors that can occur when attempting to start the endpoint
#[derive(thiserror::Error, Debug)]
pub enum EndpointStartError {
    /// Gstreamer initialization failed
    #[error("Gstreamer failed to initialize")]
    GstreamerError(#[from] &'static glib::Error),
}

// NOTE(review): field set mirrors `GstTranscoderRequest::StartTranscoding`; presumably
// this is the endpoint's internal copy of a start request — confirm against the
// endpoint actor's usage (not visible in this chunk).
struct StartTranscodeParams {
    id: Uuid,
    notification_channel: UnboundedSender<GstTranscoderNotification>,
    input_media: UnboundedReceiver<MediaNotificationContent>,
    video_encoder_name: String,
    video_parameters: HashMap<String, Option<String>>,
    audio_encoder_name: String,
    audio_parameters: HashMap<String, Option<String>>,
}
/// Starts the gstreamer transcode process, and returns a channel in which communication with the
/// endpoint can be made.
pub fn start_gst_transcoder(
encoder_factory: Arc<EncoderFactory>,
pts_offset_metadata_key: MetadataKey,
) -> Result<UnboundedSender<GstTranscoderRequest>, EndpointStartError> {
let (sender, receiver) = unbounded_channel();
let (actor_sender, actor_receiver) = unbounded_channel();
let actor = EndpointActor::new(
receiver,
encoder_factory,
pts_offset_metadata_key,
actor_sender,
)?;
tokio::spawn(actor.run(actor_receiver));
Ok(sender)
}
enum EndpointFuturesResult {
AllConsumersGone,
RequestReceived(GstTranscoderRequest),
TranscodeManagerGone(Uuid),
}
struct ActiveTranscode {
sender: UnboundedSender<TranscodeManagerRequest>,
notification_channel: UnboundedSender<GstTranscoderNotification>,
}
struct EndpointActor {
internal_sender: UnboundedSender<EndpointFuturesResult>,
active_transcodes: HashMap<Uuid, ActiveTranscode>,
encoder_factory: Arc<EncoderFactory>,
pts_offset_metadata_key: MetadataKey,
}
impl EndpointActor {
fn new(
receiver: UnboundedReceiver<GstTranscoderRequest>,
encoder_factory: Arc<EncoderFactory>,
pts_offset_metadata_key: MetadataKey,
actor_sender: UnboundedSender<EndpointFuturesResult>,
) -> Result<EndpointActor, EndpointStartError> {
(*GSTREAMER_INIT_RESULT).as_ref()?;
notify_on_unbounded_recv(
receiver,
actor_sender.clone(),
EndpointFuturesResult::RequestReceived,
|| EndpointFuturesResult::AllConsumersGone,
);
Ok(EndpointActor {
internal_sender: actor_sender,
active_transcodes: HashMap::new(),
encoder_factory,
pts_offset_metadata_key,
})
}
#[instrument(name = "GstTranscodeEndpoint Execution", skip_all)]
async fn run(mut self, mut actor_receiver: UnboundedReceiver<EndpointFuturesResult>) {
info!("Starting endpoint");
while let Some(future) = actor_receiver.recv().await {
match future {
EndpointFuturesResult::AllConsumersGone => {
info!("All consumers gone");
break;
}
EndpointFuturesResult::RequestReceived(request) => {
self.handle_request(request);
}
EndpointFuturesResult::TranscodeManagerGone(id) => {
if let Some(details) = self.active_transcodes.remove(&id) {
info!("Transcode process {} stopped", id);
let _ = details.notification_channel.send(
GstTranscoderNotification::TranscodingStopped(
GstTranscoderStoppedCause::UnexpectedlyTerminated,
),
);
}
}
}
}
info!("Closing endpoint");
}
fn handle_request(&mut self, request: GstTranscoderRequest) {
match request {
GstTranscoderRequest::StartTranscoding {
id,
notification_channel,
input_media,
video_encoder_name,
video_parameters,
audio_encoder_name,
audio_parameters,
} => {
self.handle_start_transcode_request(StartTranscodeParams {
id,
notification_channel,
input_media,
video_encoder_name,
video_parameters,
audio_encoder_name,
audio_parameters,
});
}
GstTranscoderRequest::StopTranscoding { id } => {
info!("Requested transcoding process id {} stopped", id);
if let Some(transcode) = self.active_transcodes.remove(&id) {
let _ = transcode.notification_channel.send(
GstTranscoderNotification::TranscodingStopped(
GstTranscoderStoppedCause::StopRequested,
),
);
let _ = transcode
.sender
.send(TranscodeManagerRequest::StopTranscode);
}
}
}
}
fn handle_start_transcode_request(&mut self, params: StartTranscodeParams) {
if self.active_transcodes.contains_key(¶ms.id) {
warn!(
"Transcoding requested with id {}, but that id is already active",
params.id
);
let _ =
params
.notification_channel
.send(GstTranscoderNotification::TranscodingStopped(
GstTranscoderStoppedCause::IdAlreadyActive(params.id),
));
return;
}
let (outbound_media_sender, outbound_media_receiver) = unbounded_channel();
let pipeline_name = format!("transcode_pipeline_{}", params.id);
let pipeline = Pipeline::new(Some(pipeline_name.as_str()));
let video_encoder = self.encoder_factory.get_video_encoder(
params.video_encoder_name.clone(),
&pipeline,
¶ms.video_parameters,
outbound_media_sender.clone(),
);
let video_encoder = match video_encoder {
Ok(encoder) => encoder,
Err(error) => {
error!(
"Failed to create the {} video encoder: {:?}",
params.video_encoder_name, error,
);
let _ = params.notification_channel.send(
GstTranscoderNotification::TranscodingStopped(
GstTranscoderStoppedCause::EncoderCreationFailure {
encoder_type: EncoderType::Video,
details: format!("{:?}", error),
},
),
);
return;
}
};
let audio_encoder = self.encoder_factory.get_audio_encoder(
params.audio_encoder_name.clone(),
&pipeline,
¶ms.audio_parameters,
outbound_media_sender.clone(),
);
let audio_encoder = match audio_encoder {
Ok(encoder) => encoder,
Err(error) => {
error!(
"Failed to create the {} audio encoder: {:?}",
params.audio_encoder_name, error,
);
let _ = params.notification_channel.send(
GstTranscoderNotification::TranscodingStopped(
GstTranscoderStoppedCause::EncoderCreationFailure {
encoder_type: EncoderType::Audio,
details: format!("{:?}", error),
},
),
);
return;
}
};
let parameters = TranscoderParams {
pipeline,
video_encoder,
audio_encoder,
inbound_media: params.input_media,
outbound_media: outbound_media_sender,
process_id: params.id,
};
let manager = start_transcode_manager(parameters, self.pts_offset_metadata_key);
let _ = params
.notification_channel
.send(GstTranscoderNotification::TranscodingStarted {
output_media: outbound_media_receiver,
});
notify_on_unbounded_closed(manager.clone(), self.internal_sender.clone(), move || {
EndpointFuturesResult::TranscodeManagerGone(params.id)
});
self.active_transcodes.insert(
params.id,
ActiveTranscode {
sender: manager,
notification_channel: params.notification_channel,
},
);
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/video_copy.rs | mmids-gstreamer/src/encoders/video_copy.rs | use crate::encoders::{SampleResult, VideoEncoder, VideoEncoderGenerator};
use crate::utils::create_gst_element;
use anyhow::{anyhow, Context, Result};
use bytes::{Bytes, BytesMut};
use gstreamer::prelude::*;
use gstreamer::{Element, FlowError, FlowSuccess, Pipeline};
use gstreamer_app::{AppSink, AppSinkCallbacks, AppSrc};
use mmids_core::codecs::VIDEO_CODEC_H264_AVC;
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKey, MetadataValue,
};
use mmids_core::workflows::{MediaNotificationContent, MediaType};
use mmids_core::VideoTimestamp;
use std::collections::HashMap;
use std::iter;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
/// Creates an encoder that passes video packets through to the output channel without modification
pub struct VideoCopyEncoderGenerator {
pub pts_offset_metadata_key: MetadataKey,
}
impl VideoEncoderGenerator for VideoCopyEncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
_parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn VideoEncoder + Send>> {
Ok(Box::new(VideoCopyEncoder::new(
media_sender,
pipeline,
self.pts_offset_metadata_key,
)?))
}
}
struct CodecInfo {
sequence_header: Bytes,
}
struct VideoCopyEncoder {
source: AppSrc,
codec_data: Arc<Mutex<Option<CodecInfo>>>,
}
impl VideoCopyEncoder {
fn new(
media_sender: UnboundedSender<MediaNotificationContent>,
pipeline: &Pipeline,
pts_offset_metadata_key: MetadataKey,
) -> Result<VideoCopyEncoder> {
// While we won't be mutating the stream, we want to pass it through a gstreamer pipeline
// so the packets will be synchronized with audio in case of transcoding delay.
let appsrc = create_gst_element("appsrc")?;
let queue = create_gst_element("queue")?;
let appsink = create_gst_element("appsink")?;
pipeline
.add_many(&[&appsrc, &queue, &appsink])
.with_context(|| "Failed to add video copy encoder's elements to the pipeline")?;
Element::link_many(&[&appsrc, &queue, &appsink])
.with_context(|| "Failed to link video copy encoder's elements together")?;
let appsink = appsink
.dynamic_cast::<AppSink>()
.map_err(|_| anyhow!("Video copy encoder's appsink could not be casted"))?;
let codec_data: Arc<Mutex<Option<CodecInfo>>> = Arc::new(Mutex::new(None));
let copy_of_codec_data = codec_data.clone();
let mut sent_codec_data = false;
let mut codec_data_error_raised = false;
let mut metadata_buffer = BytesMut::new();
appsink.set_callbacks(
AppSinkCallbacks::builder()
.new_sample(move |sink| {
if !sent_codec_data {
let data = match copy_of_codec_data.lock() {
Ok(data) => data,
Err(_) => {
if !codec_data_error_raised {
error!("codec data lock was poisoned");
codec_data_error_raised = true;
}
return Err(FlowError::Error);
}
};
if let Some(info) = &*data {
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
timestamp: Duration::new(0, 0),
is_required_for_decoding: true,
data: info.sequence_header.clone(),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut metadata_buffer),
});
sent_codec_data = true;
} else if !codec_data_error_raised {
error!("Received data prior to codec data being set. This shouldn't happen");
codec_data_error_raised = true;
}
}
let sample = SampleResult::from_sink(sink)
.map_err(|_| FlowError::CustomError)?;
let timestamp = sample.to_video_timestamp();
let pts_offset = MetadataEntry::new(
pts_offset_metadata_key,
MetadataValue::I32(timestamp.pts_offset()),
&mut metadata_buffer,
).unwrap(); // Can only panic if the key is not for an i32
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
timestamp: timestamp.dts(),
is_required_for_decoding: false,
data: sample.content,
metadata: MediaPayloadMetadataCollection::new(
[pts_offset].into_iter(),
&mut metadata_buffer,
),
});
Ok(FlowSuccess::Ok)
})
.build(),
);
let appsrc = appsrc
.dynamic_cast::<AppSrc>()
.map_err(|_| anyhow!("Video copy encoder's appsrc could not be casted"))?;
Ok(VideoCopyEncoder {
source: appsrc,
codec_data,
})
}
}
impl VideoEncoder for VideoCopyEncoder {
fn push_data(
&self,
_payload_type: Arc<String>,
data: Bytes,
timestamp: VideoTimestamp,
is_sequence_header: bool,
) -> Result<()> {
if is_sequence_header {
let mut codec_data = self
.codec_data
.lock()
.map_err(|_| anyhow!("Video copy encoder's lock was poisoned"))?;
*codec_data = Some(CodecInfo {
sequence_header: data,
})
} else {
let buffer =
crate::utils::set_gst_buffer(data, Some(timestamp.dts()), Some(timestamp.pts()))
.with_context(|| "Failed to set buffer")?;
self.source
.push_buffer(buffer)
.with_context(|| "Could not push buffer into copy encoder's source")?;
}
Ok(())
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/audio_copy.rs | mmids-gstreamer/src/encoders/audio_copy.rs | use crate::encoders::{AudioEncoder, AudioEncoderGenerator, SampleResult};
use crate::utils::{create_gst_element, set_gst_buffer};
use anyhow::{anyhow, Context, Result};
use bytes::{Bytes, BytesMut};
use gstreamer::prelude::*;
use gstreamer::{Element, FlowError, FlowSuccess, Pipeline};
use gstreamer_app::{AppSink, AppSinkCallbacks, AppSrc};
use mmids_core::workflows::metadata::MediaPayloadMetadataCollection;
use mmids_core::workflows::{MediaNotificationContent, MediaType};
use std::collections::HashMap;
use std::iter;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
/// Generates an audio encoder that passes audio packets to the output channel without modification.
pub struct AudioCopyEncoderGenerator {}
impl AudioEncoderGenerator for AudioCopyEncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
_parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn AudioEncoder + Send>> {
Ok(Box::new(AudioCopyEncoder::new(media_sender, pipeline)?))
}
}
struct CodecInfo {
payload_type: Arc<String>,
sequence_header: Bytes,
}
struct AudioCopyEncoder {
source: AppSrc,
codec_data: Arc<Mutex<Option<CodecInfo>>>,
}
impl AudioCopyEncoder {
fn new(
media_sender: UnboundedSender<MediaNotificationContent>,
pipeline: &Pipeline,
) -> Result<AudioCopyEncoder> {
// While we won't be mutating the stream, we want to pass it through a gstreamer pipeline
// so the packets will be synchronized with possibly transcoded video delay.
let appsrc = create_gst_element("appsrc")?;
let queue = create_gst_element("queue")?;
let appsink = create_gst_element("appsink")?;
pipeline
.add_many(&[&appsrc, &queue, &appsink])
.with_context(|| "Failed to add audio copy encoder's elements to the pipeline")?;
Element::link_many(&[&appsrc, &queue, &appsink])
.with_context(|| "Failed to link audio copy encoder's elements together")?;
let appsink = appsink
.dynamic_cast::<AppSink>()
.map_err(|_| anyhow!("Audio copy encoder's appsink could not be casted"))?;
let codec_data: Arc<Mutex<Option<CodecInfo>>> = Arc::new(Mutex::new(None));
let copy_of_codec_data = codec_data.clone();
let mut sent_codec_data = false;
let mut codec_data_error_raised = false;
let mut metadata_buffer = BytesMut::new();
appsink.set_callbacks(
AppSinkCallbacks::builder()
.new_sample(move |sink| {
let data = match copy_of_codec_data.lock() {
Ok(data) => data,
Err(_) => {
if !codec_data_error_raised {
error!("codec data lock was poisoned");
codec_data_error_raised = true;
}
return Err(FlowError::Error);
}
};
let info = match &*data {
Some(info) => info,
None => {
if !codec_data_error_raised {
error!("Received data prior to codec data being set. This shouldn't happen");
codec_data_error_raised = true;
}
return Err(FlowError::Error);
}
};
if !sent_codec_data {
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
payload_type: info.payload_type.clone(),
media_type: MediaType::Audio,
data: info.sequence_header.clone(),
timestamp: Duration::new(0, 0),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut metadata_buffer),
is_required_for_decoding: true,
});
sent_codec_data = true;
}
let sample = SampleResult::from_sink(sink)
.map_err(|_| FlowError::CustomError)?;
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
payload_type: info.payload_type.clone(),
media_type: MediaType::Audio,
data: sample.content,
timestamp: sample.dts.unwrap_or(Duration::new(0, 0)),
metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut metadata_buffer),
is_required_for_decoding: false,
});
Ok(FlowSuccess::Ok)
})
.build(),
);
let appsrc = appsrc
.dynamic_cast::<AppSrc>()
.map_err(|_| anyhow!("Audio copy encoder's appsrc could not be casted"))?;
Ok(AudioCopyEncoder {
source: appsrc,
codec_data,
})
}
}
impl AudioEncoder for AudioCopyEncoder {
fn push_data(
&self,
payload_type: Arc<String>,
data: Bytes,
timestamp: Duration,
is_sequence_header: bool,
) -> Result<()> {
if is_sequence_header {
let mut codec_data = self
.codec_data
.lock()
.map_err(|_| anyhow!("Audio copy encoder's lock was poisoned"))?;
*codec_data = Some(CodecInfo {
payload_type,
sequence_header: data,
})
} else {
let buffer = set_gst_buffer(data, Some(timestamp), None)
.with_context(|| "Failed to set audio buffer")?;
self.source
.push_buffer(buffer)
.with_context(|| "Could not push buffer into audio copy encoder's source")?;
}
Ok(())
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/video_drop.rs | mmids-gstreamer/src/encoders/video_drop.rs | use crate::encoders::{VideoEncoder, VideoEncoderGenerator};
use bytes::Bytes;
use gstreamer::Pipeline;
use mmids_core::workflows::MediaNotificationContent;
use mmids_core::VideoTimestamp;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
/// Creates a video encoder that drops audio.
pub struct VideoDropEncoderGenerator {}
impl VideoEncoderGenerator for VideoDropEncoderGenerator {
fn create(
&self,
_pipeline: &Pipeline,
_parameters: &HashMap<String, Option<String>>,
_media_sender: UnboundedSender<MediaNotificationContent>,
) -> anyhow::Result<Box<dyn VideoEncoder + Send>> {
Ok(Box::new(VideoDropEncoder {}))
}
}
struct VideoDropEncoder {}
impl VideoEncoder for VideoDropEncoder {
fn push_data(
&self,
_payload: Arc<String>,
_data: Bytes,
_timestamp: VideoTimestamp,
_is_sequence_header: bool,
) -> anyhow::Result<()> {
// Do nothing since we want to drop the video stream
Ok(())
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/audio_avenc_aac.rs | mmids-gstreamer/src/encoders/audio_avenc_aac.rs | use crate::encoders::{AudioEncoder, AudioEncoderGenerator, SampleResult};
use crate::utils::{
create_gst_element, get_codec_data_from_element, set_gst_buffer,
set_source_audio_sequence_header,
};
use anyhow::{anyhow, Context, Result};
use bytes::{Bytes, BytesMut};
use gstreamer::prelude::*;
use gstreamer::{Element, FlowError, FlowSuccess, Pipeline};
use gstreamer_app::{AppSink, AppSinkCallbacks, AppSrc};
use mmids_core::codecs::AUDIO_CODEC_AAC_RAW;
use mmids_core::workflows::metadata::MediaPayloadMetadataCollection;
use mmids_core::workflows::{MediaNotificationContent, MediaType};
use std::collections::HashMap;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{error, warn};
/// Creates an audio encoder that uses the gstreamer `avenc_aac` encoder to encode audio into aac.
///
/// This encoder supports the following optional parameters:
/// * `bitrate` - The average **bytes** per second to target.
pub struct AvencAacEncoderGenerator {}
impl AudioEncoderGenerator for AvencAacEncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn AudioEncoder + Send>> {
Ok(Box::new(AvencAacEncoder::new(
media_sender,
parameters,
pipeline,
)?))
}
}
struct AvencAacEncoder {
source: AppSrc,
}
impl AvencAacEncoder {
fn new(
media_sender: UnboundedSender<MediaNotificationContent>,
parameters: &HashMap<String, Option<String>>,
pipeline: &Pipeline,
) -> Result<AvencAacEncoder> {
let bitrate = get_number(parameters, "bitrate");
let appsrc = create_gst_element("appsrc")?;
let queue = create_gst_element("queue")?;
let decodebin = create_gst_element("decodebin")?;
let convert = create_gst_element("audioconvert")?;
let encoder = create_gst_element("avenc_aac")?;
let output_parser = create_gst_element("aacparse")?;
let appsink = create_gst_element("appsink")?;
pipeline
.add_many(&[
&appsrc,
&queue,
&decodebin,
&encoder,
&output_parser,
&appsink,
&convert,
])
.with_context(|| "Failed to add avenc_aac encoder's elements to the pipeline")?;
Element::link_many(&[&appsrc, &queue, &decodebin])
.with_context(|| "Failed to link appsrc -> queue -> decodebin for avenc_aac encoder")?;
Element::link_many(&[&convert, &encoder, &output_parser, &appsink])
.with_context(|| "Failed to link avenc_aac -> aacparse -> appsink")?;
// decodebin's pad is added dynamically
let link_destination = convert;
decodebin.connect_pad_added(move |src, src_pad| {
match src.link_pads(Some(&src_pad.name()), &link_destination.clone(), None) {
Ok(_) => (),
Err(_) => error!(
"Failed to link `decodebin`'s {} pad to the avenc_aac element",
src_pad.name()
),
}
});
if let Some(bitrate) = bitrate {
encoder.set_property("bitrate", bitrate);
}
let appsink = appsink
.dynamic_cast::<AppSink>()
.map_err(|_| anyhow!("appsink could not be cast to `AppSink`"))?;
let mut sent_codec_data = false;
let mut metadata_buffer = BytesMut::new();
appsink.set_callbacks(
AppSinkCallbacks::builder()
.new_sample(move |sink| {
match sample_received(
sink,
&mut sent_codec_data,
&output_parser,
media_sender.clone(),
&mut metadata_buffer,
) {
Ok(_) => Ok(FlowSuccess::Ok),
Err(error) => {
error!("new_sample callback error received: {:?}", error);
Err(FlowError::Error)
}
}
})
.build(),
);
let appsrc = appsrc
.dynamic_cast::<AppSrc>()
.map_err(|_| anyhow!("source element could not be cast to `AppSrc`"))?;
Ok(AvencAacEncoder { source: appsrc })
}
}
impl AudioEncoder for AvencAacEncoder {
fn push_data(
&self,
payload_type: Arc<String>,
data: Bytes,
timestamp: Duration,
is_sequence_header: bool,
) -> Result<()> {
let buffer = set_gst_buffer(data, Some(timestamp), None)
.with_context(|| "Failed to create aac buffer")?;
if is_sequence_header {
set_source_audio_sequence_header(&self.source, payload_type, buffer)
.with_context(|| " Failed to set aac sequence header into pipeline")?;
} else {
self.source
.push_buffer(buffer)
.with_context(|| "Failed to push buffer into audio source")?;
}
Ok(())
}
}
fn get_number(parameters: &HashMap<String, Option<String>>, key: &str) -> Option<i32> {
if let Some(Some(inner)) = parameters.get(key) {
match inner.parse() {
Ok(num) => return Some(num),
Err(_) => warn!("Parameter {key} had a value of '{inner}', which is not a number"),
}
}
None
}
fn sample_received(
sink: &AppSink,
codec_data_sent: &mut bool,
output_parser: &Element,
media_sender: UnboundedSender<MediaNotificationContent>,
metadata_buffer: &mut BytesMut,
) -> Result<()> {
if !*codec_data_sent {
// Pull the codec_data out of the output parser to get the sequence header
let codec_data = get_codec_data_from_element(output_parser)?;
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
media_type: MediaType::Audio,
timestamp: Duration::from_millis(0),
is_required_for_decoding: true,
data: codec_data,
metadata: MediaPayloadMetadataCollection::new(iter::empty(), metadata_buffer),
});
*codec_data_sent = true;
}
let sample = SampleResult::from_sink(sink).with_context(|| "Failed to get aac sample")?;
if let Some(dts) = sample.dts {
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
media_type: MediaType::Audio,
timestamp: dts,
is_required_for_decoding: false,
data: sample.content,
metadata: MediaPayloadMetadataCollection::new(iter::empty(), metadata_buffer),
});
Ok(())
} else {
Err(anyhow!(
"No dts found for AAC sample, and thus timestamp is unknown!"
))
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/audio_drop.rs | mmids-gstreamer/src/encoders/audio_drop.rs | use crate::encoders::{AudioEncoder, AudioEncoderGenerator};
use anyhow::Result;
use bytes::Bytes;
use gstreamer::Pipeline;
use mmids_core::workflows::MediaNotificationContent;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
/// Creates an encoder that drops the audio stream.
pub struct AudioDropEncoderGenerator {}
impl AudioEncoderGenerator for AudioDropEncoderGenerator {
fn create(
&self,
_pipeline: &Pipeline,
_parameters: &HashMap<String, Option<String>>,
_media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn AudioEncoder + Send>> {
Ok(Box::new(AudioDropEncoder {}))
}
}
struct AudioDropEncoder {}
impl AudioEncoder for AudioDropEncoder {
fn push_data(
&self,
_codec: Arc<String>,
_data: Bytes,
_timestamp: Duration,
_is_sequence_header: bool,
) -> Result<()> {
// Do nothing with the data since we are dropping the audio stream
Ok(())
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/mod.rs | mmids-gstreamer/src/encoders/mod.rs | //! An encoder represents a part of a gstreamer pipeline that takes video or audio data, processes
//! it, and then pushes the results out into a tokio channel.
mod audio_avenc_aac;
mod audio_copy;
mod audio_drop;
mod video_copy;
mod video_drop;
mod video_x264;
use anyhow::{Context, Result};
use bytes::Bytes;
use gstreamer::{Format, GenericFormattedValue, Pipeline};
use gstreamer_app::AppSink;
use mmids_core::workflows::MediaNotificationContent;
use mmids_core::VideoTimestamp;
use std::collections::HashMap;
use std::default::Default;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
pub use audio_avenc_aac::AvencAacEncoderGenerator;
pub use audio_copy::AudioCopyEncoderGenerator;
pub use audio_drop::AudioDropEncoderGenerator;
pub use video_copy::VideoCopyEncoderGenerator;
pub use video_drop::VideoDropEncoderGenerator;
pub use video_x264::X264EncoderGenerator;
/// An encoder that processes video in its pipeline. It is expected that each instance of an
/// encoder is used by one stream at a time, even if multiple media streams require the same
/// transcoding parameters.
pub trait VideoEncoder {
/// Pushes a video frame into the encoder's pipeline
fn push_data(
&self,
payload_type: Arc<String>,
data: Bytes,
timestamp: VideoTimestamp,
is_sequence_header: bool,
) -> Result<()>;
}
/// An encoder that processes audio in its pipeline. It is expected that each instance of an
/// encoder is used by one stream at a time, even if multiple media streams require the same
/// transcoding parameters.
pub trait AudioEncoder {
/// Pushes an audio frame into the encoder's pipeline
fn push_data(
&self,
payload_type: Arc<String>,
data: Bytes,
timestamp: Duration,
is_sequence_header: bool,
) -> Result<()>;
}
/// Errors that can occur when registering an encoder with the encoder factory
#[derive(thiserror::Error, Debug)]
pub enum EncoderFactoryRegistrationError {
#[error("An encoder already is registered with the name '{0}'")]
DuplicateName(String),
}
/// Errors that can occur when retrieving an encoder from the encoder factory
#[derive(thiserror::Error, Debug)]
pub enum EncoderFactoryCreationError {
#[error("No encoder exists with the name '{0}'")]
NoEncoderWithName(String),
#[error("Creation of the encoder failed")]
CreationFailed(#[from] anyhow::Error),
}
/// A type that can generate a new instance of a specific video encoder.
pub trait VideoEncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> anyhow::Result<Box<dyn VideoEncoder + Send>>;
}
/// A type that can generate a new instance for a specific audio encoder.
pub trait AudioEncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> anyhow::Result<Box<dyn AudioEncoder + Send>>;
}
/// Allows encoder generators to be registered and be referred to via a name that given at
/// registration time. When an encoder instance is required, the encoder generator requested is
/// invoked and the resulting encoder (or error) is returned.
#[derive(Default)]
pub struct EncoderFactory {
video_encoders: HashMap<String, Box<dyn VideoEncoderGenerator + Send + Sync>>,
audio_encoders: HashMap<String, Box<dyn AudioEncoderGenerator + Send + Sync>>,
}
impl EncoderFactory {
/// Creates a new encoder factory
pub fn new() -> EncoderFactory {
Default::default()
}
/// Registers a video encoder generator that can be invoked with a specific name
pub fn register_video_encoder(
&mut self,
name: &str,
encoder_generator: Box<dyn VideoEncoderGenerator + Send + Sync>,
) -> Result<(), EncoderFactoryRegistrationError> {
if self.video_encoders.contains_key(name) {
return Err(EncoderFactoryRegistrationError::DuplicateName(
name.to_string(),
));
}
self.video_encoders
.insert(name.to_string(), encoder_generator);
Ok(())
}
/// Registers an audio encoder generator that can be invoked with a specific name
pub fn register_audio_encoder(
&mut self,
name: &str,
encoder_generator: Box<dyn AudioEncoderGenerator + Send + Sync>,
) -> Result<(), EncoderFactoryRegistrationError> {
if self.audio_encoders.contains_key(name) {
return Err(EncoderFactoryRegistrationError::DuplicateName(
name.to_string(),
));
}
self.audio_encoders
.insert(name.to_string(), encoder_generator);
Ok(())
}
/// Creates a new instance of a video encoder based on the name it was specified with at
/// registration
pub fn get_video_encoder(
&self,
name: String,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn VideoEncoder + Send>, EncoderFactoryCreationError> {
let generator = match self.video_encoders.get(name.as_str()) {
Some(generator) => generator,
None => return Err(EncoderFactoryCreationError::NoEncoderWithName(name)),
};
let encoder = generator.create(pipeline, parameters, media_sender)?;
Ok(encoder)
}
/// Creates a new instance of an audio encoder based on the name it was specified with at
/// registration
pub fn get_audio_encoder(
&self,
name: String,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn AudioEncoder + Send>, EncoderFactoryCreationError> {
let generator = match self.audio_encoders.get(name.as_str()) {
Some(generator) => generator,
None => return Err(EncoderFactoryCreationError::NoEncoderWithName(name)),
};
let encoder = generator.create(pipeline, parameters, media_sender)?;
Ok(encoder)
}
}
/// Helper struct that contains the result after parsing a sample pulled from an `appsrc` gstreamer
/// element. Only used within encoder implementations.
pub struct SampleResult {
content: Bytes,
dts: Option<Duration>,
pts: Option<Duration>,
}
impl SampleResult {
/// Pulls a sample from the `appsink` element and attempts to parse the contents from it.
pub fn from_sink(sink: &AppSink) -> Result<SampleResult> {
let sample = sink.pull_sample().with_context(|| "Sink had no sample")?;
let buffer = sample.buffer().with_context(|| "Sample had no buffer")?;
let map = buffer
.map_readable()
.with_context(|| "Sample's buffer could not be mapped as readable")?;
let mut dts = buffer.dts();
let mut pts = buffer.pts();
if let Some(segment) = sample.segment() {
// Sometimes the segment has a format of Bytes. Unsure what that means, but if we
// try to convert that to running time it will panic.
if segment.format() == Format::Time {
// Some encoders will have a dts and pts value that does not necessarily start at
// 00:00:00. This is done for various reasons, but for instance the x264 gstreamer
// encoder will start at 1000:00:00 to better handle negative dts for B frames. If we
// use the dts and pts values as is, then players will have weird times showing, and
// sync errors may occur. When this happens the sample will have a segment, and that
// segment can be used to adjust the pts and dts times to be from 00:00:00
if let Some(original) = dts {
if let GenericFormattedValue::Time(Some(adjusted)) =
segment.to_running_time(original)
{
dts = Some(adjusted);
}
}
if let Some(original) = pts {
if let GenericFormattedValue::Time(Some(adjusted)) =
segment.to_running_time(original)
{
pts = Some(adjusted);
}
}
}
}
let dts = dts.map(|x| Duration::from_millis(x.mseconds()));
let pts = pts.map(|x| Duration::from_millis(x.mseconds()));
Ok(SampleResult {
content: Bytes::copy_from_slice(map.as_slice()),
dts,
pts,
})
}
/// Converts the dts and pts from a sample into a video timestamp.
pub fn to_video_timestamp(&self) -> VideoTimestamp {
match (&self.dts, &self.pts) {
(None, None) => VideoTimestamp::from_zero(),
(Some(dts), Some(pts)) => VideoTimestamp::from_durations(*dts, *pts),
(Some(dts), None) => VideoTimestamp::from_durations(*dts, Duration::from_millis(0)),
(None, Some(pts)) => VideoTimestamp::from_durations(*pts, Duration::from_millis(0)),
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/encoders/video_x264.rs | mmids-gstreamer/src/encoders/video_x264.rs | use crate::encoders::{SampleResult, VideoEncoder, VideoEncoderGenerator};
use crate::utils::{create_gst_element, get_codec_data_from_element};
use anyhow::{anyhow, Context, Result};
use bytes::{Bytes, BytesMut};
use gstreamer::prelude::*;
use gstreamer::{Caps, Element, FlowError, FlowSuccess, Fraction, Pipeline};
use gstreamer_app::{AppSink, AppSinkCallbacks, AppSrc};
use mmids_core::codecs::VIDEO_CODEC_H264_AVC;
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKey, MetadataValue,
};
use mmids_core::workflows::{MediaNotificationContent, MediaType};
use mmids_core::VideoTimestamp;
use std::collections::HashMap;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{error, warn};
/// Creates a video encoder that uses the gstreamer `x264enc` encoder to encode video into h264
/// video.
///
/// This encoder supports the following optional parameters:
/// * `width` - How many pixels wide the resulting video should be
/// * `height` - How many pixels high the resulting video should be
/// * `fps` - The exact fps the resulting video should be
/// * `bitrate` - the desired bitrate specified in **kbps**. Output will be encoded with constant bitrate
/// * `preset` - The `speed-preset` value to use in the encoder. Valid values are: `ultrafast`,
/// `superfast`, `veryfast`, `faster`, `fast`, `medium`, `slow`, `slower`, `veryslow`. The default
/// is `medium`.
pub struct X264EncoderGenerator {
pub pts_offset_metadata_key: MetadataKey,
}
impl VideoEncoderGenerator for X264EncoderGenerator {
fn create(
&self,
pipeline: &Pipeline,
parameters: &HashMap<String, Option<String>>,
media_sender: UnboundedSender<MediaNotificationContent>,
) -> Result<Box<dyn VideoEncoder + Send>> {
Ok(Box::new(X264Encoder::new(
media_sender,
parameters,
pipeline,
self.pts_offset_metadata_key,
)?))
}
}
struct X264Encoder {
source: AppSrc,
}
impl X264Encoder {
fn new(
media_sender: UnboundedSender<MediaNotificationContent>,
parameters: &HashMap<String, Option<String>>,
pipeline: &Pipeline,
pts_offset_metadata_key: MetadataKey,
) -> Result<X264Encoder> {
let height = get_number(parameters, "height");
let width = get_number(parameters, "width");
let preset = parameters.get("preset").unwrap_or(&None);
let fps = get_number(parameters, "fps");
let bitrate = get_number(parameters, "bitrate");
let appsrc = create_gst_element("appsrc")?;
let queue = create_gst_element("queue")?;
let decoder = create_gst_element("decodebin")?;
let scale = create_gst_element("videoscale")?;
let rate_changer = create_gst_element("videorate")?;
let capsfilter = create_gst_element("capsfilter")?;
let encoder = create_gst_element("x264enc")?;
let output_parser = create_gst_element("h264parse")?;
let appsink = create_gst_element("appsink")?;
pipeline
.add_many(&[
&appsrc,
&queue,
&decoder,
&scale,
&rate_changer,
&capsfilter,
&encoder,
&output_parser,
&appsink,
])
.with_context(|| "Failed to add x264 encoder's elements to pipeline")?;
Element::link_many(&[&appsrc, &queue, &decoder])
.with_context(|| "Failed to link appsrc -> queue -> decoder")?;
Element::link_many(&[
&scale,
&rate_changer,
&capsfilter,
&encoder,
&output_parser,
&appsink,
])
.with_context(|| "Failed to link scale to sink")?;
// decodebin's video pad is added dynamically
let link_destination = scale;
decoder.connect_pad_added(move |src, src_pad| {
match src.link_pads(
Some(&src_pad.name()),
&link_destination.clone(),
Some("sink"),
) {
Ok(_) => (),
Err(_) => error!(
src_caps = ?src_pad.caps(),
dest_caps = ?link_destination.static_pad("sink").unwrap().caps(),
"Failed to link `decodebin`'s {} pad to videoscale element",
src_pad.name()
),
}
});
let mut caps = Caps::builder("video/x-raw");
if let Some(height) = height {
caps = caps.field("height", height as i32);
}
if let Some(width) = width {
caps = caps.field("width", width as i32);
}
if let Some(fps) = fps {
caps = caps.field("framerate", Fraction::new(fps as i32, 1));
}
let caps = caps.build();
capsfilter.set_property("caps", caps);
encoder.set_property_from_str("tune", "zerolatency");
if let Some(preset) = preset {
encoder.set_property_from_str("speed-preset", preset.as_str());
}
if let Some(bitrate) = bitrate {
encoder.set_property("bitrate", bitrate);
}
let appsink = appsink
.dynamic_cast::<AppSink>()
.map_err(|_| anyhow!("appsink could not be cast to 'AppSink'"))?;
let mut sent_codec_data = false;
let mut metadata_buffer = BytesMut::new();
appsink.set_callbacks(
AppSinkCallbacks::builder()
.new_sample(move |sink| {
match sample_received(
sink,
&mut sent_codec_data,
&output_parser,
media_sender.clone(),
pts_offset_metadata_key,
&mut metadata_buffer,
) {
Ok(_) => Ok(FlowSuccess::Ok),
Err(error) => {
error!("new_sample callback error received: {:?}", error);
Err(FlowError::Error)
}
}
})
.build(),
);
let appsrc = appsrc
.dynamic_cast::<AppSrc>()
.map_err(|_| anyhow!("source element could not be cast to 'Appsrc'"))?;
Ok(X264Encoder { source: appsrc })
}
}
impl VideoEncoder for X264Encoder {
fn push_data(
&self,
payload_type: Arc<String>,
data: Bytes,
timestamp: VideoTimestamp,
is_sequence_header: bool,
) -> Result<()> {
let buffer =
crate::utils::set_gst_buffer(data, Some(timestamp.dts()), Some(timestamp.pts()))
.with_context(|| "Failed to set buffer")?;
if is_sequence_header {
crate::utils::set_source_video_sequence_header(&self.source, payload_type, buffer)
.with_context(|| "Failed to set sequence header for x264 encoder")?;
} else {
self.source
.push_buffer(buffer)
.with_context(|| "Failed to push the buffer into video source")?;
}
Ok(())
}
}
fn get_number(parameters: &HashMap<String, Option<String>>, key: &str) -> Option<u32> {
if let Some(Some(inner)) = parameters.get(key) {
match inner.parse() {
Ok(num) => return Some(num),
Err(_) => warn!("Parameter {key} had a value of '{inner}', which is not a number"),
}
}
None
}
fn sample_received(
sink: &AppSink,
codec_data_sent: &mut bool,
output_parser: &Element,
media_sender: UnboundedSender<MediaNotificationContent>,
pts_offset_metadata_key: MetadataKey,
metadata_buffer: &mut BytesMut,
) -> Result<()> {
if !*codec_data_sent {
// Pull the codec_data/sequence header out from the output parser
let codec_data = get_codec_data_from_element(output_parser)?;
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
timestamp: Duration::from_millis(0),
is_required_for_decoding: true,
data: codec_data,
metadata: MediaPayloadMetadataCollection::new(iter::empty(), metadata_buffer),
});
*codec_data_sent = true;
}
let sample = SampleResult::from_sink(sink).with_context(|| "Failed to get x264enc sample")?;
let timestamp = sample.to_video_timestamp();
let pts_offset = MetadataEntry::new(
pts_offset_metadata_key,
MetadataValue::I32(timestamp.pts_offset()),
metadata_buffer,
)
.unwrap(); // Can only panic if the key is not for an i32
let _ = media_sender.send(MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
timestamp: timestamp.dts(),
is_required_for_decoding: false,
data: sample.content,
metadata: MediaPayloadMetadataCollection::new([pts_offset].into_iter(), metadata_buffer),
});
Ok(())
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/steps/mod.rs | mmids-gstreamer/src/steps/mod.rs | //! Workflow steps dealing with gstreamer based endpoints
pub mod basic_transcoder;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-gstreamer/src/steps/basic_transcoder/mod.rs | mmids-gstreamer/src/steps/basic_transcoder/mod.rs | //! The basic transcoding workflow step that allows transcoding audio and video based on passed
//! in parameters. This step expects at least an `audio` and `video` parameter to be specified, each
//! with the name of the respective audio and video encoder to use.
//!
//! Each encoder may have encoder specific parameters that can be specified by prefixing each
//! parameter with either `audio_` or `video_`. These prefixes allow the workflow step to know
//! which encoder to route the each parameter to. The prefix is removed from the parameter before
//! passing it to the encoder, so `video_bitrate` gets passed to the video encoder as `bitrate`.
use crate::endpoints::gst_transcoder::{
GstTranscoderNotification, GstTranscoderRequest, GstTranscoderStoppedCause,
};
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::{
FuturesChannelInnerResult, WorkflowStepFuturesChannel,
};
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent};
use mmids_core::StreamId;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use tracing::{error, info, instrument, warn};
use uuid::Uuid;
pub const VIDEO_ENCODER: &str = "video";
pub const AUDIO_ENCODER: &str = "audio";
pub const VIDEO_PARAM_PREFIX: &str = "video_";
pub const AUDIO_PARAM_PREFIX: &str = "audio_";
/// Creates a new instance of the basic transcode workflow step.
pub struct BasicTranscodeStepGenerator {
transcode_endpoint: UnboundedSender<GstTranscoderRequest>,
}
struct ActiveTranscode {
media_sender: UnboundedSender<MediaNotificationContent>,
transcode_process_id: Uuid,
stream_name: Arc<String>,
}
struct BasicTranscodeStep {
transcoder_endpoint: UnboundedSender<GstTranscoderRequest>,
active_transcodes: HashMap<StreamId, ActiveTranscode>,
video_encoder_name: String,
audio_encoder_name: String,
video_parameters: HashMap<String, Option<String>>,
audio_parameters: HashMap<String, Option<String>>,
}
enum FutureResult {
TranscoderEndpointGone,
TranscoderNotificationSenderGone(StreamId),
TranscoderNotificationReceived {
stream_id: StreamId,
notification: GstTranscoderNotification,
},
TranscodedMediaChannelClosed(StreamId),
}
impl StepFutureResult for FutureResult {}
#[derive(thiserror::Error, Debug)]
enum StepStartupError {
#[error("No video encoder specified")]
NoVideoEncoderSpecified,
#[error("No audio encoder specified")]
NoAudioEncoderSpecified,
}
impl BasicTranscodeStepGenerator {
pub fn new(
transcode_endpoint: UnboundedSender<GstTranscoderRequest>,
) -> BasicTranscodeStepGenerator {
BasicTranscodeStepGenerator { transcode_endpoint }
}
}
impl StepGenerator for BasicTranscodeStepGenerator {
fn generate(
&self,
definition: WorkflowStepDefinition,
futures_channel: WorkflowStepFuturesChannel,
) -> StepCreationResult {
let video_encoder_name = match definition.parameters.get(VIDEO_ENCODER) {
Some(Some(encoder)) => encoder.clone(),
_ => return Err(Box::new(StepStartupError::NoVideoEncoderSpecified)),
};
let audio_encoder_name = match definition.parameters.get(AUDIO_ENCODER) {
Some(Some(encoder)) => encoder.clone(),
_ => return Err(Box::new(StepStartupError::NoAudioEncoderSpecified)),
};
// Split out audio and video specific parameters based on prefixes.
let mut audio_params = HashMap::new();
let mut video_params = HashMap::new();
for (key, value) in &definition.parameters {
if key.starts_with(VIDEO_PARAM_PREFIX) && key.len() > VIDEO_PARAM_PREFIX.len() {
video_params.insert(key[VIDEO_PARAM_PREFIX.len()..].to_string(), value.clone());
}
if key.starts_with(AUDIO_PARAM_PREFIX) && key.len() > AUDIO_PARAM_PREFIX.len() {
audio_params.insert(key[AUDIO_PARAM_PREFIX.len()..].to_string(), value.clone());
}
}
let step = BasicTranscodeStep {
transcoder_endpoint: self.transcode_endpoint.clone(),
active_transcodes: HashMap::new(),
video_encoder_name,
audio_encoder_name,
video_parameters: video_params,
audio_parameters: audio_params,
};
let transcode_endpoint = self.transcode_endpoint.clone();
futures_channel.send_on_generic_future_completion(async move {
transcode_endpoint.closed().await;
FutureResult::TranscoderEndpointGone
});
Ok((Box::new(step), StepStatus::Active))
}
}
impl BasicTranscodeStep {
fn stop_all_transcodes(&mut self) {
let stream_ids = self.active_transcodes.keys().cloned().collect::<Vec<_>>();
for stream_id in stream_ids {
self.stop_transcode(stream_id);
}
}
#[instrument(skip(self))]
fn stop_transcode(&mut self, stream_id: StreamId) {
if let Some(transcode) = self.active_transcodes.remove(&stream_id) {
info!("Stopping transcode");
let _ = self
.transcoder_endpoint
.send(GstTranscoderRequest::StopTranscoding {
id: transcode.transcode_process_id,
});
}
}
#[instrument(skip_all, fields(stream_id = ?stream_id, stream_name = %stream_name))]
fn start_transcode(
&mut self,
stream_id: StreamId,
stream_name: Arc<String>,
futures_channel: &WorkflowStepFuturesChannel,
) {
if self.active_transcodes.contains_key(&stream_id) {
warn!(
"Attempted to start transcode for stream that already has a transcode in progress"
);
return;
}
let (media_sender, media_receiver) = unbounded_channel();
let (notification_sender, notification_receiver) = unbounded_channel();
let process_id = Uuid::new_v4();
self.active_transcodes.insert(
stream_id.clone(),
ActiveTranscode {
transcode_process_id: process_id,
media_sender,
stream_name: stream_name.clone(),
},
);
info!(
"Starting transcode process id {} for stream {}",
process_id, stream_name
);
let _ = self
.transcoder_endpoint
.send(GstTranscoderRequest::StartTranscoding {
id: process_id,
notification_channel: notification_sender,
input_media: media_receiver,
video_encoder_name: self.video_encoder_name.clone(),
video_parameters: self.video_parameters.clone(),
audio_encoder_name: self.audio_encoder_name.clone(),
audio_parameters: self.audio_parameters.clone(),
});
let closed_stream_id = stream_id.clone();
futures_channel.send_on_generic_unbounded_recv(
notification_receiver,
move |notification| FutureResult::TranscoderNotificationReceived {
stream_id: stream_id.clone(),
notification,
},
move || FutureResult::TranscoderNotificationSenderGone(closed_stream_id),
);
}
fn handle_media(
&mut self,
media: MediaNotification,
outputs: &mut StepOutputs,
futures_channel: &WorkflowStepFuturesChannel,
) {
match &media.content {
MediaNotificationContent::NewIncomingStream { stream_name } => {
self.start_transcode(
media.stream_id.clone(),
stream_name.clone(),
futures_channel,
);
outputs.media.push(media);
}
MediaNotificationContent::StreamDisconnected => {
self.stop_transcode(media.stream_id.clone());
outputs.media.push(media);
}
MediaNotificationContent::MediaPayload { .. } => {
if let Some(transcode) = self.active_transcodes.get(&media.stream_id) {
let _ = transcode.media_sender.send(media.content.clone());
}
}
MediaNotificationContent::Metadata { .. } => (),
}
}
fn handle_transcode_notification(
&mut self,
stream_id: StreamId,
notification: GstTranscoderNotification,
futures_channel: &WorkflowStepFuturesChannel,
) {
match notification {
GstTranscoderNotification::TranscodingStopped(cause) => {
let transcode = match self.active_transcodes.remove(&stream_id) {
Some(transcode) => transcode,
None => return,
};
if cause != GstTranscoderStoppedCause::StopRequested {
warn!(
stream_id = ?stream_id,
cause = ?cause,
"Transcoding unexpectedly stopped: {:?}", cause
);
// Since the stop wasn't requested, try restarting it
self.start_transcode(stream_id, transcode.stream_name, futures_channel);
}
}
GstTranscoderNotification::TranscodingStarted { output_media } => {
let closed_stream_id = stream_id.clone();
futures_channel.send_on_unbounded_recv(
output_media,
move |media| {
FuturesChannelInnerResult::Media(MediaNotification {
stream_id: stream_id.clone(),
content: media,
})
},
move || {
FuturesChannelInnerResult::Generic(Box::new(
FutureResult::TranscodedMediaChannelClosed(closed_stream_id),
))
},
);
}
}
}
}
impl WorkflowStep for BasicTranscodeStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
for media in inputs.media.drain(..) {
self.handle_media(media, outputs, &futures_channel);
}
for future_result in inputs.notifications.drain(..) {
let future_result = match future_result.downcast::<FutureResult>() {
Ok(result) => result,
Err(_) => {
error!("Received future result that could not be casted to the internal future result type");
continue;
}
};
match *future_result {
FutureResult::TranscoderEndpointGone => {
self.stop_all_transcodes();
return StepStatus::Error {
message: "Transcoder endpoint went away".to_string(),
};
}
FutureResult::TranscoderNotificationSenderGone(stream_id) => {
error!(
stream_id = ?stream_id,
"Transcode notification sender for stream {:?} disappeared",
stream_id,
);
self.stop_transcode(stream_id);
}
FutureResult::TranscodedMediaChannelClosed(stream_id) => {
error!(
stream_id = ?stream_id,
"Sender of transcoded media for stream {:?} disappeared",
stream_id,
);
self.stop_transcode(stream_id);
}
FutureResult::TranscoderNotificationReceived {
notification,
stream_id,
} => {
self.handle_transcode_notification(stream_id, notification, &futures_channel);
}
}
}
StepStatus::Active
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/lib.rs | mmids-http-api/src/lib.rs | //! This crate provides ready to go mechanisms for interacting with mmids via an HTTP based
//! interface. This allows operators to view details of workflows, start workflows, stop workflows,
//! and also provides capabilities for custom mmids applications to provide their own HTTP
//! capabilities.
//!
//! Routes are defined by consumers, which define the code that should execute when that route
//! gets hit.
pub mod handlers;
pub mod routing;
use crate::routing::RoutingTable;
use hyper::header::HeaderName;
use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server, StatusCode};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::oneshot::{channel, Receiver, Sender};
use tracing::{error, info, instrument};
use uuid::Uuid;
pub struct HttpApiShutdownSignal {}
pub fn start_http_api(
bind_address: SocketAddr,
routes: RoutingTable,
) -> Sender<HttpApiShutdownSignal> {
let routes = Arc::new(routes);
let service = make_service_fn(move |socket: &AddrStream| {
let remote_address = socket.remote_addr();
let routes_clone = routes.clone();
async move {
Ok::<_, hyper::Error>(service_fn(move |request: Request<Body>| {
execute_request(
request,
remote_address,
routes_clone.clone(),
Uuid::new_v4().to_string(),
)
}))
}
});
let (sender, receiver) = channel();
let server = Server::bind(&bind_address)
.serve(service)
.with_graceful_shutdown(graceful_shutdown(receiver));
info!("Starting HTTP api on {}", bind_address);
tokio::spawn(async { server.await });
sender
}
async fn graceful_shutdown(shutdown_signal: Receiver<HttpApiShutdownSignal>) {
let _ = shutdown_signal.await;
}
#[instrument(
skip(request, client_address, routes),
fields(
http_method = %request.method(),
http_uri = %request.uri(),
client_ip = %client_address.ip(),
)
)]
async fn execute_request(
mut request: Request<Body>,
client_address: SocketAddr,
routes: Arc<RoutingTable>,
request_id: String,
) -> Result<Response<Body>, hyper::Error> {
info!(
"Incoming HTTP request for {} {} from {}",
request.method(),
request.uri(),
client_address.ip()
);
let started_at = Instant::now();
let parts = request
.uri()
.path()
.split('/')
.filter(|x| x.trim() != "")
.collect::<Vec<_>>();
match routes.get_route(request.method(), &parts) {
Some(route) => {
let parameters = route.get_parameters(&parts);
match route
.handler
.execute(&mut request, parameters, request_id.clone())
.await
{
Ok(mut response) => {
let elapsed = started_at.elapsed();
info!(
duration = %elapsed.as_millis(),
"Request returning status code {} in {} ms", response.status(), elapsed.as_millis()
);
let headers = response.headers_mut();
headers.insert(
HeaderName::from_lowercase(b"x-request-id").unwrap(),
request_id.parse().unwrap(),
);
Ok(response)
}
Err(error) => {
let elapsed = started_at.elapsed();
error!(
duration = %elapsed.as_millis(),
"Request thrown error: {:?}", error
);
Err(error)
}
}
}
None => {
info!("No route found for this URL, returning 404");
let mut response = Response::new(Body::from("Invalid URL"));
*response.status_mut() = StatusCode::NOT_FOUND;
Ok(response)
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/routing.rs | mmids-http-api/src/routing.rs | //! Provides mechanisms to define routes for the Mmids HTTP apis, and what code should be executed
//! for each route.
use async_trait::async_trait;
use hyper::{Body, Method, Request, Response};
use std::collections::HashMap;
/// Defines how a single fragment of the URL path should be read as. Each part is the whole value
/// between a `/` and either another `/` or the end of the string. Query parameters are not
/// considered.
#[derive(Clone)]
pub enum PathPart {
/// The fragment of the path should match this exact string value. This *is* case sensitive.
Exact { value: String },
/// The fragment of the path can match any string. The string value of this part of the path
/// will be stored as a parameter with the key being the specified name of the parameter, and
/// the value being the actual string in the path.
Parameter { name: String },
}
/// Represents code that will be executed for a given route.
///
/// Note: this trait uses the `async_trait` crate
#[async_trait]
pub trait RouteHandler {
/// Executes the handler for the specified HTTP request and pre-parsed path parameters.
///
/// Note that implementors can use `async_trait` to clean up the signature.
async fn execute(
&self,
request: &mut Request<Body>,
path_parameters: HashMap<String, String>,
request_id: String,
) -> Result<Response<Body>, hyper::Error>;
}
/// Defines the HTTP method, a specific path, and which handler should execute requests that match
/// the route.
pub struct Route {
pub method: Method,
pub path: Vec<PathPart>,
pub handler: Box<dyn RouteHandler + Sync + Send>,
}
/// Errors that can occur when registering new routes with the routing table
#[derive(thiserror::Error, Debug)]
pub enum RouteRegistrationError {
/// Raised when attempting to register a route whose http method and path parts match an
/// route that's already been registered.
#[error("A route is already registered that conflicts with this route")]
RouteConflict,
}
/// A system that contains all available routes. Routes may be registered with it and can then be
/// looked up from.
#[derive(Default)]
pub struct RoutingTable {
routes: HashMap<Method, RouteNode>,
}
#[derive(PartialEq, Eq, Hash)]
enum SearchablePathPart {
Exact(String),
Parameter,
}
struct RouteNode {
leaf: Option<Route>,
children: HashMap<SearchablePathPart, RouteNode>,
}
impl RoutingTable {
/// Creates an empty routing table
pub fn new() -> Self {
Default::default()
}
/// Registers a route to be available by the routing table
pub fn register(&mut self, route: Route) -> Result<(), RouteRegistrationError> {
let mut node = self
.routes
.entry(route.method.clone())
.or_insert(RouteNode {
leaf: None,
children: HashMap::new(),
});
for part in &route.path {
let searchable_part = match part {
PathPart::Exact { value: name } => SearchablePathPart::Exact(name.clone()),
PathPart::Parameter { .. } => SearchablePathPart::Parameter,
};
node = node.children.entry(searchable_part).or_insert(RouteNode {
leaf: None,
children: HashMap::new(),
});
}
if node.leaf.is_some() {
return Err(RouteRegistrationError::RouteConflict);
}
node.leaf = Some(route);
Ok(())
}
pub(super) fn get_route(&self, method: &Method, path_parts: &Vec<&str>) -> Option<&Route> {
let node = match self.routes.get(method) {
Some(node) => node,
None => return None,
};
find_route(0, path_parts, node)
}
}
fn find_route<'a>(
index: usize,
parts: &Vec<&str>,
current_node: &'a RouteNode,
) -> Option<&'a Route> {
if index >= parts.len() {
return match ¤t_node.leaf {
Some(route) => Some(route),
None => None,
};
}
if let Some(exact_child) = current_node
.children
.get(&SearchablePathPart::Exact(parts[index].to_string()))
{
if let Some(route) = find_route(index + 1, parts, exact_child) {
return Some(route);
}
}
if let Some(parameter_child) = current_node.children.get(&SearchablePathPart::Parameter) {
if let Some(route) = find_route(index + 1, parts, parameter_child) {
return Some(route);
}
}
None
}
impl Route {
pub(super) fn get_parameters(&self, path_parts: &[&str]) -> HashMap<String, String> {
let mut results = HashMap::new();
for (x, path_part) in self.path.iter().enumerate() {
if let PathPart::Parameter { name } = path_part {
results.insert(name.clone(), path_parts[x].to_string());
}
}
results
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/handlers/list_workflows.rs | mmids-http-api/src/handlers/list_workflows.rs | //! Contains the handler for getting a list of workflows
use crate::routing::RouteHandler;
use async_trait::async_trait;
use hyper::header::HeaderValue;
use hyper::{Body, Error, Request, Response, StatusCode};
use mmids_core::workflows::manager::{WorkflowManagerRequest, WorkflowManagerRequestOperation};
use serde::Serialize;
use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot::channel;
use tokio::time::timeout;
use tracing::error;
/// HTTP handler which provides a list of workflows that are actively running
pub struct ListWorkflowsHandler {
manager: UnboundedSender<WorkflowManagerRequest>,
}
/// Defines what data the API will return for each running workflow
#[derive(Serialize)]
pub struct WorkflowListItemResponse {
name: String,
}
impl ListWorkflowsHandler {
pub fn new(manager: UnboundedSender<WorkflowManagerRequest>) -> Self {
ListWorkflowsHandler { manager }
}
}
#[async_trait]
impl RouteHandler for ListWorkflowsHandler {
async fn execute(
&self,
_request: &mut Request<Body>,
_path_parameters: HashMap<String, String>,
request_id: String,
) -> Result<Response<Body>, Error> {
let (response_sender, response_receiver) = channel();
let message = WorkflowManagerRequest {
request_id,
operation: WorkflowManagerRequestOperation::GetRunningWorkflows {
response_channel: response_sender,
},
};
match self.manager.send(message) {
Ok(_) => (),
Err(_) => {
error!("Workflow manager is no longer operational");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let response = match timeout(Duration::from_secs(10), response_receiver).await {
Ok(Ok(response)) => response,
Ok(Err(_)) => {
error!("Workflow manager is no longer operational");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
Err(_) => {
error!("Get workflow request timed out");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let response = response
.into_iter()
.map(|x| WorkflowListItemResponse {
name: x.name.to_string(),
})
.collect::<Vec<_>>();
let json = match serde_json::to_string_pretty(&response) {
Ok(json) => json,
Err(error) => {
error!("Failed to serialize workflows to json: {:?}", error);
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let mut response = Response::new(Body::from(json));
let headers = response.headers_mut();
headers.insert(
hyper::http::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
Ok(response)
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/handlers/stop_workflow.rs | mmids-http-api/src/handlers/stop_workflow.rs | //! Handler that allows a workflow to be stopped
use crate::routing::RouteHandler;
use async_trait::async_trait;
use hyper::{Body, Error, Request, Response, StatusCode};
use mmids_core::workflows::manager::{WorkflowManagerRequest, WorkflowManagerRequestOperation};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
/// Handles HTTP requests to stop a running workflow. It requires a single path parameter
/// named `workflow` that contains the name of the workflow to be stopped. It will always return
/// a 200 OK, even if the workflow isn't running.
pub struct StopWorkflowHandler {
manager: UnboundedSender<WorkflowManagerRequest>,
}
impl StopWorkflowHandler {
pub fn new(manager: UnboundedSender<WorkflowManagerRequest>) -> Self {
StopWorkflowHandler { manager }
}
}
#[async_trait]
impl RouteHandler for StopWorkflowHandler {
async fn execute(
&self,
_request: &mut Request<Body>,
path_parameters: HashMap<String, String>,
request_id: String,
) -> Result<Response<Body>, Error> {
let workflow_name = match path_parameters.get("workflow") {
Some(value) => Arc::new(value.to_string()),
None => {
error!("Get workflow endpoint called without a 'workflow' path parameter");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
match self.manager.send(WorkflowManagerRequest {
request_id,
operation: WorkflowManagerRequestOperation::StopWorkflow {
name: workflow_name,
},
}) {
Ok(_) => (),
Err(_) => {
error!("Workflow manager endpoint gone");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
Ok(Response::default())
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/handlers/mod.rs | mmids-http-api/src/handlers/mod.rs | //! Contains pre-defined implementations of the `RouteHandler` traits for various functionality
pub mod get_workflow_details;
pub mod list_workflows;
pub mod start_workflow;
pub mod stop_workflow;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/handlers/start_workflow.rs | mmids-http-api/src/handlers/start_workflow.rs | //! Contains the handler that creates and updates workflows
use crate::routing::RouteHandler;
use async_trait::async_trait;
use bytes::Bytes;
use hyper::http::HeaderValue;
use hyper::{Body, Error, Request, Response, StatusCode};
use mmids_core::workflows::definitions::WorkflowDefinition;
use mmids_core::workflows::manager::{WorkflowManagerRequest, WorkflowManagerRequestOperation};
use serde::Serialize;
use std::collections::HashMap;
use tokio::sync::mpsc::UnboundedSender;
use tracing::{error, warn};
const MMIDS_MIME_TYPE: &str = "application/vnd.mmids.workflow";
/// Handles requests to start a workflow. Every workflow must have a name, and if a workflow is
/// specified with a name that matches an already running workflow then the existing workflow
/// will be updated to match the passed in workflow instead of a new workflow starting.
///
/// A successful result does not mean that the workflow has fully started, only that the workflow
/// has been submitted to the workflow manager. Status updates of the workflow will need to be
/// queried to know if it successfully became active.
///
/// The details of the workflow are expected in the passed in via the request body. The format
/// that the details come in are based on the `Content-Type` header of the request:
///
/// * `application/vnd.mmids.workflow` - Worklow definition that matches how workflows are defined in
/// the mmids configuration files.
///
/// If no `Content-Type` is specified than `application/vnd.mmids.workflow` is assumed.
pub struct StartWorkflowHandler {
manager: UnboundedSender<WorkflowManagerRequest>,
}
/// Response provided when an error is returned, such as an invalid workflow specified
#[derive(Serialize)]
pub struct ErrorResponse {
pub error: String,
}
impl StartWorkflowHandler {
pub fn new(manager: UnboundedSender<WorkflowManagerRequest>) -> Self {
StartWorkflowHandler { manager }
}
}
#[async_trait]
impl RouteHandler for StartWorkflowHandler {
async fn execute(
&self,
request: &mut Request<Body>,
_path_parameters: HashMap<String, String>,
request_id: String,
) -> Result<Response<Body>, Error> {
let body = hyper::body::to_bytes(request.body_mut()).await?;
let content_type = match request.headers().get(hyper::http::header::CONTENT_TYPE) {
Some(content_type) => content_type.to_str().unwrap_or(MMIDS_MIME_TYPE),
None => {
warn!("No content type specified, assuming '{}'", MMIDS_MIME_TYPE);
MMIDS_MIME_TYPE
}
};
let workflow = match content_type.to_lowercase().trim() {
MMIDS_MIME_TYPE => parse_mmids_mime_type(body)?,
x => {
warn!("Invalid content type specified: '{}'", x);
let error = ErrorResponse {
error: format!("Invalid content type specified: {}", x),
};
return Ok(error.into_json_bad_request());
}
};
let workflow = match workflow {
Ok(workflow) => workflow,
Err(error) => {
return Ok(error.into_json_bad_request());
}
};
let result = self.manager.send(WorkflowManagerRequest {
request_id,
operation: WorkflowManagerRequestOperation::UpsertWorkflow {
definition: workflow,
},
});
match result {
Ok(_) => Ok(Response::default()),
Err(_) => {
error!("Workflow manager no longer exists");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
Ok(response)
}
}
}
}
impl ErrorResponse {
fn into_json_bad_request(self) -> Response<Body> {
let json = match serde_json::to_string_pretty(&self) {
Ok(json) => json,
Err(error) => {
error!("Failed to serialize error response to json: {:?}", error);
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return response;
}
};
let mut response = Response::new(Body::from(json));
*response.status_mut() = StatusCode::BAD_REQUEST;
let headers = response.headers_mut();
headers.insert(
hyper::http::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
response
}
}
fn parse_mmids_mime_type(body: Bytes) -> Result<Result<WorkflowDefinition, ErrorResponse>, Error> {
let content = match String::from_utf8(body.to_vec()) {
Ok(content) => content,
Err(utf8_error) => {
return Ok(Err(ErrorResponse {
error: format!("Body was not valid utf8 content: {}", utf8_error),
}));
}
};
let mut config = match mmids_core::config::parse(content.as_str()) {
Ok(config) => config,
Err(parse_error) => {
return Ok(Err(ErrorResponse {
error: format!("Failed to parse input: {:?}", parse_error),
}));
}
};
if config.workflows.len() > 1 {
return Ok(Err(ErrorResponse {
error: format!(
"Each request can only contain 1 workflow, {} were specified",
config.workflows.len()
),
}));
}
if config.workflows.is_empty() {
return Ok(Err(ErrorResponse {
error: "No workflows specified".to_string(),
}));
}
let workflow = config.workflows.drain().next().unwrap().1;
Ok(Ok(workflow))
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-http-api/src/handlers/get_workflow_details.rs | mmids-http-api/src/handlers/get_workflow_details.rs | //! Contains the handler for getting details about a running workflow
use crate::routing::RouteHandler;
use async_trait::async_trait;
use hyper::http::HeaderValue;
use hyper::{Body, Error, Request, Response, StatusCode};
use mmids_core::workflows::manager::{WorkflowManagerRequest, WorkflowManagerRequestOperation};
use mmids_core::workflows::steps::StepStatus;
use mmids_core::workflows::{WorkflowState, WorkflowStatus, WorkflowStepState};
use serde::Serialize;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot::channel;
use tokio::time::timeout;
use tracing::error;
/// Handles HTTP requests to get details for a specific workflow. It requires a single path
/// parameter with the name `workflow` containing the name of the workflow to query for. Response
/// will always be returned in json format.
pub struct GetWorkflowDetailsHandler {
manager: UnboundedSender<WorkflowManagerRequest>,
}
/// The API's response for the state of the requested workflow
#[derive(Serialize)]
pub struct WorkflowStateResponse {
status: String,
active_steps: Vec<WorkflowStepStateResponse>,
pending_steps: Vec<WorkflowStepStateResponse>,
}
/// API's response for the details of an individual workflow step
#[derive(Serialize)]
pub struct WorkflowStepStateResponse {
step_id: String,
step_type: String,
parameters: HashMap<String, Option<String>>,
status: String,
}
impl GetWorkflowDetailsHandler {
pub fn new(manager: UnboundedSender<WorkflowManagerRequest>) -> Self {
GetWorkflowDetailsHandler { manager }
}
}
#[async_trait]
impl RouteHandler for GetWorkflowDetailsHandler {
async fn execute(
&self,
_request: &mut Request<Body>,
path_parameters: HashMap<String, String>,
request_id: String,
) -> Result<Response<Body>, Error> {
let workflow_name = match path_parameters.get("workflow") {
Some(value) => value.to_string(),
None => {
error!("Get workflow endpoint called without a 'workflow' path parameter");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let (sender, receiver) = channel();
let _ = self.manager.send(WorkflowManagerRequest {
request_id,
operation: WorkflowManagerRequestOperation::GetWorkflowDetails {
name: Arc::new(workflow_name),
response_channel: sender,
},
});
let details = match timeout(Duration::from_secs(1), receiver).await {
Ok(Ok(details)) => details,
Ok(Err(_)) => {
error!("Receiver was dropped prior to sending a response");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
Err(_) => {
error!("Request timed out");
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let response = if let Some(details) = details {
let details = WorkflowStateResponse::from(details);
let json = match serde_json::to_string_pretty(&details) {
Ok(json) => json,
Err(e) => {
error!("Could not serialize workflow details response: {:?}", e);
let mut response = Response::default();
*response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
return Ok(response);
}
};
let mut response = Response::new(Body::from(json));
let headers = response.headers_mut();
headers.insert(
hyper::http::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
);
response
} else {
let mut response = Response::new(Body::from("Workflow not found"));
*response.status_mut() = StatusCode::NOT_FOUND;
response
};
Ok(response)
}
}
impl From<WorkflowState> for WorkflowStateResponse {
fn from(workflow: WorkflowState) -> Self {
WorkflowStateResponse {
status: match workflow.status {
WorkflowStatus::Running => "Running".to_string(),
WorkflowStatus::Error {
failed_step_id,
message,
} => format!("Step id {} failed: {}", failed_step_id, message),
},
active_steps: workflow
.active_steps
.into_iter()
.map(WorkflowStepStateResponse::from)
.collect(),
pending_steps: workflow
.pending_steps
.into_iter()
.map(WorkflowStepStateResponse::from)
.collect(),
}
}
}
impl From<WorkflowStepState> for WorkflowStepStateResponse {
fn from(step_state: WorkflowStepState) -> Self {
WorkflowStepStateResponse {
step_id: step_state.definition.get_id().0.to_string(),
step_type: step_state.definition.step_type.0,
parameters: step_state.definition.parameters,
status: match step_state.status {
StepStatus::Created => "Created".to_string(),
StepStatus::Active => "Active".to_string(),
StepStatus::Error { message } => format!("Error: {}", message),
StepStatus::Shutdown => "Shut Down".to_string(),
},
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/validators/rtmp-server/src/main.rs | validators/rtmp-server/src/main.rs | use log::{error, info, warn};
use mmids_core::net::tcp::start_socket_manager;
use mmids_rtmp::rtmp_server::{
start_rtmp_server_endpoint, IpRestriction, RtmpEndpointMediaData, RtmpEndpointMediaMessage,
RtmpEndpointPublisherMessage, RtmpEndpointRequest, RtmpEndpointWatcherNotification,
StreamKeyRegistration,
};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::unbounded_channel;
#[tokio::main()]
pub async fn main() {
env_logger::init();
info!("Starting rtmp server validator");
let socket_manager_sender = start_socket_manager(None);
let rtmp_server_sender = start_rtmp_server_endpoint(socket_manager_sender);
let (rtmp_response_sender, mut publish_notification_receiver) = unbounded_channel();
let _ = rtmp_server_sender.send(RtmpEndpointRequest::ListenForPublishers {
port: 1935,
rtmp_app: Arc::new("live".to_string()),
rtmp_stream_key: StreamKeyRegistration::Any,
message_channel: rtmp_response_sender,
stream_id: None,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
info!("Requesting to listen for publish requests on port 1935 and app 'live'");
match publish_notification_receiver.recv().await {
Some(RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful) => (),
Some(x) => {
error!("Unexpected initial message: {:?}", x);
return;
}
None => {
error!("response receiver closed before any responses came");
return;
}
}
info!("Publish listen request successful");
let (notification_sender, mut watch_notification_receiver) = unbounded_channel();
let (media_sender, media_receiver) = unbounded_channel();
let _ = rtmp_server_sender.send(RtmpEndpointRequest::ListenForWatchers {
port: 1935,
rtmp_app: Arc::new("live".to_string()),
rtmp_stream_key: StreamKeyRegistration::Any,
media_channel: media_receiver,
notification_channel: notification_sender,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
info!("Requesting to listening for play requests on port 1935 and app 'live'");
match watch_notification_receiver.recv().await {
Some(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful) => (),
Some(x) => {
error!("Unexpected initial watch message: {:?}", x);
return;
}
None => {
error!("Watch response receiver closed before any responses came");
return;
}
}
info!("Playback listen request successful");
let mut publisher_stream_key_map = HashMap::new();
let mut announce_video_data = true;
let mut announce_audio_data = true;
loop {
tokio::select! {
message = watch_notification_receiver.recv() => {
if message.is_none() {
break;
}
match message.unwrap() {
RtmpEndpointWatcherNotification::StreamKeyBecameActive {stream_key, ..} => {
info!("Stream key '{}' now has at least one watcher", stream_key);
}
RtmpEndpointWatcherNotification::StreamKeyBecameInactive {stream_key} => {
info!("Stream key '{}' no longer has any watchers", stream_key);
}
event => {
info!("Unexpected watcher notification: {:?}", event);
}
}
}
message = publish_notification_receiver.recv() => {
if message.is_none() {
break;
}
match message.unwrap() {
RtmpEndpointPublisherMessage::NewPublisherConnected {connection_id, stream_key, stream_id, ..} => {
info!("Connection {} connected as publisher for stream_key {} and stream id {:?}", connection_id, stream_key, stream_id);
publisher_stream_key_map.insert(connection_id, stream_key);
}
RtmpEndpointPublisherMessage::PublishingStopped {connection_id} => {
info!("Connection {} stopped publishing", connection_id);
}
RtmpEndpointPublisherMessage::StreamMetadataChanged {publisher, metadata} => {
info!("Connection {} sent new stream metadata: {:?}", publisher, metadata);
let stream_key = match publisher_stream_key_map.get(&publisher) {
Some(x) => x,
None => {
error!("Received stream metadata from unknown publisher with connection id {}", publisher);
continue;
}
};
let _ = media_sender.send(RtmpEndpointMediaMessage {
stream_key: (*stream_key).clone(),
data: RtmpEndpointMediaData::NewStreamMetaData {
metadata,
},
});
}
RtmpEndpointPublisherMessage::NewVideoData {
publisher,
data,
timestamp,
is_keyframe,
is_sequence_header,
composition_time_offset,
} => {
if announce_video_data {
info!("Connection {} sent video data", publisher);
announce_video_data = false;
}
let stream_key = match publisher_stream_key_map.get(&publisher) {
Some(x) => x,
None => {
error!("Received video from unknown publisher with connection id {}", publisher);
continue;
}
};
let _ = media_sender.send(RtmpEndpointMediaMessage {
stream_key: (*stream_key).clone(),
data: RtmpEndpointMediaData::NewVideoData {
data,
timestamp,
is_sequence_header,
is_keyframe,
composition_time_offset,
},
});
}
RtmpEndpointPublisherMessage::NewAudioData {
publisher,
data,
is_sequence_header,
timestamp,
} => {
if announce_audio_data {
info!("Connection {} sent audio data", publisher);
announce_audio_data = false;
}
let stream_key = match publisher_stream_key_map.get(&publisher) {
Some(x) => x,
None => {
error!("Received audio from unknown publisher with connection id {}", publisher);
continue;
}
};
let _ = media_sender.send(RtmpEndpointMediaMessage {
stream_key: (*stream_key).clone(),
data: RtmpEndpointMediaData::NewAudioData {
data,
is_sequence_header,
timestamp,
},
});
}
message => {
warn!("Unknown message received from publisher: {:?}", message);
}
}
}
}
}
info!("Terminating");
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/validators/echo-server/src/main.rs | validators/echo-server/src/main.rs | use bytes::{Bytes, BytesMut};
use futures::stream::FuturesUnordered;
use futures::{FutureExt, StreamExt};
use log::{debug, error, info, warn};
use mmids_core::net::tcp::{
start_socket_manager, OutboundPacket, TcpSocketRequest, TcpSocketResponse,
};
use mmids_core::net::ConnectionId;
use std::collections::HashMap;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
struct Connection {
sender: UnboundedSender<OutboundPacket>,
}
enum FutureResult {
TcpSocketResponse(TcpSocketResponse, UnboundedReceiver<TcpSocketResponse>),
BytesReceived(ConnectionId, Bytes, UnboundedReceiver<Bytes>),
ReceiverClosed,
ConnectionClosed(ConnectionId),
}
#[tokio::main]
pub async fn main() {
env_logger::init();
let socket_manager_sender = start_socket_manager(None);
let (response_sender, mut response_receiver) = unbounded_channel();
let message = TcpSocketRequest::OpenPort {
port: 8888,
response_channel: response_sender,
use_tls: false,
};
debug!("Opening port 8888");
match socket_manager_sender.send(message) {
Ok(_) => (),
Err(e) => panic!("Failed to send open port request: {}", e),
};
debug!("Waiting for response");
let response = match response_receiver.recv().await {
Some(response) => response,
None => panic!("No senders for tcp socket responses"),
};
match response {
TcpSocketResponse::RequestAccepted {} => (),
x => panic!("Unexpected response: {:?}", x),
};
let mut connections = HashMap::new();
let mut futures = FuturesUnordered::new();
futures.push(wait_for_responses(response_receiver).boxed());
let mut pending_received = BytesMut::new();
while let Some(result) = futures.next().await {
match result {
FutureResult::ReceiverClosed => {
error!("Receiver has no more senders");
break;
}
FutureResult::TcpSocketResponse(response, receiver) => {
futures.push(wait_for_responses(receiver).boxed());
match response {
TcpSocketResponse::NewConnection {
port: _,
connection_id,
outgoing_bytes,
incoming_bytes,
socket_address: _,
} => {
info!("New connection {:?}", connection_id);
let packet = OutboundPacket {
bytes: Bytes::from("Welcome!\n".to_string()),
can_be_dropped: false,
};
let _ = outgoing_bytes.send(packet);
let connection = Connection {
sender: outgoing_bytes,
};
connections.insert(connection_id.clone(), connection);
futures.push(wait_for_bytes(connection_id, incoming_bytes).boxed())
}
TcpSocketResponse::Disconnection { connection_id } => {
info!("Connection {:?} disconnected", connection_id);
connections.remove(&connection_id);
}
TcpSocketResponse::PortForciblyClosed { port: _ } => {
error!("Port was forcibly closed");
break;
}
x => warn!("Unexpected tcp response: {:?}", x),
}
}
FutureResult::ConnectionClosed(connection_id) => {
info!("Connection {:?} closed", connection_id);
connections.remove(&connection_id);
}
FutureResult::BytesReceived(connection_id, bytes, receiver) => {
futures.push(wait_for_bytes(connection_id.clone(), receiver).boxed());
pending_received.extend(bytes);
let mut index = None;
for x in 0..pending_received.len() {
if pending_received[x] == 10 {
index = Some(x);
break;
}
}
if let Some(index) = index {
let received = pending_received.split_to(index + 1);
if let Ok(string) = std::str::from_utf8(&received) {
info!(
"Received data from connection {:?}: {}",
connection_id,
string.trim()
);
let sender = match connections.get(&connection_id) {
Some(connection) => &connection.sender,
None => {
error!(
"Received packet for non-cataloged connection {:?}",
connection_id
);
break;
}
};
let response = format!("You said: {}", string);
let packet = OutboundPacket {
bytes: Bytes::from(response),
can_be_dropped: false,
};
let _ = sender.send(packet);
}
}
}
}
}
info!("Closing");
}
async fn wait_for_responses(mut receiver: UnboundedReceiver<TcpSocketResponse>) -> FutureResult {
match receiver.recv().await {
None => FutureResult::ReceiverClosed,
Some(x) => FutureResult::TcpSocketResponse(x, receiver),
}
}
async fn wait_for_bytes(
connection_id: ConnectionId,
mut receiver: UnboundedReceiver<Bytes>,
) -> FutureResult {
match receiver.recv().await {
None => FutureResult::ConnectionClosed(connection_id),
Some(bytes) => FutureResult::BytesReceived(connection_id, bytes, receiver),
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/validators/ffmpeg-runner/src/main.rs | validators/ffmpeg-runner/src/main.rs | use log::info;
use mmids_ffmpeg::endpoint::{
start_ffmpeg_endpoint, AudioTranscodeParams, FfmpegEndpointNotification, FfmpegEndpointRequest,
FfmpegParams, H264Preset, TargetParams, VideoScale, VideoTranscodeParams,
};
use tokio::sync::mpsc::unbounded_channel;
use uuid::Uuid;
#[tokio::main()]
pub async fn main() {
env_logger::init();
let endpoint = match start_ffmpeg_endpoint(
"c:\\users\\me\\tools\\ffmpeg\\bin\\ffmpeg.exe".to_string(),
"c:\\temp".to_string(),
) {
Ok(x) => x,
Err(e) => panic!("Error starting ffmpeg: {:?}", e),
};
info!("Ffmpeg runner started");
let (notification_sender, mut notification_receiver) = unbounded_channel();
let _ = endpoint.send(FfmpegEndpointRequest::StartFfmpeg {
id: Uuid::new_v4(),
params: hls_test(),
notification_channel: notification_sender,
});
match notification_receiver.recv().await {
None => panic!("ffmpeg endpoint is dead"),
Some(FfmpegEndpointNotification::FfmpegStopped) => panic!("Premature ffmpeg stop received"),
Some(FfmpegEndpointNotification::FfmpegFailedToStart { cause }) => {
panic!("Ffmpeg failed to start: {:?}", cause);
}
Some(FfmpegEndpointNotification::FfmpegStarted) => {
info!("Ffmpeg started as expected")
}
}
// wait for it to stop
match notification_receiver.recv().await {
None => panic!("ffmpeg endpoint died"),
Some(FfmpegEndpointNotification::FfmpegStarted) => {
panic!("Unexpected started notification received")
}
Some(FfmpegEndpointNotification::FfmpegFailedToStart { cause: _ }) => {
panic!("Unexpected start failure received")
}
Some(FfmpegEndpointNotification::FfmpegStopped) => {
info!("Received expected stopped notification");
}
}
}
fn hls_test() -> FfmpegParams {
FfmpegParams {
read_in_real_time: false,
input: "C:\\users\\me\\Documents\\bbb.flv".to_string(),
video_transcode: VideoTranscodeParams::H264 {
preset: H264Preset::UltraFast,
},
audio_transcode: AudioTranscodeParams::Aac,
scale: Some(VideoScale {
width: 640,
height: 480,
}),
bitrate_in_kbps: Some(3000),
target: TargetParams::Hls {
path: "c:\\temp\\test\\hlstest.m3u8".to_string(),
max_entries: None,
segment_length: 2,
},
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-app/src/main.rs | mmids-app/src/main.rs | mod http_handlers;
use hyper::Method;
use mmids_core::config::{parse as parse_config_file, MmidsConfig};
use mmids_core::event_hub::{start_event_hub, PublishEventRequest, SubscriptionRequest};
use mmids_core::net::tcp::{start_socket_manager, TlsOptions};
use mmids_core::reactors::executors::simple_http_executor::SimpleHttpExecutorGenerator;
use mmids_core::reactors::executors::ReactorExecutorFactory;
use mmids_core::reactors::manager::{
start_reactor_manager, CreateReactorResult, ReactorManagerRequest,
};
use mmids_core::workflows::definitions::WorkflowStepType;
use mmids_core::workflows::manager::{
start_workflow_manager, WorkflowManagerRequest, WorkflowManagerRequestOperation,
};
use mmids_core::workflows::metadata::common_metadata::{
get_is_keyframe_metadata_key, get_pts_offset_metadata_key,
};
use mmids_core::workflows::metadata::MetadataKeyMap;
use mmids_core::workflows::steps::factory::WorkflowStepFactory;
use mmids_core::workflows::steps::workflow_forwarder::WorkflowForwarderStepGenerator;
use mmids_ffmpeg::endpoint::{start_ffmpeg_endpoint, FfmpegEndpointRequest};
use mmids_ffmpeg::workflow_steps::ffmpeg_hls::FfmpegHlsStepGenerator;
use mmids_ffmpeg::workflow_steps::ffmpeg_pull::FfmpegPullStepGenerator;
use mmids_ffmpeg::workflow_steps::ffmpeg_rtmp_push::FfmpegRtmpPushStepGenerator;
use mmids_ffmpeg::workflow_steps::ffmpeg_transcode::FfmpegTranscoderStepGenerator;
use mmids_gstreamer::encoders::{
AudioCopyEncoderGenerator, AudioDropEncoderGenerator, AvencAacEncoderGenerator, EncoderFactory,
VideoCopyEncoderGenerator, VideoDropEncoderGenerator, X264EncoderGenerator,
};
use mmids_gstreamer::endpoints::gst_transcoder::{start_gst_transcoder, GstTranscoderRequest};
use mmids_gstreamer::steps::basic_transcoder::BasicTranscodeStepGenerator;
use mmids_http_api::handlers;
use mmids_http_api::routing::{PathPart, Route, RoutingTable};
use mmids_http_api::HttpApiShutdownSignal;
use mmids_rtmp::rtmp_server::{start_rtmp_server_endpoint, RtmpEndpointRequest};
use mmids_rtmp::workflow_steps::rtmp_receive::RtmpReceiverStepGenerator;
use mmids_rtmp::workflow_steps::rtmp_watch::RtmpWatchStepGenerator;
use native_tls::Identity;
use std::env;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot::{channel, Sender};
use tracing::{info, warn, Level};
use tracing_subscriber::fmt::writer::MakeWriterExt;
use tracing_subscriber::{fmt, layer::SubscriberExt};
const RTMP_RECEIVE: &str = "rtmp_receive";
const RTMP_WATCH: &str = "rtmp_watch";
const FORWARD_STEP: &str = "forward_to_workflow";
const BASIC_TRANSCODE_STEP: &str = "basic_transcode";
// ffmpeg steps will be depreciated at some point
const FFMPEG_TRANSCODE: &str = "ffmpeg_transcode";
const FFMPEG_HLS: &str = "ffmpeg_hls";
const FFMPEG_PUSH: &str = "ffmpeg_push";
const FFMPEG_PULL: &str = "ffmpeg_pull";
struct Endpoints {
rtmp: UnboundedSender<RtmpEndpointRequest>,
ffmpeg: UnboundedSender<FfmpegEndpointRequest>,
gst_transcoder: UnboundedSender<GstTranscoderRequest>,
}
#[tokio::main]
pub async fn main() {
// Start logging
let log_dir = get_log_directory();
let mut app_log_path = PathBuf::from(log_dir.clone());
app_log_path.push("application");
let log_level = match env::var("mmids_log") {
Ok(level) => match level.to_lowercase().as_str() {
"error" => Level::ERROR,
"warn" => Level::WARN,
"info" => Level::INFO,
"debug" => Level::DEBUG,
"trace" => Level::TRACE,
_ => Level::INFO,
},
Err(_) => Level::INFO,
};
let appender = tracing_appender::rolling::hourly(app_log_path.clone(), "application.log");
let (non_blocking, _guard) = tracing_appender::non_blocking(appender);
let stdout_writer = std::io::stdout.with_max_level(log_level);
let json_writer = non_blocking.with_max_level(log_level);
let subscriber = tracing_subscriber::registry()
.with(fmt::Layer::new().with_writer(stdout_writer).pretty())
.with(fmt::Layer::new().with_writer(json_writer).json());
tracing::subscriber::set_global_default(subscriber).expect("Unable to set a global collector");
info!("mmmids {} started", env!("CARGO_PKG_VERSION"));
info!("Logging to {}", app_log_path.display().to_string());
let mut metadata_key_map = MetadataKeyMap::new();
let config = read_config();
let tls_options = load_tls_options(&config).await;
let endpoints = start_endpoints(&config, tls_options, log_dir, &mut metadata_key_map);
let (pub_sender, sub_sender) = start_event_hub();
let reactor_manager = start_reactor(&config, sub_sender.clone()).await;
let step_factory = register_steps(
endpoints,
sub_sender,
reactor_manager,
&mut metadata_key_map,
);
let manager = start_workflows(&config, step_factory, pub_sender);
let http_api_shutdown = start_http_api(&config, manager);
tokio::signal::ctrl_c()
.await
.expect("Failed to install ctrl+c signal handler");
if let Some(sender) = http_api_shutdown {
let _ = sender.send(HttpApiShutdownSignal {});
}
}
fn read_config() -> MmidsConfig {
let contents = std::fs::read_to_string("mmids.config").expect("Failed to read 'mmids.config'");
return parse_config_file(contents.as_str()).expect("Failed to parse config file");
}
fn get_log_directory() -> String {
let log_dir = "logs";
let mut log_path = PathBuf::from(log_dir);
if log_path.is_relative() {
log_path = std::env::current_dir().expect("Failed to get current directory");
log_path.push(log_dir);
}
let log_dir = log_path.to_str().unwrap().to_string();
log_dir
}
fn register_steps(
endpoints: Endpoints,
subscription_sender: UnboundedSender<SubscriptionRequest>,
reactor_manager: UnboundedSender<ReactorManagerRequest>,
metadata_key_map: &mut MetadataKeyMap,
) -> Arc<WorkflowStepFactory> {
info!("Starting workflow step factory, and adding known step types to it");
let is_keyframe_metadata_key = get_is_keyframe_metadata_key(metadata_key_map);
let pts_offset_metadata_key = get_pts_offset_metadata_key(metadata_key_map);
let mut step_factory = WorkflowStepFactory::new();
step_factory
.register(
WorkflowStepType(RTMP_RECEIVE.to_string()),
Box::new(RtmpReceiverStepGenerator::new(
endpoints.rtmp.clone(),
reactor_manager.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register rtmp_receive step");
step_factory
.register(
WorkflowStepType(RTMP_WATCH.to_string()),
Box::new(RtmpWatchStepGenerator::new(
endpoints.rtmp.clone(),
reactor_manager.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register rtmp_watch step");
step_factory
.register(
WorkflowStepType(FFMPEG_TRANSCODE.to_string()),
Box::new(FfmpegTranscoderStepGenerator::new(
endpoints.rtmp.clone(),
endpoints.ffmpeg.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register ffmpeg_transcode step");
step_factory
.register(
WorkflowStepType(FFMPEG_HLS.to_string()),
Box::new(FfmpegHlsStepGenerator::new(
endpoints.rtmp.clone(),
endpoints.ffmpeg.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register ffmpeg_hls step");
step_factory
.register(
WorkflowStepType(FFMPEG_PUSH.to_string()),
Box::new(FfmpegRtmpPushStepGenerator::new(
endpoints.rtmp.clone(),
endpoints.ffmpeg.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register ffmpeg_push step");
step_factory
.register(
WorkflowStepType(FFMPEG_PULL.to_string()),
Box::new(FfmpegPullStepGenerator::new(
endpoints.rtmp.clone(),
endpoints.ffmpeg.clone(),
is_keyframe_metadata_key,
pts_offset_metadata_key,
)),
)
.expect("Failed to register ffmpeg_push step");
step_factory
.register(
WorkflowStepType(FORWARD_STEP.to_string()),
Box::new(WorkflowForwarderStepGenerator::new(
subscription_sender,
reactor_manager,
)),
)
.expect("Failed to register forward_to_workflow step");
step_factory
.register(
WorkflowStepType(BASIC_TRANSCODE_STEP.to_string()),
Box::new(BasicTranscodeStepGenerator::new(endpoints.gst_transcoder)),
)
.expect("Failed to register the basic transcoder step");
Arc::new(step_factory)
}
/// Loads TLS options from the application config.
///
/// Returns `None` when no `tls_cert_path` setting is present (TLS is simply
/// disabled). Panics when a certificate path is configured but the password is
/// missing, the file cannot be read, or the pkcs12 data is invalid, since a
/// half-configured TLS setup is a startup error.
async fn load_tls_options(config: &MmidsConfig) -> Option<TlsOptions> {
    info!("Loading TLS options");
    let cert_path = match config.settings.get("tls_cert_path") {
        Some(Some(x)) => x.clone(),
        _ => {
            warn!("No certificate file specified. TLS not available");
            return None;
        }
    };

    let cert_password = match config.settings.get("tls_cert_password") {
        Some(Some(x)) => x.clone(),
        _ => {
            panic!("Certificate file specified but no password given");
        }
    };

    let mut file = match File::open(&cert_path).await {
        Ok(file) => file,
        Err(e) => panic!("Error reading pfx at '{}': {:?}", cert_path, e),
    };

    let mut file_content = Vec::new();
    match file.read_to_end(&mut file_content).await {
        Ok(_) => (),
        // Bug fix: this previously claimed the file failed to *open*, but the
        // open above succeeded — it is the read that failed.
        Err(e) => panic!("Failed to read file {}: {:?}", cert_path, e),
    }

    let identity = match Identity::from_pkcs12(&file_content, cert_password.as_str()) {
        Ok(identity) => identity,
        Err(e) => panic!("Failed reading cert from '{}': {:?}", cert_path, e),
    };

    Some(TlsOptions {
        certificate: identity,
    })
}
/// Constructs and starts every endpoint the application needs (socket manager,
/// RTMP server, ffmpeg, and the gstreamer transcoder) and returns channels used
/// to communicate with them.
///
/// Panics when the required `ffmpeg_path` setting is missing or an endpoint
/// fails to start, since the application cannot run without them.
fn start_endpoints(
    config: &MmidsConfig,
    tls_options: Option<TlsOptions>,
    log_dir: String,
    metadata_key_map: &mut MetadataKeyMap,
) -> Endpoints {
    info!("Starting all endpoints");
    let pts_offset_metadata_key = get_pts_offset_metadata_key(metadata_key_map);

    // The RTMP server rides on top of the shared socket manager, which owns the
    // TLS configuration.
    let socket_manager = start_socket_manager(tls_options);
    let rtmp_endpoint = start_rtmp_server_endpoint(socket_manager);

    let ffmpeg_path = config
        .settings
        .get("ffmpeg_path")
        .expect("No ffmpeg_path setting found")
        .as_ref()
        .expect("no ffmpeg path specified");

    let ffmpeg_endpoint = start_ffmpeg_endpoint(ffmpeg_path.to_string(), log_dir)
        .expect("Failed to start ffmpeg endpoint");

    // Register every known video/audio encoder implementation with the factory
    // consumed by the gstreamer transcoder below.
    let mut encoder_factory = EncoderFactory::new();
    encoder_factory
        .register_video_encoder("drop", Box::new(VideoDropEncoderGenerator {}))
        .expect("Failed to add video drop encoder");
    encoder_factory
        .register_video_encoder(
            "copy",
            Box::new(VideoCopyEncoderGenerator {
                pts_offset_metadata_key,
            }),
        )
        .expect("Failed to add video copy encoder");
    encoder_factory
        .register_video_encoder(
            "x264",
            Box::new(X264EncoderGenerator {
                pts_offset_metadata_key,
            }),
        )
        .expect("Failed to add the x264 encoder");
    encoder_factory
        .register_audio_encoder("drop", Box::new(AudioDropEncoderGenerator {}))
        .expect("Failed to add the audio drop encoder");
    encoder_factory
        .register_audio_encoder("copy", Box::new(AudioCopyEncoderGenerator {}))
        .expect("Failed to add the audio copy encoder");
    encoder_factory
        .register_audio_encoder("avenc_aac", Box::new(AvencAacEncoderGenerator {}))
        .expect("Failed to add the avenc_aac encoder");

    let gst_transcoder = start_gst_transcoder(Arc::new(encoder_factory), pts_offset_metadata_key)
        .expect("Failed to start gst transcoder");

    Endpoints {
        rtmp: rtmp_endpoint,
        ffmpeg: ffmpeg_endpoint,
        gst_transcoder,
    }
}
/// Starts the workflow manager and submits every workflow from the config to it
/// as an upsert, returning the manager's request channel.
///
/// Send failures are deliberately ignored; a closed manager channel is handled
/// by the rest of the application.
fn start_workflows(
    config: &MmidsConfig,
    step_factory: Arc<WorkflowStepFactory>,
    event_hub_publisher: UnboundedSender<PublishEventRequest>,
) -> UnboundedSender<WorkflowManagerRequest> {
    info!("Starting workflow manager");
    let manager = start_workflow_manager(step_factory, event_hub_publisher);

    config.workflows.values().for_each(|workflow| {
        let request = WorkflowManagerRequest {
            request_id: "mmids-app-startup".to_string(),
            operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                definition: workflow.clone(),
            },
        };

        let _ = manager.send(request);
    });

    manager
}
/// Starts the HTTP API server when an `http_api_port` setting is present,
/// returning the shutdown signal channel, or `None` when the API is disabled.
///
/// Panics when the configured port is not a valid u16 or when a route fails to
/// register — both are startup-time configuration errors.
fn start_http_api(
    config: &MmidsConfig,
    manager: UnboundedSender<WorkflowManagerRequest>,
) -> Option<Sender<HttpApiShutdownSignal>> {
    let port = match config.settings.get("http_api_port") {
        Some(Some(value)) => match value.parse::<u16>() {
            Ok(port) => port,
            Err(_) => {
                panic!("http_api_port value of '{}' is not a valid number", value);
            }
        },
        _ => {
            warn!("No `http_api_port` setting specified. HTTP api disabled");
            return None;
        }
    };

    let mut routes = RoutingTable::new();

    // GET /workflows -> list all workflows
    routes
        .register(Route {
            method: Method::GET,
            path: vec![PathPart::Exact {
                value: "workflows".to_string(),
            }],
            handler: Box::new(handlers::list_workflows::ListWorkflowsHandler::new(
                manager.clone(),
            )),
        })
        .expect("Failed to register list workflows route");

    // GET /workflows/{workflow} -> details for a single workflow
    routes
        .register(Route {
            method: Method::GET,
            path: vec![
                PathPart::Exact {
                    value: "workflows".to_string(),
                },
                PathPart::Parameter {
                    name: "workflow".to_string(),
                },
            ],
            handler: Box::new(
                handlers::get_workflow_details::GetWorkflowDetailsHandler::new(manager.clone()),
            ),
        })
        .expect("Failed to register get workflow details route");

    // DELETE /workflows/{workflow} -> stop a workflow
    routes
        .register(Route {
            method: Method::DELETE,
            path: vec![
                PathPart::Exact {
                    value: "workflows".to_string(),
                },
                PathPart::Parameter {
                    name: "workflow".to_string(),
                },
            ],
            handler: Box::new(handlers::stop_workflow::StopWorkflowHandler::new(
                manager.clone(),
            )),
        })
        .expect("Failed to register stop workflow route");

    // PUT /workflows -> create or update (upsert) a workflow
    routes
        .register(Route {
            method: Method::PUT,
            path: vec![PathPart::Exact {
                value: "workflows".to_string(),
            }],
            handler: Box::new(handlers::start_workflow::StartWorkflowHandler::new(manager)),
        })
        .expect("Failed to register start workflow route");

    // GET / -> version string
    routes
        .register(Route {
            method: Method::GET,
            path: Vec::new(),
            handler: Box::new(http_handlers::VersionHandler),
        })
        .expect("Failed to register version route");

    // Binds to loopback only, so the API is reachable only from the local machine.
    let addr = ([127, 0, 0, 1], port).into();
    Some(mmids_http_api::start_http_api(addr, routes))
}
/// Builds the reactor executor factory, starts the reactor manager, and creates
/// every reactor defined in the config, panicking if any fail to start.
async fn start_reactor(
    config: &MmidsConfig,
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
) -> UnboundedSender<ReactorManagerRequest> {
    let mut executor_factory = ReactorExecutorFactory::new();
    executor_factory
        .register(
            "simple_http".to_string(),
            Box::new(SimpleHttpExecutorGenerator {}),
        )
        .expect("Failed to add simple_http reactor executor");

    let reactor_manager = start_reactor_manager(executor_factory, event_hub_subscriber.clone());

    // Create each configured reactor and wait for confirmation before moving on.
    for (name, definition) in &config.reactors {
        let (response_sender, response_receiver) = channel();
        let _ = reactor_manager.send(ReactorManagerRequest::CreateReactor {
            definition: definition.clone(),
            response_channel: response_sender,
        });

        let result = response_receiver.await;
        match result {
            Ok(CreateReactorResult::Success) => (),
            Ok(failure) => panic!("Failed to start reactor {}: {:?}", name, failure),
            Err(_) => panic!("Reactor manager closed unexpectedly"),
        }
    }

    reactor_manager
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-app/src/http_handlers.rs | mmids-app/src/http_handlers.rs | use async_trait::async_trait;
use hyper::{Body, Error, Request, Response};
use mmids_http_api::routing::RouteHandler;
use std::collections::HashMap;
/// Route handler that reports the mmids version compiled into this binary.
pub struct VersionHandler;

#[async_trait]
impl RouteHandler for VersionHandler {
    async fn execute(
        &self,
        _request: &mut Request<Body>,
        _path_parameters: HashMap<String, String>,
        _request_id: String,
    ) -> Result<Response<Body>, Error> {
        // The version string is baked in at compile time from Cargo metadata.
        let body = Body::from(format!("Mmids version {}", env!("CARGO_PKG_VERSION")));
        Ok(Response::new(body))
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/reactor-test-server/src/main.rs | reactor-test-server/src/main.rs | use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Result, Server, StatusCode};
use serde::Deserialize;
use tokio::fs::File;
use tokio_util::codec::{BytesCodec, FramedRead};
const DIRECTORY: &str = "workflows";
// JSON body expected on incoming POST requests.
#[derive(Deserialize)]
struct RequestContent {
    // Name of the stream whose workflow file is being requested.
    stream_name: String,
}
#[tokio::main]
async fn main() {
let addr = "127.0.0.1:9055".parse().unwrap();
let make_service =
make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(get_response)) });
let server = Server::bind(&addr).serve(make_service);
println!("Listening on http://{}", addr);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
/// Handles one request: a POST to `/<sub_directory>` with a JSON body naming a
/// stream returns the matching `.mmids` file from disk, otherwise 404 (or 400
/// for unparseable JSON).
async fn get_response(req: Request<Body>) -> Result<Response<Body>> {
    // Only POSTs to a non-root path are serviceable.
    if req.method() != &Method::POST {
        return Ok(not_found());
    }

    let uri_path = req.uri().path().to_string();
    if uri_path == "/" {
        return Ok(not_found());
    }

    let sub_directory = uri_path[1..].to_string();
    let whole_body = hyper::body::to_bytes(req.into_body()).await?;

    let content: RequestContent = match serde_json::from_slice(&whole_body) {
        Ok(content) => content,
        Err(error) => {
            println!("Error parsing json from body: {:?}", error);
            return Ok(Response::builder()
                .status(StatusCode::BAD_REQUEST)
                .body(format!("Error parsing json from body: {:?}", error).into())
                .unwrap());
        }
    };

    println!(
        "Request came in for {}/'{}'",
        sub_directory, content.stream_name
    );

    let file_path = format!(
        "{}/{}/{}.mmids",
        DIRECTORY, sub_directory, content.stream_name
    );

    match File::open(&file_path).await {
        Ok(file) => {
            // Stream the file back rather than buffering it fully in memory.
            let stream = FramedRead::new(file, BytesCodec::new());
            Ok(Response::new(Body::wrap_stream(stream)))
        }
        Err(_) => {
            println!("File '{}' not found", file_path);
            Ok(not_found())
        }
    }
}
/// Builds a plain-text 404 response.
fn not_found() -> Response<Body> {
    let builder = Response::builder().status(StatusCode::NOT_FOUND);
    builder.body("not found".into()).unwrap()
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/config.rs | mmids-core/src/config.rs | use crate::reactors::ReactorDefinition;
use crate::workflows::definitions::{WorkflowDefinition, WorkflowStepDefinition, WorkflowStepType};
use pest::iterators::{Pair, Pairs};
use pest::Parser;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tracing::warn;
/// Configuration for a Mmids system. Defines the settings and any workflows that should be active.
pub struct MmidsConfig {
    /// Global key/value settings; a key mapped to `None` is a bare flag.
    pub settings: HashMap<String, Option<String>>,
    /// Reactor definitions keyed by reactor name.
    pub reactors: HashMap<Arc<String>, ReactorDefinition>,
    /// Workflow definitions keyed by workflow name.
    pub workflows: HashMap<Arc<String>, WorkflowDefinition>,
}
/// Errors that can occur when parsing a configuration entry
#[derive(Error, Debug)]
pub enum ConfigParseError {
#[error("The config provided could not be parsed")]
InvalidConfig(#[from] pest::error::Error<Rule>),
#[error("Found unexpected rule '{rule:?}' in the {section} section")]
UnexpectedRule { rule: Rule, section: String },
#[error("Duplicate workflow name: '{name}'")]
DuplicateWorkflowName { name: Arc<String> },
#[error("Invalid node name '{name}' on line {line}")]
InvalidNodeName { name: String, line: usize },
#[error("Arguments are not allowed on a settings node, but some were found on line {line}")]
ArgumentsSpecifiedOnSettingNode { line: usize },
#[error("More than 1 argument was provided for the setting on line {line}")]
TooManySettingArguments { line: usize },
#[error("The argument provided for the setting on line {line} is invalid. Equal signs are not allowed")]
InvalidSettingArgumentFormat { line: usize },
#[error(
"The `routed_by_reactor` argument on line {line} is invalid. Equal signs are not allowed"
)]
InvalidRoutedByReactorArgument { line: usize },
#[error("The workflow on line {line} did not have a name specified")]
NoNameOnWorkflow { line: usize },
#[error("Invalid workflow name of {name} on line {line}")]
InvalidWorkflowName { line: usize, name: String },
#[error("The reactor on line {line} did not have a name specified")]
NoNameOnReactor { line: usize },
#[error("Invalid workflow name of '{name}' on line {line}")]
InvalidReactorName { line: usize, name: String },
#[error("The reactor on line {line} has an invalid update_interval value of '{argument}'. This value must be a number")]
InvalidUpdateIntervalValue { line: usize, argument: String },
#[error(
"The reactor parameter's value on line {line} is invalid. Equal signs are not allowed"
)]
InvalidReactorParameterValueFormat { line: usize },
#[error("The reactor parameter on line {line} had multiple values. Only 1 is allowed")]
TooManyReactorParameterValues { line: usize },
#[error("Multiple reactors have the name of '{name}'. Each reactor must have a unique name")]
DuplicateReactorName { name: Arc<String> },
#[error("The executor on line {line} did not have an executor specified")]
NoExecutorForReactor { line: usize },
}
// Pest parser generated from the grammar in `config.pest`.
#[derive(Parser)]
#[grammar = "config.pest"]
struct RawConfigParser;
// A parsed child node: its name plus any `key` / `key=value` arguments.
struct ChildNode {
    name: String,
    arguments: HashMap<String, Option<String>>,
}
/// Parses configuration from a text block.
pub fn parse(content: &str) -> Result<MmidsConfig, Box<ConfigParseError>> {
    let mut config = MmidsConfig {
        settings: HashMap::new(),
        reactors: HashMap::new(),
        workflows: HashMap::new(),
    };

    let pairs = RawConfigParser::parse(Rule::content, content)
        .map_err(|error| Box::new(ConfigParseError::InvalidConfig(error)))?;

    // Only top-level node blocks (and the end-of-input marker) are expected here.
    for pair in pairs {
        match pair.as_rule() {
            Rule::node_block => handle_node_block(&mut config, pair)?,
            Rule::EOI => (),
            unexpected => {
                return Err(Box::new(ConfigParseError::UnexpectedRule {
                    rule: unexpected,
                    section: "root".to_string(),
                }))
            }
        }
    }

    Ok(config)
}
/// Dispatches a top-level node block (`settings`, `workflow`, or `reactor`,
/// matched case-insensitively) to the appropriate section reader.
fn handle_node_block(
    config: &mut MmidsConfig,
    pair: Pair<Rule>,
) -> Result<(), Box<ConfigParseError>> {
    let mut inner = pair.into_inner();
    let name_node = inner.next().unwrap(); // grammar requires a node name
    let line = name_node.as_span().start_pos().line_col().0;
    let name = name_node.as_str().trim();

    match name.to_lowercase().as_str() {
        "settings" => read_settings(config, inner),
        "workflow" => read_workflow(config, inner, line),
        "reactor" => read_reactor(config, inner, line),
        _ => Err(Box::new(ConfigParseError::InvalidNodeName {
            name: name.to_string(),
            line,
        })),
    }
}
/// Reads the `settings` block. Each child node is either a bare flag (stored
/// with a `None` value) or `name value`, where the value must itself be a bare
/// key — equal signs are rejected.
fn read_settings(
    config: &mut MmidsConfig,
    pairs: Pairs<Rule>,
) -> Result<(), Box<ConfigParseError>> {
    for pair in pairs {
        let line = get_line_number(&pair);
        match pair.as_rule() {
            Rule::child_node => {
                let node = read_child_node(pair)?;

                // A setting carries at most one value.
                if node.arguments.len() > 1 {
                    return Err(Box::new(ConfigParseError::TooManySettingArguments {
                        line,
                    }));
                }

                let value = match node.arguments.iter().next() {
                    // `key=value` form is not allowed for settings.
                    Some((_, Some(_))) => {
                        return Err(Box::new(ConfigParseError::InvalidSettingArgumentFormat {
                            line,
                        }))
                    }
                    Some((key, None)) => Some(key.clone()),
                    None => None,
                };

                config.settings.insert(node.name, value);
            }
            Rule::argument => {
                return Err(Box::new(
                    ConfigParseError::ArgumentsSpecifiedOnSettingNode { line },
                ));
            }
            rule => {
                return Err(Box::new(ConfigParseError::UnexpectedRule {
                    rule,
                    section: "settings".to_string(),
                }));
            }
        }
    }

    Ok(())
}
/// Reads a `workflow` block into the config.
///
/// The first bare argument is the workflow's name; a `routed_by_reactor` flag
/// marks the workflow as managed by a reactor. Every child node becomes a
/// workflow step whose node name is the step type.
fn read_workflow(
    config: &mut MmidsConfig,
    pairs: Pairs<Rule>,
    starting_line: usize,
) -> Result<(), Box<ConfigParseError>> {
    let mut steps = Vec::new();
    let mut workflow_name = None;
    let mut routed_by_reactor = false;
    for pair in pairs {
        match pair.as_rule() {
            Rule::child_node => {
                // Each child node is one workflow step.
                let child_node = read_child_node(pair)?;
                steps.push(WorkflowStepDefinition {
                    step_type: WorkflowStepType(child_node.name),
                    parameters: child_node.arguments,
                });
            }
            Rule::argument => {
                let (key, value) = read_argument(pair.clone())?;
                if workflow_name.is_some() {
                    // The name has been seen: remaining arguments are options.
                    if &key == "routed_by_reactor" {
                        // `routed_by_reactor` is a bare flag; a value means the
                        // user wrote `routed_by_reactor=...`, which is invalid.
                        if value.is_some() {
                            return Err(Box::new(
                                ConfigParseError::InvalidRoutedByReactorArgument {
                                    line: get_line_number(&pair),
                                },
                            ));
                        }
                        routed_by_reactor = true;
                    } else {
                        // Unknown options only warn, keeping old configs working.
                        let line = get_line_number(&pair);
                        warn!(
                            workflow_name = %workflow_name.as_ref().unwrap(),
                            line = %line,
                            argument = %key,
                            "Unknown argument '{}' for workflow {} on line {}",
                            key, workflow_name.as_ref().unwrap(), line,
                        );
                    }
                } else {
                    // The first argument is the workflow name and must be a
                    // bare key (no equal sign).
                    if value.is_some() {
                        return Err(Box::new(ConfigParseError::InvalidWorkflowName {
                            name: pair.as_str().to_string(),
                            line: get_line_number(&pair),
                        }));
                    }
                    workflow_name = Some(Arc::new(key));
                }
            }
            rule => {
                return Err(Box::new(ConfigParseError::UnexpectedRule {
                    rule,
                    section: "workflow".to_string(),
                }));
            }
        }
    }
    if let Some(name) = workflow_name {
        // Workflow names must be unique across the whole config.
        if config.workflows.contains_key(&name) {
            return Err(Box::new(ConfigParseError::DuplicateWorkflowName { name }));
        }
        config.workflows.insert(
            name.clone(),
            WorkflowDefinition {
                name,
                steps,
                routed_by_reactor,
            },
        );
    } else {
        return Err(Box::new(ConfigParseError::NoNameOnWorkflow {
            line: starting_line,
        }));
    }
    Ok(())
}
/// Reads a `reactor` block into the config.
///
/// The first bare argument is the reactor's name, `executor=` selects the
/// executor implementation (required), and `update_interval=` (in seconds)
/// controls re-polling. Child nodes become parameters passed to the executor.
fn read_reactor(
    config: &mut MmidsConfig,
    pairs: Pairs<Rule>,
    starting_line: usize,
) -> Result<(), Box<ConfigParseError>> {
    let mut name = None;
    let mut parameters = HashMap::new();
    let mut executor_name = None;
    let mut update_interval = 0;
    for pair in pairs {
        match pair.as_rule() {
            Rule::argument => {
                let (key, value) = read_argument(pair.clone())?;
                if name.is_none() {
                    // Name must come first and only have a key, no pair
                    if value.is_some() {
                        return Err(Box::new(ConfigParseError::InvalidReactorName {
                            line: get_line_number(&pair),
                            name: pair.as_str().to_string(),
                        }));
                    }
                    name = Some(Arc::new(key));
                } else if key == "executor" {
                    if let Some(value) = value {
                        executor_name = Some(value);
                    }
                } else if key == "update_interval" {
                    // The interval must be a plain number of seconds.
                    if let Some(value) = value {
                        if let Ok(num) = value.parse() {
                            update_interval = num;
                        } else {
                            return Err(Box::new(ConfigParseError::InvalidUpdateIntervalValue {
                                line: get_line_number(&pair),
                                argument: value,
                            }));
                        }
                    } else {
                        return Err(Box::new(ConfigParseError::InvalidUpdateIntervalValue {
                            line: get_line_number(&pair),
                            argument: "".to_string(),
                        }));
                    }
                } else {
                    // Unknown arguments are tolerated with a warning so configs
                    // stay forward compatible.
                    let line = get_line_number(&pair);
                    warn!(
                        line = %line,
                        argument = %key,
                        reactor_name = %name.as_ref().unwrap(),
                        "Unknown argument '{}' for reactor {} on line {}",
                        key, name.as_ref().unwrap(), line,
                    );
                }
            }
            Rule::child_node => {
                let line_number = pair.as_span().start_pos().line_col().0;
                let child_node = read_child_node(pair)?;

                // A parameter may carry at most one value, and that value must
                // be a bare key (no equal sign).
                if child_node.arguments.len() > 1 {
                    return Err(Box::new(ConfigParseError::TooManyReactorParameterValues {
                        line: line_number,
                    }));
                }
                if let Some(key) = child_node.arguments.keys().next() {
                    if let Some(Some(_)) = child_node.arguments.get(key) {
                        return Err(Box::new(
                            ConfigParseError::InvalidReactorParameterValueFormat {
                                line: line_number,
                            },
                        ));
                    }
                    parameters.insert(child_node.name, Some(key.clone()));
                } else {
                    parameters.insert(child_node.name, None);
                }
            }
            rule => {
                // Bug fix: this previously reported the section as "settings"
                // (copy-pasted from read_settings), making unexpected-rule
                // errors inside reactor blocks misleading.
                return Err(Box::new(ConfigParseError::UnexpectedRule {
                    rule,
                    section: "reactor".to_string(),
                }));
            }
        }
    }
    if let Some(name) = name {
        // Reactor names must be unique across the whole config.
        if config.reactors.contains_key(&name) {
            return Err(Box::new(ConfigParseError::DuplicateReactorName { name }));
        }
        if let Some(executor) = executor_name {
            config.reactors.insert(
                name.clone(),
                ReactorDefinition {
                    name,
                    parameters,
                    executor,
                    update_interval: Duration::from_secs(update_interval),
                },
            );
        } else {
            return Err(Box::new(ConfigParseError::NoExecutorForReactor {
                line: starting_line,
            }));
        }
    } else {
        return Err(Box::new(ConfigParseError::NoNameOnReactor {
            line: starting_line,
        }));
    }
    Ok(())
}
/// Reads a single argument, returning `(key, None)` for a bare flag or quoted
/// string, and `(key, Some(value))` for a `key=value` pair.
fn read_argument(pair: Pair<Rule>) -> Result<(String, Option<String>), Box<ConfigParseError>> {
    let result;
    // Each argument should have a single child rule based on grammar
    let argument = pair.into_inner().next().unwrap();
    match argument.as_rule() {
        Rule::argument_flag => {
            result = (argument.as_str().to_string(), None);
        }
        Rule::quoted_string_value => {
            // NOTE(review): a bare quoted string is treated the same as a flag
            // (key with no value) — presumably intentional; confirm against how
            // the grammar uses quoted_string_value.
            result = (argument.as_str().to_string(), None);
        }
        Rule::key_value_pair => {
            let mut key = "".to_string();
            let mut value = "".to_string();
            for inner in argument.into_inner() {
                match inner.as_rule() {
                    Rule::key => key = inner.as_str().to_string(),
                    Rule::value => {
                        // If this is a quoted string value, we need to unquote it,
                        // otherwise use the value as-is
                        value = inner
                            .clone()
                            .into_inner()
                            .filter(|p| p.as_rule() == Rule::quoted_string_value)
                            .map(|p| p.as_str().to_string())
                            .next()
                            .unwrap_or_else(|| inner.as_str().to_string());
                    }
                    rule => {
                        return Err(Box::new(ConfigParseError::UnexpectedRule {
                            rule,
                            section: "argument".to_string(),
                        }))
                    }
                }
            }
            result = (key, Some(value));
        }
        _ => {
            return Err(Box::new(ConfigParseError::UnexpectedRule {
                rule: argument.as_rule(),
                section: "child_node argument".to_string(),
            }));
        }
    }
    Ok(result)
}
/// Reads a child node: its leading name followed by zero or more arguments.
fn read_child_node(child_node: Pair<Rule>) -> Result<ChildNode, Box<ConfigParseError>> {
    let mut pairs = child_node.into_inner();

    // Grammar requires a node name first
    let name = pairs.next().unwrap().as_str().to_string();

    let mut arguments = HashMap::new();
    for pair in pairs {
        match pair.as_rule() {
            Rule::argument => {
                let (key, value) = read_argument(pair)?;
                arguments.insert(key, value);
            }
            rule => {
                return Err(Box::new(ConfigParseError::UnexpectedRule {
                    rule,
                    section: "child_node".to_string(),
                }));
            }
        }
    }

    Ok(ChildNode { name, arguments })
}
// Returns the 1-based line number where the given pair starts in the source text.
fn get_line_number(node: &Pair<Rule>) -> usize {
    node.as_span().start_pos().line_col().0
}
#[cfg(test)]
mod tests {
    use super::*;

    // All tests exercise the public `parse` entry point end to end.

    #[test]
    fn can_parse_settings() {
        let content = "
        settings {
            first a
            second \"C:\\program files\\ffmpeg\\bin\\ffmpeg.exe\"
            flag
        }
        ";
        let config = parse(content).unwrap();
        assert_eq!(config.settings.len(), 3, "Unexpected number of settings");
        assert_eq!(
            config.settings.get("first"),
            Some(&Some("a".to_string())),
            "Unexpected first value"
        );
        assert_eq!(
            config.settings.get("second"),
            Some(&Some(
                "C:\\program files\\ffmpeg\\bin\\ffmpeg.exe".to_string()
            )),
            "Unexpected second value"
        );
        assert_eq!(
            config.settings.get("flag"),
            Some(&None),
            "Unexpected flag value"
        );
    }

    #[test]
    fn can_read_single_workflow() {
        let content = "
        workflow name {
            rtmp_receive port=1935 app=receive stream_key=*
            hls path=c:\\temp\\test.m3u8 segment_size=\"3\" size=640x480 flag
        }
        ";
        let config = parse(content).unwrap();
        assert_eq!(config.workflows.len(), 1, "Unexpected number of workflows");
        assert!(
            config.workflows.contains_key(&Arc::new("name".to_string())),
            "workflow 'name' did not exist"
        );
        let workflow = config.workflows.get(&Arc::new("name".to_string())).unwrap();
        assert_eq!(workflow.name.as_str(), "name", "Unexpected workflow name");
        assert_eq!(
            workflow.steps.len(),
            2,
            "Unexpected number of workflow steps"
        );
        assert!(
            !workflow.routed_by_reactor,
            "Expected routed by reactor to be false"
        );
        let step1 = workflow.steps.get(0).unwrap();
        assert_eq!(
            step1.step_type.0,
            "rtmp_receive".to_string(),
            "Unexpected type of step 1"
        );
        assert_eq!(step1.parameters.len(), 3, "Unexpected number of parameters");
        assert_eq!(
            step1.parameters.get("port"),
            Some(&Some("1935".to_string())),
            "Unexpected step 1 port value"
        );
        assert_eq!(
            step1.parameters.get("app"),
            Some(&Some("receive".to_string())),
            "Unexpected step 1 app value"
        );
        assert_eq!(
            step1.parameters.get("stream_key"),
            Some(&Some("*".to_string())),
            "Unexpected step 1 stream_key value"
        );
        let step2 = workflow.steps.get(1).unwrap();
        assert_eq!(
            step2.step_type.0,
            "hls".to_string(),
            "Unexpected type of step 1"
        );
        assert_eq!(step2.parameters.len(), 4, "Unexpected number of parameters");
        assert_eq!(
            step2.parameters.get("path"),
            Some(&Some("c:\\temp\\test.m3u8".to_string())),
            "Unexpected step 2 path value"
        );
        assert_eq!(
            step2.parameters.get("segment_size"),
            Some(&Some("3".to_string())),
            "Unexpected step 2 segment_size value"
        );
        assert_eq!(
            step2.parameters.get("size"),
            Some(&Some("640x480".to_string())),
            "Unexpected step 2 size value"
        );
        assert_eq!(
            step2.parameters.get("flag"),
            Some(&None),
            "Unexpected step 2 flag value"
        );
    }

    #[test]
    fn can_read_multiple_workflows() {
        let content = "
        workflow name {
            rtmp_receive port=1935 app=receive stream_key=*
            hls path=c:\\temp\\test.m3u8 segment_size=\"3\" size=640x480 flag
        }
        workflow name2 {
            another a
        }
        ";
        let config = parse(content).unwrap();
        assert_eq!(config.workflows.len(), 2, "Unexpected number of workflows");
        assert!(
            config.workflows.contains_key(&Arc::new("name".to_string())),
            "Could not find a workflow named 'name'"
        );
        assert!(
            config
                .workflows
                .contains_key(&Arc::new("name2".to_string())),
            "Could not find a workflow named 'name2'"
        );
    }

    #[test]
    fn can_read_single_reactor() {
        let content = "
        reactor name executor=abc {
            param1 value
            param2 value2
        }
        ";
        let config = parse(content).unwrap();
        assert_eq!(config.reactors.len(), 1, "Unexpected number of reactors");
        assert!(
            config.reactors.contains_key(&Arc::new("name".to_string())),
            "Reactor in config did not have the expected name"
        );
        let reactor = &config.reactors[&Arc::new("name".to_string())];
        assert_eq!(reactor.name.as_str(), "name", "Unexpected name of reactor");
        assert_eq!(reactor.executor, "abc".to_string(), "Unexpected executor");
        assert_eq!(
            reactor.parameters.len(),
            2,
            "Unexpected number of parameters"
        );
        assert_eq!(
            reactor.parameters.get("param1"),
            Some(&Some("value".to_string())),
            "Unexpected param1 value"
        );
        assert_eq!(
            reactor.parameters.get("param2"),
            Some(&Some("value2".to_string())),
            "Unexpected param2 value"
        );
    }

    #[test]
    fn duplicate_workflow_name_returns_error() {
        let content = "
        workflow name {
            rtmp_receive port=1935 app=receive stream_key=*
            hls path=c:\\temp\\test.m3u8 segment_size=\"3\" size=640x480 flag
        }
        workflow name {
            another a
        }
        ";
        match parse(content) {
            Err(error) => match *error {
                ConfigParseError::DuplicateWorkflowName { name } => {
                    if name.as_str() != "name" {
                        panic!("Unexpected name in workflow: '{}'", name);
                    }
                }
                other => panic!(
                    "Expected duplicate workflow name error, instead got: {:?}",
                    other
                ),
            },
            Ok(_) => panic!("Received successful parse, but an error was expected"),
        }
    }

    #[test]
    fn full_config_can_be_parsed() {
        let content = "
        # comment
        settings {
            first a # another comment
            second \"C:\\program files\\ffmpeg\\bin\\ffmpeg.exe\"
            flag
        }
        workflow name { #workflow comment
            rtmp_receive port=1935 app=receive stream_key=* #step comment
            hls path=c:\\temp\\test.m3u8 segment_size=\"3\" size=640x480 flag
        }
        workflow name2 {
            another a
        }
        ";
        parse(content).unwrap();
    }

    #[test]
    fn can_parse_routed_by_reactor_argument_on_workflow() {
        let content = "
        workflow name routed_by_reactor {
            rtmp_receive port=1935 app=receive stream_key=*
        }
        ";
        let config = parse(content).unwrap();
        let workflow = config.workflows.get(&Arc::new("name".to_string())).unwrap();
        assert!(
            workflow.routed_by_reactor,
            "Expected routed by workflow to be true"
        );
    }

    #[test]
    fn comments_can_have_greater_than_or_less_than_signs() {
        let content = "
        settings {
            # <test>
        }
        ";
        parse(content).unwrap();
    }

    #[test]
    fn comments_can_have_back_ticks() {
        let content = "\
        settings {
            # `test `
        }
        ";
        parse(content).unwrap();
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/actor_utils.rs | mmids-core/src/actor_utils.rs | //! Utilities useful for actor implementations.
use std::future::Future;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
/// Watches a tokio `UnboundedReceiver` for a message, and when a message is received sends that
/// message to the actor via the `received_message` transformation function.
///
/// When the watched channel closes, `closed_message` is sent to the actor and the
/// watch task exits. The task also exits (without sending anything) when the
/// actor's own channel closes, since there is nobody left to notify.
pub fn notify_on_unbounded_recv<RecvMessage, ActorMessage>(
    mut receiver: UnboundedReceiver<RecvMessage>,
    actor_channel: UnboundedSender<ActorMessage>,
    received_message: impl Fn(RecvMessage) -> ActorMessage + Send + 'static,
    closed_message: impl FnOnce() -> ActorMessage + Send + 'static,
) where
    RecvMessage: Send + 'static,
    ActorMessage: Send + 'static,
{
    tokio::spawn(async move {
        loop {
            tokio::select! {
                received = receiver.recv() => {
                    match received {
                        Some(msg) => {
                            // Forward the transformed message; send failures are
                            // ignored because actor shutdown is detected below.
                            let actor_msg = received_message(msg);
                            let _ = actor_channel.send(actor_msg);
                        }
                        None => {
                            // Sender side closed: notify the actor and stop watching.
                            let actor_msg = closed_message();
                            let _ = actor_channel.send(actor_msg);
                            break;
                        }
                    }
                }
                _ = actor_channel.closed() => {
                    // The actor is gone; nothing left to forward to.
                    break;
                }
            }
        }
    });
}
/// Watches a tokio `UnboundedSender` and, once that channel closes, sends the
/// message produced by `closed_message` to the actor. If the actor's own
/// channel closes first, the task simply exits.
pub fn notify_on_unbounded_closed<SenderMessage, ActorMessage>(
    sender: UnboundedSender<SenderMessage>,
    actor_channel: UnboundedSender<ActorMessage>,
    closed_message: impl FnOnce() -> ActorMessage + Send + 'static,
) where
    SenderMessage: Send + 'static,
    ActorMessage: Send + 'static,
{
    tokio::spawn(async move {
        tokio::select! {
            // The watched channel closed: tell the actor about it.
            _ = sender.closed() => {
                let _ = actor_channel.send(closed_message());
            }
            // Can't send a message anywhere so just stop.
            _ = actor_channel.closed() => {}
        }
    });
}
/// Allows notifying an actor when any arbitrary future is resolved. The
/// future's output is transformed via `completion_message` and sent to the
/// actor; if the actor's channel closes first the task just ends.
pub fn notify_on_future_completion<FutureResult, ActorMessage>(
    future: impl Future<Output = FutureResult> + Send + 'static,
    actor_channel: UnboundedSender<ActorMessage>,
    completion_message: impl FnOnce(FutureResult) -> ActorMessage + Send + 'static,
) where
    FutureResult: Send + 'static,
    ActorMessage: Send + 'static,
{
    tokio::spawn(async move {
        tokio::select! {
            // The future finished; transform its output and forward it.
            result = future => {
                let _ = actor_channel.send(completion_message(result));
            }
            // Can't send a message so just end.
            _ = actor_channel.closed() => {}
        }
    });
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/lib.rs | mmids-core/src/lib.rs | //! This crate contains all the building blocks and foundational systems that a mmids application
//! requires. It also contains standard workflow steps that are likely to be used in most
//! mmids applications.
extern crate pest;
#[macro_use]
extern crate pest_derive;
use std::num::Wrapping;
use std::sync::Arc;
use std::time::Duration;
use tracing::error;
pub mod actor_utils;
pub mod codecs;
pub mod config;
pub mod event_hub;
pub mod net;
pub mod reactors;
#[cfg(feature = "test-utils")]
pub mod test_utils;
pub mod workflows;
/// Unique identifier that identifies the flow of video end-to-end. Normally when media data enters
/// the beginning of a workflow it will be given a unique stream identifier, and it will keep that
/// identifier until it leaves the last stage of the workflow. This allows for logging to give
/// visibility of how media is processed throughout its entire lifetime.
///
/// If a workflow has a step that requires media to leave the system and then come back in for
/// further steps, then it should keep the same stream identifier. For example, if
/// a workflow has an ffmpeg transcoding step in the workflow (e.g. to add a watermark), when
/// ffmpeg pushes the video back in it will keep the same identifier.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct StreamId(pub Arc<String>);
/// Represents timestamps relevant to video data. Contains the decoding time stamp (dts) and
/// presentation time stamp (pts).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct VideoTimestamp {
    // Absolute decode timestamp.
    dts: Duration,
    // Millisecond offset of pts relative to dts, constrained to a signed
    // 24 bit range (see `from_durations`).
    pts_offset: i32,
}
impl VideoTimestamp {
    /// Creates a new video timestamp based on absolute dts and pts values.
    ///
    /// If the pts/dts difference does not fit in a signed 24 bit value, the
    /// offset is clamped to zero (pts == dts) and an error is logged.
    pub fn from_durations(dts: Duration, pts: Duration) -> Self {
        let mut pts_offset = pts.as_millis() as i64 - dts.as_millis() as i64;

        // Signed 24 bit range is -8388608..=8388607. Bug fix: the upper bound
        // was previously written as `838607` (a digit dropped) and exclusive,
        // which wrongly rejected valid offsets between 838607 and 8388607.
        if !(-8388608..=8388607).contains(&pts_offset) {
            error!("PTS ({pts:?}) and DTS ({dts:?}) differ by more than a 24 bit number. Setting pts = dts");
            pts_offset = 0;
        }

        VideoTimestamp {
            dts,
            pts_offset: pts_offset as i32,
        }
    }

    /// Creates a video timestamp at zero
    pub fn from_zero() -> Self {
        VideoTimestamp {
            dts: Duration::new(0, 0),
            pts_offset: 0,
        }
    }

    /// Gets the decoding time stamp for this video packet
    pub fn dts(&self) -> Duration {
        self.dts
    }

    /// Gets the presentation time stamp for the video packet, computed as
    /// dts plus the stored offset using wrapping millisecond arithmetic.
    pub fn pts(&self) -> Duration {
        let mut dts = Wrapping(self.dts.as_millis() as u64);
        if self.pts_offset > 0 {
            dts += Wrapping(self.pts_offset as u64);
        } else {
            dts -= Wrapping((-self.pts_offset) as u64);
        }
        Duration::from_millis(dts.0)
    }

    /// Gets the offset from the decoding timestamp for the pts
    pub fn pts_offset(&self) -> i32 {
        self.pts_offset
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/test_utils.rs | mmids-core/src/test_utils.rs | use futures::future::BoxFuture;
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use std::fmt::Debug;
use std::time::Duration;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::oneshot::Receiver;
use tokio::time::timeout;
/// Waits up to 10ms for a message on the receiver, panicking if the channel
/// closes or nothing arrives within the window.
pub async fn expect_mpsc_response<T>(receiver: &mut UnboundedReceiver<T>) -> T {
    let result = timeout(Duration::from_millis(10), receiver.recv()).await;
    match result {
        Ok(Some(response)) => response,
        Ok(None) => panic!("Channel unexpectedly closed"),
        Err(_) => panic!("No response received within timeout period"),
    }
}
/// Waits up to 10ms for the oneshot to resolve, panicking if the sender was
/// dropped or nothing arrives within the window.
pub async fn expect_oneshot_response<T>(receiver: Receiver<T>) -> T {
    let result = timeout(Duration::from_millis(10), receiver).await;
    match result {
        Ok(Ok(response)) => response,
        Ok(Err(_)) => panic!("Channel unexpectedly closed"),
        Err(_) => panic!("No response received within timeout period"),
    }
}
/// Asserts that nothing arrives on the receiver within 10ms, panicking if a
/// message shows up or the channel closes.
pub async fn expect_mpsc_timeout<T>(receiver: &mut UnboundedReceiver<T>)
where
    T: Debug,
{
    let result = timeout(Duration::from_millis(10), receiver.recv()).await;
    match result {
        Err(_) => (),
        Ok(Some(response)) => panic!("Expected timeout, instead received {:?}", response),
        Ok(None) => panic!("Channel unexpectedly closed"),
    }
}
/// Waits up to 10ms for any queued future to resolve, panicking if none do.
pub async fn expect_future_resolved<T>(futures: &mut FuturesUnordered<BoxFuture<'static, T>>) -> T {
    let resolved = timeout(Duration::from_millis(10), futures.next()).await;
    if let Ok(Some(value)) = resolved {
        value
    } else {
        panic!("No future resolved within timeout period")
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/codecs.rs | mmids-core/src/codecs.rs | //! Standard codec identifiers
use lazy_static::lazy_static;
use std::sync::Arc;
lazy_static! {
    // Shared identifier string for h264 video payloads in AVC format. Wrapped in an Arc so
    // components can hold/compare the identifier with cheap clones instead of reallocating.
    pub static ref VIDEO_CODEC_H264_AVC: Arc<String> = Arc::new("h264-avc".to_string());
    // Shared identifier string for raw AAC audio payloads, also cheaply cloneable via Arc.
    pub static ref AUDIO_CODEC_AAC_RAW: Arc<String> = Arc::new("aac-raw".to_string());
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/event_hub.rs | mmids-core/src/event_hub.rs | //! The event hub is a central actor that receives events from all type of mmids subsystems and
//! allows them to be published to interested subscribers.
use crate::actor_utils::{notify_on_unbounded_closed, notify_on_unbounded_recv};
use crate::workflows::manager::WorkflowManagerRequest;
use crate::workflows::WorkflowRequest;
use std::collections::{HashMap, HashSet};
use std::num::Wrapping;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tracing::{info, instrument, warn};
/// A request to publish a notification to the event hub
#[derive(Debug)]
pub enum PublishEventRequest {
    /// Publish an event about a workflow starting or stopping
    WorkflowStartedOrStopped(WorkflowStartedOrStoppedEvent),
    /// Publish an event relating to a workflow manager
    WorkflowManagerEvent(WorkflowManagerEvent),
}
/// A request to subscribe to a category of events
#[derive(Debug)]
pub enum SubscriptionRequest {
    /// Subscribe to workflow started/stopped events, delivered on the given channel
    WorkflowStartedOrStopped {
        channel: UnboundedSender<WorkflowStartedOrStoppedEvent>,
    },
    /// Subscribe to workflow manager events, delivered on the given channel
    WorkflowManagerEvents {
        channel: UnboundedSender<WorkflowManagerEvent>,
    },
}
/// Events relating to workflows being started or stopped
#[derive(Clone, Debug)]
pub enum WorkflowStartedOrStoppedEvent {
    /// A workflow is now running
    WorkflowStarted {
        /// Name of the workflow that started
        name: Arc<String>,
        /// Channel that can be used to send requests to the started workflow
        channel: UnboundedSender<WorkflowRequest>,
    },
    /// A previously running workflow has stopped
    WorkflowEnded {
        /// Name of the workflow that ended
        name: Arc<String>,
    },
}
/// Events relating to workflow managers
#[derive(Clone, Debug)]
pub enum WorkflowManagerEvent {
    /// A workflow manager has registered itself with the event hub
    WorkflowManagerRegistered {
        /// Channel that can be used to send requests to the registered manager
        channel: UnboundedSender<WorkflowManagerRequest>,
    },
}
/// Spawns the event hub actor and returns the channel used to publish events
/// together with the channel used to register subscriptions.
pub fn start_event_hub() -> (
    UnboundedSender<PublishEventRequest>,
    UnboundedSender<SubscriptionRequest>,
) {
    let (publish_tx, publish_rx) = unbounded_channel();
    let (subscribe_tx, subscribe_rx) = unbounded_channel();
    let (internal_tx, internal_rx) = unbounded_channel();

    let actor = Actor::new(publish_rx, subscribe_rx, internal_tx);
    tokio::spawn(actor.run(internal_rx));

    (publish_tx, subscribe_tx)
}
/// Internal notifications that drive the event hub actor loop
enum FutureResult {
    /// Every sender of publish requests has been dropped
    AllPublishConsumersGone,
    /// Every sender of subscription requests has been dropped
    AllSubscriptionRequestConsumersGone,
    /// A publish request arrived
    NewPublishRequest(PublishEventRequest),
    /// A subscription request arrived
    NewSubscriptionRequest(SubscriptionRequest),
    /// The workflow start/stop subscriber with this id closed its receiving channel
    WorkflowStartStopSubscriberGone(usize),
    /// The workflow manager subscriber with this id closed its receiving channel
    WorkflowManagerSubscriberGone(usize),
}
struct Actor {
    // Feeds internally generated notifications (e.g. subscriber-gone) back into the actor loop
    internal_sender: UnboundedSender<FutureResult>,
    // Next candidate subscriber id; Wrapping makes overflow well-defined over long uptimes
    next_subscriber_id: Wrapping<usize>,
    // Ids currently assigned to any live subscriber, used to avoid id reuse
    active_subscriber_ids: HashSet<usize>,
    // Subscribers to workflow start/stop events, keyed by subscriber id
    workflow_start_stop_subscribers: HashMap<usize, UnboundedSender<WorkflowStartedOrStoppedEvent>>,
    // Subscribers to workflow manager events, keyed by subscriber id
    workflow_manager_subscribers: HashMap<usize, UnboundedSender<WorkflowManagerEvent>>,
    // Set to false once the subscription request channel closes
    new_subscribers_can_join: bool,
    // Workflows known to be running, kept so late subscribers can be caught up
    active_workflows: HashMap<Arc<String>, UnboundedSender<WorkflowRequest>>,
    // Most recently registered workflow manager, kept so late subscribers can be caught up
    active_workflow_manager: Option<UnboundedSender<WorkflowManagerRequest>>,
}
impl Actor {
    /// Creates the actor and wires the publish and subscription receivers so their messages
    /// are funneled into the actor's single internal channel.
    fn new(
        publish_receiver: UnboundedReceiver<PublishEventRequest>,
        subscribe_receiver: UnboundedReceiver<SubscriptionRequest>,
        actor_sender: UnboundedSender<FutureResult>,
    ) -> Self {
        notify_on_unbounded_recv(
            publish_receiver,
            actor_sender.clone(),
            FutureResult::NewPublishRequest,
            || FutureResult::AllPublishConsumersGone,
        );
        notify_on_unbounded_recv(
            subscribe_receiver,
            actor_sender.clone(),
            FutureResult::NewSubscriptionRequest,
            || FutureResult::AllSubscriptionRequestConsumersGone,
        );
        Actor {
            internal_sender: actor_sender,
            next_subscriber_id: Wrapping(0),
            active_subscriber_ids: HashSet::new(),
            workflow_start_stop_subscribers: HashMap::new(),
            workflow_manager_subscribers: HashMap::new(),
            new_subscribers_can_join: true,
            active_workflows: HashMap::new(),
            active_workflow_manager: None,
        }
    }
    /// Main actor loop. Processes publish requests, subscription requests, and channel
    /// closure notifications until no further work can possibly arrive.
    #[instrument(name = "Event Hub Execution", skip(self))]
    async fn run(mut self, mut receiver: UnboundedReceiver<FutureResult>) {
        info!("Starting event hub");
        while let Some(result) = receiver.recv().await {
            match result {
                FutureResult::AllPublishConsumersGone => {
                    info!("All publish request consumers are gone. No new events can come in");
                    break;
                }
                FutureResult::AllSubscriptionRequestConsumersGone => {
                    warn!("All subscription request consumers gone. No new subscribers can join");
                    // Theoretically this should only happen when everything is shutting down. I
                    // guess technically we might still have valid subscribers to send new events to
                    // still so we don't have to shut this down until all subscribers are gone
                    self.new_subscribers_can_join = false;
                }
                FutureResult::WorkflowStartStopSubscriberGone(id) => {
                    self.active_subscriber_ids.remove(&id);
                    self.workflow_start_stop_subscribers.remove(&id);
                }
                FutureResult::WorkflowManagerSubscriberGone(id) => {
                    self.active_subscriber_ids.remove(&id);
                    self.workflow_manager_subscribers.remove(&id);
                }
                FutureResult::NewPublishRequest(request) => {
                    self.handle_publish_request(request);
                }
                FutureResult::NewSubscriptionRequest(request) => {
                    self.handle_subscription_request(request);
                }
            }
            // Once no new subscribers can join and the last subscriber disappears there is
            // nobody left to deliver events to, so the hub can shut down.
            if !self.new_subscribers_can_join && self.total_subscriber_count() == 0 {
                info!("All subscribers are gone and no new subscribers can join. Closing");
                break;
            }
        }
        info!("Closing event hub");
    }
    /// Fans a published event out to the relevant subscribers, and records the state needed
    /// to replay the event for subscribers that join later.
    fn handle_publish_request(&mut self, request: PublishEventRequest) {
        match request {
            PublishEventRequest::WorkflowStartedOrStopped(event) => {
                for subscriber in self.workflow_start_stop_subscribers.values() {
                    let _ = subscriber.send(event.clone());
                }
                // We want to maintain a list of active workflows, so if a subscriber joins after
                // we receive the notification of a workflow starting they don't miss that event.
                match event {
                    WorkflowStartedOrStoppedEvent::WorkflowStarted { name, channel } => {
                        self.active_workflows.insert(name, channel);
                    }
                    WorkflowStartedOrStoppedEvent::WorkflowEnded { name } => {
                        self.active_workflows.remove(&name);
                    }
                }
            }
            PublishEventRequest::WorkflowManagerEvent(event) => {
                for subscriber in self.workflow_manager_subscribers.values() {
                    let _ = subscriber.send(event.clone());
                }
                // Remember the registered manager so late subscribers can be caught up
                match event {
                    WorkflowManagerEvent::WorkflowManagerRegistered { channel } => {
                        self.active_workflow_manager = Some(channel);
                    }
                }
            }
        }
    }
    /// Registers a new subscriber under a unique id and immediately replays any state it
    /// missed (already-running workflows, or an already-registered workflow manager).
    fn handle_subscription_request(&mut self, request: SubscriptionRequest) {
        let id = self.next_subscriber_id;
        self.active_subscriber_ids.insert(id.0);
        // Advance to the next id not currently in use. Wrapping arithmetic keeps this
        // well-defined even if the counter overflows over a long process lifetime.
        loop {
            self.next_subscriber_id += Wrapping(1);
            if !self
                .active_subscriber_ids
                .contains(&self.next_subscriber_id.0)
            {
                break;
            }
        }
        match request {
            SubscriptionRequest::WorkflowStartedOrStopped { channel } => {
                // Catch the new subscriber up on workflows that started before it joined
                for (name, workflow_channel) in &self.active_workflows {
                    let _ = channel.send(WorkflowStartedOrStoppedEvent::WorkflowStarted {
                        name: name.clone(),
                        channel: workflow_channel.clone(),
                    });
                }
                self.workflow_start_stop_subscribers
                    .insert(id.0, channel.clone());
                notify_on_unbounded_closed(channel, self.internal_sender.clone(), move || {
                    FutureResult::WorkflowStartStopSubscriberGone(id.0)
                });
            }
            SubscriptionRequest::WorkflowManagerEvents { channel } => {
                // Catch the new subscriber up on a manager that registered before it joined
                if let Some(sender) = &self.active_workflow_manager {
                    let _ = channel.send(WorkflowManagerEvent::WorkflowManagerRegistered {
                        channel: sender.clone(),
                    });
                }
                self.workflow_manager_subscribers
                    .insert(id.0, channel.clone());
                notify_on_unbounded_closed(channel, self.internal_sender.clone(), move || {
                    FutureResult::WorkflowManagerSubscriberGone(id.0)
                });
            }
        }
    }
    /// Total number of live subscribers across every subscription category.
    ///
    /// Fix: previously only workflow start/stop subscribers were counted, so the hub could
    /// decide "all subscribers are gone" and shut down while workflow manager subscribers
    /// were still connected, contradicting the shutdown comment in `run`.
    fn total_subscriber_count(&self) -> usize {
        self.workflow_start_stop_subscribers.len() + self.workflow_manager_subscribers.len()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils;
use std::time::Duration;
#[tokio::test]
async fn can_receive_workflow_started_notifications() {
let (publish_channel, subscribe_channel) = start_event_hub();
let (subscriber_sender, mut subscriber_receiver) = unbounded_channel();
let (workflow_sender, _workflow_receiver) = unbounded_channel();
subscribe_channel
.send(SubscriptionRequest::WorkflowStartedOrStopped {
channel: subscriber_sender,
})
.expect("Failed to subscribe to workflow start/stop events");
tokio::time::sleep(Duration::from_millis(10)).await;
publish_channel
.send(PublishEventRequest::WorkflowStartedOrStopped(
WorkflowStartedOrStoppedEvent::WorkflowStarted {
name: Arc::new("test".to_string()),
channel: workflow_sender,
},
))
.expect("Failed to publish workflow started event");
let response = test_utils::expect_mpsc_response(&mut subscriber_receiver).await;
match response {
WorkflowStartedOrStoppedEvent::WorkflowStarted { name, channel: _ } => {
assert_eq!(name.as_str(), "test", "Unexpected workflow name");
}
event => panic!("Unexpected event received: {:?}", event),
}
}
#[tokio::test]
async fn can_receive_workflow_started_notification_when_subscribed_after_published() {
let (publish_channel, subscribe_channel) = start_event_hub();
let (subscriber_sender, mut subscriber_receiver) = unbounded_channel();
let (workflow_sender, _workflow_receiver) = unbounded_channel();
publish_channel
.send(PublishEventRequest::WorkflowStartedOrStopped(
WorkflowStartedOrStoppedEvent::WorkflowStarted {
name: Arc::new("test".to_string()),
channel: workflow_sender,
},
))
.expect("Failed to publish workflow started event");
tokio::time::sleep(Duration::from_millis(10)).await;
subscribe_channel
.send(SubscriptionRequest::WorkflowStartedOrStopped {
channel: subscriber_sender,
})
.expect("Failed to subscribe to workflow start/stop events");
let response = test_utils::expect_mpsc_response(&mut subscriber_receiver).await;
match response {
WorkflowStartedOrStoppedEvent::WorkflowStarted { name, channel: _ } => {
assert_eq!(name.as_str(), "test", "Unexpected workflow name");
}
event => panic!("Unexpected event received: {:?}", event),
}
}
#[tokio::test]
async fn can_receive_workflow_stopped_notifications() {
let (publish_channel, subscribe_channel) = start_event_hub();
let (subscriber_sender, mut subscriber_receiver) = unbounded_channel();
subscribe_channel
.send(SubscriptionRequest::WorkflowStartedOrStopped {
channel: subscriber_sender,
})
.expect("Failed to subscribe to workflow start/stop events");
tokio::time::sleep(Duration::from_millis(10)).await;
publish_channel
.send(PublishEventRequest::WorkflowStartedOrStopped(
WorkflowStartedOrStoppedEvent::WorkflowEnded {
name: Arc::new("test".to_string()),
},
))
.expect("Failed to publish workflow ended event");
let response = test_utils::expect_mpsc_response(&mut subscriber_receiver).await;
match response {
WorkflowStartedOrStoppedEvent::WorkflowEnded { name } => {
assert_eq!(name.as_str(), "test", "Unexpected workflow name");
}
event => panic!("Unexpected event received: {:?}", event),
}
}
#[tokio::test]
async fn no_events_when_workflow_started_and_stopped_prior_to_subscription() {
let (publish_channel, subscribe_channel) = start_event_hub();
let (subscriber_sender, mut subscriber_receiver) = unbounded_channel();
let (workflow_sender, _workflow_receiver) = unbounded_channel();
publish_channel
.send(PublishEventRequest::WorkflowStartedOrStopped(
WorkflowStartedOrStoppedEvent::WorkflowStarted {
name: Arc::new("test".to_string()),
channel: workflow_sender,
},
))
.expect("Failed to publish workflow started event");
publish_channel
.send(PublishEventRequest::WorkflowStartedOrStopped(
WorkflowStartedOrStoppedEvent::WorkflowEnded {
name: Arc::new("test".to_string()),
},
))
.expect("Failed to publish workflow ended event");
tokio::time::sleep(Duration::from_millis(10)).await;
subscribe_channel
.send(SubscriptionRequest::WorkflowStartedOrStopped {
channel: subscriber_sender,
})
.expect("Failed to subscribe to workflow start/stop events");
test_utils::expect_mpsc_timeout(&mut subscriber_receiver).await;
}
#[tokio::test]
async fn can_receive_workflow_manager_registered_event() {
let (publish_channel, subscribe_channel) = start_event_hub();
let (subscriber_sender, mut subscriber_receiver) = unbounded_channel();
let (manager_sender, _manager_receiver) = unbounded_channel();
subscribe_channel
.send(SubscriptionRequest::WorkflowManagerEvents {
channel: subscriber_sender,
})
.expect("Failed to send subscription request");
tokio::time::sleep(Duration::from_millis(10)).await;
publish_channel
.send(PublishEventRequest::WorkflowManagerEvent(
WorkflowManagerEvent::WorkflowManagerRegistered {
channel: manager_sender,
},
))
.expect("Failed to send publish request");
let response = test_utils::expect_mpsc_response(&mut subscriber_receiver).await;
match response {
WorkflowManagerEvent::WorkflowManagerRegistered { channel: _ } => (),
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/manager.rs | mmids-core/src/workflows/manager.rs | //! A workflow manager is a centralized actor that orchestrates multiple workflows. It can be
//! used to start new workflows, change the steps of a managed workflow, get status the of managed
//! workflows, and stop a managed workflow.
use crate::actor_utils::{notify_on_unbounded_closed, notify_on_unbounded_recv};
use crate::event_hub::{PublishEventRequest, WorkflowManagerEvent, WorkflowStartedOrStoppedEvent};
use crate::workflows::definitions::WorkflowDefinition;
use crate::workflows::runner::{WorkflowRequestOperation, WorkflowState};
use crate::workflows::steps::factory::WorkflowStepFactory;
use crate::workflows::{start_workflow, WorkflowRequest};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::Sender;
use tracing::{info, instrument, warn};
/// Requests an action be taken by the workflow manager
#[derive(Debug)]
pub struct WorkflowManagerRequest {
    /// An identifier that can identify this request. Mostly used for correlating log output
    /// across the manager and the workflows it forwards requests to
    pub request_id: String,
    /// The specific operation being requested of the workflow manager
    pub operation: WorkflowManagerRequestOperation,
}
/// Operations consumers can request the workflow manager to perform
#[derive(Debug)]
pub enum WorkflowManagerRequestOperation {
    /// Starts or updates a specified workflow based on the passed in definition
    UpsertWorkflow { definition: WorkflowDefinition },
    /// Stops the specified workflow, if it is running
    StopWorkflow { name: Arc<String> },
    /// Requests information about all workflows currently running
    GetRunningWorkflows {
        /// Oneshot channel on which the list of running workflows is returned
        response_channel: Sender<Vec<GetWorkflowResponse>>,
    },
    /// Requests details about a specific workflow
    GetWorkflowDetails {
        /// Name of the workflow whose details are being requested
        name: Arc<String>,
        /// Oneshot channel on which the workflow's state (None if not running) is returned
        response_channel: Sender<Option<WorkflowState>>,
    },
}
/// Summary information about a single running workflow
#[derive(Debug)]
pub struct GetWorkflowResponse {
    /// The name of the running workflow
    pub name: Arc<String>,
}
/// Spawns a workflow manager actor and returns the channel used to send it requests.
pub fn start_workflow_manager(
    step_factory: Arc<WorkflowStepFactory>,
    event_hub_publisher: UnboundedSender<PublishEventRequest>,
) -> UnboundedSender<WorkflowManagerRequest> {
    let (request_tx, request_rx) = unbounded_channel();
    let (internal_tx, internal_rx) = unbounded_channel();

    let actor = Actor::new(step_factory, event_hub_publisher, request_rx, internal_tx);
    tokio::spawn(actor.run(request_tx.clone(), internal_rx));

    request_tx
}
/// Internal notifications that drive the workflow manager actor loop
enum FutureResult {
    /// Every sender of workflow manager requests has been dropped
    AllConsumersGone,
    /// The event hub's publish channel has closed
    EventHubGone,
    /// A request for the manager arrived
    WorkflowManagerRequestReceived(WorkflowManagerRequest),
    /// The named workflow's request channel closed unexpectedly
    WorkflowGone(Arc<String>),
}
struct Actor {
    // Feeds internally generated notifications (e.g. workflow-gone) back into the actor loop
    internal_sender: UnboundedSender<FutureResult>,
    // Request channels for every workflow this manager currently tracks, keyed by name
    workflows: HashMap<Arc<String>, UnboundedSender<WorkflowRequest>>,
    // Factory used to instantiate steps when starting new workflows
    step_factory: Arc<WorkflowStepFactory>,
    // Channel for announcing workflow/manager lifecycle events to the event hub
    event_hub_publisher: UnboundedSender<PublishEventRequest>,
}
impl Actor {
    /// Creates the actor and wires the request receiver into the actor's internal channel.
    fn new(
        step_factory: Arc<WorkflowStepFactory>,
        event_hub_publisher: UnboundedSender<PublishEventRequest>,
        request_receiver: UnboundedReceiver<WorkflowManagerRequest>,
        actor_sender: UnboundedSender<FutureResult>,
    ) -> Self {
        notify_on_unbounded_recv(
            request_receiver,
            actor_sender.clone(),
            FutureResult::WorkflowManagerRequestReceived,
            || FutureResult::AllConsumersGone,
        );
        Actor {
            internal_sender: actor_sender,
            workflows: HashMap::new(),
            step_factory,
            event_hub_publisher,
        }
    }
    /// Main actor loop: registers this manager with the event hub, then services requests
    /// until either all request senders or the event hub disappear.
    #[instrument(name = "Workflow Manager Execution", skip_all)]
    async fn run(
        mut self,
        request_sender: UnboundedSender<WorkflowManagerRequest>,
        mut actor_receiver: UnboundedReceiver<FutureResult>,
    ) {
        // If the event hub goes away the manager cannot announce lifecycle events, so we
        // want to find out and shut down rather than run silently.
        notify_on_unbounded_closed(
            self.event_hub_publisher.clone(),
            self.internal_sender.clone(),
            || FutureResult::EventHubGone,
        );
        info!("Starting workflow manager");
        // Announce this manager (and its request channel) to any interested subscribers
        let _ = self
            .event_hub_publisher
            .send(PublishEventRequest::WorkflowManagerEvent(
                WorkflowManagerEvent::WorkflowManagerRegistered {
                    channel: request_sender,
                },
            ));
        while let Some(result) = actor_receiver.recv().await {
            match result {
                FutureResult::AllConsumersGone => {
                    info!("All consumers gone");
                    break;
                }
                FutureResult::EventHubGone => {
                    warn!("Event hub is gone");
                    break;
                }
                FutureResult::WorkflowManagerRequestReceived(request) => {
                    self.handle_request(request);
                }
                FutureResult::WorkflowGone(name) => {
                    // Only announce the workflow as ended if we were still tracking it; a
                    // workflow stopped via StopWorkflow was already removed and announced.
                    if self.workflows.remove(&name).is_some() {
                        let event =
                            WorkflowStartedOrStoppedEvent::WorkflowEnded { name: name.clone() };
                        let _ = self
                            .event_hub_publisher
                            .send(PublishEventRequest::WorkflowStartedOrStopped(event));
                        warn!(
                            workflow_name = %name,
                            "Workflow '{}' had its request channel disappear", name
                        );
                    }
                }
            }
        }
        info!("Workflow manager closing")
    }
    /// Dispatches a single manager request: upsert/stop a workflow or report on running ones.
    #[instrument(skip(self, request), fields(request_id = %request.request_id))]
    fn handle_request(&mut self, request: WorkflowManagerRequest) {
        match request.operation {
            WorkflowManagerRequestOperation::UpsertWorkflow { definition } => {
                if let Some(sender) = self.workflows.get_mut(&definition.name) {
                    // Workflow already running: forward the new definition to it
                    info!(
                        workflow_name = %definition.name,
                        "Updating existing workflow '{}' with new definition", definition.name,
                    );
                    let _ = sender.send(WorkflowRequest {
                        request_id: request.request_id,
                        operation: WorkflowRequestOperation::UpdateDefinition {
                            new_definition: definition,
                        },
                    });
                } else {
                    // Not running yet: start it, watch its channel, and announce it
                    info!(
                        workflow_name = %definition.name,
                        "Starting workflow '{}'", definition.name,
                    );
                    let name = definition.name.clone();
                    let sender = start_workflow(definition, self.step_factory.clone());
                    let on_closed_name = name.clone();
                    notify_on_unbounded_closed(
                        sender.clone(),
                        self.internal_sender.clone(),
                        || FutureResult::WorkflowGone(on_closed_name),
                    );
                    self.workflows.insert(name.clone(), sender.clone());
                    let event = WorkflowStartedOrStoppedEvent::WorkflowStarted {
                        name,
                        channel: sender,
                    };
                    let _ = self
                        .event_hub_publisher
                        .send(PublishEventRequest::WorkflowStartedOrStopped(event));
                }
            }
            WorkflowManagerRequestOperation::StopWorkflow { name } => {
                info!(
                    workflow_name = %name,
                    "Stopping workflow '{}'", name,
                );
                if let Some(sender) = self.workflows.remove(&name) {
                    let _ = sender.send(WorkflowRequest {
                        request_id: request.request_id,
                        operation: WorkflowRequestOperation::StopWorkflow,
                    });
                    let event = WorkflowStartedOrStoppedEvent::WorkflowEnded { name: name.clone() };
                    let _ = self
                        .event_hub_publisher
                        .send(PublishEventRequest::WorkflowStartedOrStopped(event));
                }
            }
            WorkflowManagerRequestOperation::GetRunningWorkflows { response_channel } => {
                let mut response = self
                    .workflows
                    .keys()
                    .map(|x| GetWorkflowResponse { name: x.clone() })
                    .collect::<Vec<_>>();
                // NOTE(review): this comparator sorts *descending* by name (`b` compared to
                // `a`). Confirm reverse-alphabetical ordering is intentional; an ascending
                // sort would be `a.name.cmp(&b.name)`.
                response.sort_by(|a, b| b.name.cmp(&a.name));
                let _ = response_channel.send(response);
            }
            WorkflowManagerRequestOperation::GetWorkflowDetails {
                name,
                response_channel,
            } => match self.workflows.get(&name) {
                None => {
                    let _ = response_channel.send(None);
                }
                Some(sender) => {
                    // The workflow answers on the caller's oneshot channel directly
                    let _ = sender.send(WorkflowRequest {
                        request_id: request.request_id,
                        operation: WorkflowRequestOperation::GetState { response_channel },
                    });
                }
            },
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils;
    use tokio::sync::oneshot::channel;
    // Test harness: stands up a manager wired to a fake event hub receiver so tests can
    // both drive the manager and observe the events it publishes.
    struct TestContext {
        // Receives whatever the manager publishes to the event hub
        event_hub: UnboundedReceiver<PublishEventRequest>,
        // Channel for sending requests to the manager under test
        manager: UnboundedSender<WorkflowManagerRequest>,
    }
    impl TestContext {
        fn new() -> Self {
            let (sender, receiver) = unbounded_channel();
            let factory = Arc::new(WorkflowStepFactory::new());
            let manager = start_workflow_manager(factory, sender);
            TestContext {
                event_hub: receiver,
                manager,
            }
        }
    }
    #[tokio::test]
    async fn new_workflow_manager_registers_with_event_hub() {
        let mut context = TestContext::new();
        let event = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        match event {
            PublishEventRequest::WorkflowManagerEvent(event) => match event {
                WorkflowManagerEvent::WorkflowManagerRegistered { channel: _ } => (),
            },
            event => panic!("Expected workflow manager event, instead got {:?}", event),
        }
    }
    #[tokio::test]
    async fn created_workflow_has_event_published() {
        let mut context = TestContext::new();
        test_utils::expect_mpsc_response(&mut context.event_hub).await; // manager registered event
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let event = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        match event {
            PublishEventRequest::WorkflowStartedOrStopped(event) => match event {
                WorkflowStartedOrStoppedEvent::WorkflowStarted { name, channel: _ } => {
                    assert_eq!(name.as_str(), "workflow", "Unexpected workflow name");
                }
                event => panic!("Unexpected workflow event received: {:?}", event),
            },
            event => panic!("Unexpected publish event received; {:?}", event),
        }
        test_utils::expect_mpsc_timeout(&mut context.event_hub).await;
    }
    #[tokio::test]
    async fn created_workflow_shows_in_workflow_list() {
        let context = TestContext::new();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let (sender, receiver) = channel();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::GetRunningWorkflows {
                    response_channel: sender,
                },
            })
            .expect("failed to send list workflow request");
        let response = test_utils::expect_oneshot_response(receiver).await;
        assert_eq!(response.len(), 1, "Unexpected number of workflows");
        assert_eq!(
            response[0].name.as_str(),
            "workflow",
            "Unexpected workflow name"
        );
    }
    #[tokio::test]
    async fn can_get_details_of_created_workflow() {
        let context = TestContext::new();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let (sender, receiver) = channel();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::GetWorkflowDetails {
                    name: Arc::new("workflow".to_string()),
                    response_channel: sender,
                },
            })
            .expect("failed to send list workflow request");
        let response = test_utils::expect_oneshot_response(receiver).await;
        assert!(
            response.is_some(),
            "Expected workflow details to be returned"
        );
    }
    // NOTE(review): "stated" in this test name is likely a typo for "started"
    #[tokio::test]
    async fn second_upsert_request_does_not_send_second_stated_event() {
        let mut context = TestContext::new();
        test_utils::expect_mpsc_response(&mut context.event_hub).await; // manager registered event
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        test_utils::expect_mpsc_timeout(&mut context.event_hub).await;
    }
    #[tokio::test]
    async fn second_created_workflow_does_not_duplicate_in_workflow_list() {
        let context = TestContext::new();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let (sender, receiver) = channel();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::GetRunningWorkflows {
                    response_channel: sender,
                },
            })
            .expect("failed to send list workflow request");
        let response = test_utils::expect_oneshot_response(receiver).await;
        assert_eq!(response.len(), 1, "Unexpected number of workflows");
        assert_eq!(
            response[0].name.as_str(),
            "workflow",
            "Unexpected workflow name"
        );
    }
    #[tokio::test]
    async fn stopping_workflow_sends_stopped_event() {
        let mut context = TestContext::new();
        test_utils::expect_mpsc_response(&mut context.event_hub).await; // manager registered event
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::StopWorkflow {
                    name: Arc::new("workflow".to_string()),
                },
            })
            .expect("Failed to send stop command");
        let event = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        match event {
            PublishEventRequest::WorkflowStartedOrStopped(event) => match event {
                WorkflowStartedOrStoppedEvent::WorkflowEnded { name } => {
                    assert_eq!(name.as_str(), "workflow", "Unexpected workflow name");
                }
                event => panic!("Unexpected workflow event received: {:?}", event),
            },
            event => panic!("Unexpected publish event received; {:?}", event),
        }
        test_utils::expect_mpsc_timeout(&mut context.event_hub).await;
    }
    #[tokio::test]
    async fn stopped_workflow_does_not_show_in_workflow_list() {
        let mut context = TestContext::new();
        test_utils::expect_mpsc_response(&mut context.event_hub).await; // manager registered event
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::StopWorkflow {
                    name: Arc::new("workflow".to_string()),
                },
            })
            .expect("Failed to send stop command");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        let (sender, receiver) = channel();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::GetRunningWorkflows {
                    response_channel: sender,
                },
            })
            .expect("Failed to send get running workflow request");
        let response = test_utils::expect_oneshot_response(receiver).await;
        assert!(response.is_empty(), "Expected empty workflow list");
    }
    #[tokio::test]
    async fn no_details_returned_for_stopped_workflow() {
        let mut context = TestContext::new();
        test_utils::expect_mpsc_response(&mut context.event_hub).await; // manager registered event
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                    definition: WorkflowDefinition {
                        name: Arc::new("workflow".to_string()),
                        routed_by_reactor: false,
                        steps: Vec::new(),
                    },
                },
            })
            .expect("Failed to send upsert request");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::StopWorkflow {
                    name: Arc::new("workflow".to_string()),
                },
            })
            .expect("Failed to send stop command");
        let _ = test_utils::expect_mpsc_response(&mut context.event_hub).await;
        let (sender, receiver) = channel();
        context
            .manager
            .send(WorkflowManagerRequest {
                request_id: "".to_string(),
                operation: WorkflowManagerRequestOperation::GetWorkflowDetails {
                    name: Arc::new("workflow".to_string()),
                    response_channel: sender,
                },
            })
            .expect("Failed to send get running workflow request");
        let response = test_utils::expect_oneshot_response(receiver).await;
        assert!(response.is_none(), "Expected no workflow details returned");
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/mod.rs | mmids-core/src/workflows/mod.rs | //! A workflow represents a single media pipeline. Each workflow contains one or more steps that
//! can either receive video, transform video, or send video to other sources. Media data
//! transitions from one step to the next in a linear fashion based on the order in which they
//! were defined.
pub mod definitions;
pub mod manager;
pub mod metadata;
mod runner;
pub mod steps;
pub use runner::{start_workflow, WorkflowRequest, WorkflowRequestOperation, WorkflowStatus};
use crate::StreamId;
use bytes::Bytes;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use crate::workflows::metadata::MediaPayloadMetadataCollection;
pub use runner::{WorkflowState, WorkflowStepState};
/// Identifies the category of media contained within a payload
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MediaType {
    /// The payload contains audio data
    Audio,
    /// The payload contains video data
    Video,
    /// The payload contains data that is neither audio nor video
    Other,
}
/// Notification about media coming across a specific stream
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct MediaNotification {
    /// The identifier for the stream that this notification pertains to
    pub stream_id: StreamId,
    /// The content of the notification message
    pub content: MediaNotificationContent,
}
/// The detailed information contained within a media notification
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum MediaNotificationContent {
    /// Announces that this stream has now connected, and steps that receive this notification
    /// should prepare for media data to start coming through
    NewIncomingStream {
        /// The name for the stream that's being published
        stream_name: Arc<String>,
    },
    /// Announces that this stream's source has disconnected and will no longer be sending any
    /// new notifications down. Steps that receive this message can use this to clean up any
    /// information they are tracking about this stream, as no new media will arrive without
    /// a new `NewIncomingStream` announcement.
    StreamDisconnected,
    /// New stream metadata
    Metadata {
        /// Key/value pairs describing the stream
        data: HashMap<String, String>,
    },
    /// An individual payload as part of this media stream
    MediaPayload {
        /// High level categorization of the media contained in this payload. Can be used by
        /// consumers who do not necessarily care about how the bytes in the payload are formatted,
        /// but just cares about categorization and metadata of the payload. E.g. an SFU may only
        /// care about audio packets and their energy level metadata, but not what codec the audio
        /// is formatted in.
        media_type: MediaType,
        /// High level description of the format of bytes contained in the payload. May be the name
        /// of a codec (e.g. `aac`) but may also be more specific, such as a codec specific stream
        /// format (e.g. `h264 avc`). The identifiers for these payload types will need to be
        /// agreed upon, so different components can know when they support different payloads.
        payload_type: Arc<String>,
        /// How long since an unidentified epoch is this payload valid for. It cannot be assumed
        /// that this is necessarily the duration from stream begin, but can be used to determine
        /// when this payload should be decoded in comparison to payloads that came in before and
        /// after it.
        timestamp: Duration,
        /// Metadata that's only specific to this individual payload
        metadata: MediaPayloadMetadataCollection,
        /// Actual payload bytes
        data: Bytes,
        /// Determines if this payload is a high priority packet that is required for decoding.
        /// This is meant for sequence headers (for h264 and aac as an example) where later packets
        /// cannot be decoded without it. These high priority packets are rarely re-sent, and
        /// therefore this flag lets us know to cache them when this is `true`.
        ///
        /// Flagging this as `true` will cause these packets to be cached, potentially until a
        /// `StreamDisconnected` signal occurs, and therefore this must only be set for rare
        /// high priority packets (i.e. not for key frames in video).
        is_required_for_decoding: bool,
    },
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/definitions.rs | mmids-core/src/workflows/definitions.rs | use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::hash::{Hash, Hasher};
use std::sync::Arc;
/// Identifier representing the type of the workflow step being defined
#[derive(Clone, Hash, Debug, Eq, PartialEq)]
pub struct WorkflowStepType(pub String);
/// Identifies a specific workflow step. Two steps with the same set of parameters and values will
/// always produce the same id within a single run of the application, but the identifiers are
/// not guaranteed to be consistent across application runs.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct WorkflowStepId(pub u64);
/// The definition of a workflow step and any parameters it may be using
#[derive(Clone, Debug)]
pub struct WorkflowStepDefinition {
    /// The type of step this definition describes
    pub step_type: WorkflowStepType,
    /// Step-specific parameters: optional values keyed by parameter name
    pub parameters: HashMap<String, Option<String>>,
}
/// The definition of a workflow and the steps (in order) it contains
#[derive(Clone, Debug)]
pub struct WorkflowDefinition {
    /// The name of the workflow
    pub name: Arc<String>,
    // NOTE(review): appears to indicate whether a reactor routes streams to this
    // workflow — confirm against the reactor/runner code.
    pub routed_by_reactor: bool,
    /// The workflow's steps, in execution order
    pub steps: Vec<WorkflowStepDefinition>,
}
impl std::fmt::Display for WorkflowStepType {
    /// Writes the step type's inner name directly to the formatter.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.0)
    }
}
impl WorkflowStepDefinition {
    /// Gets an identifier for the workflow step that's based on the step's parameters.
    ///
    /// Relies on the order-insensitive `Hash` implementation below, so two definitions with
    /// the same type and the same parameter pairs produce the same id within a single
    /// process. `DefaultHasher`'s output is not guaranteed stable across Rust releases, so
    /// these ids must not be persisted across application runs.
    pub fn get_id(&self) -> WorkflowStepId {
        let mut hasher = DefaultHasher::new();
        self.hash(&mut hasher);
        WorkflowStepId(hasher.finish())
    }
}
impl Hash for WorkflowStepDefinition {
    /// Hashes the step type followed by each parameter pair in sorted key order,
    /// making the result independent of `HashMap` iteration order.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.step_type.hash(state);
        let mut keys: Vec<&String> = self.parameters.keys().collect();
        keys.sort_unstable();
        for key in keys {
            key.hash(state);
            self.parameters.get(key).hash(state);
        }
    }
}
impl Display for WorkflowStepId {
    /// Formats the id as its plain numeric value (formatter flags are not forwarded,
    /// matching the other Display impls in this file).
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let rendered = self.0.to_string();
        f.write_str(&rendered)
    }
}
#[cfg(test)]
mod tests {
    //! Verifies that `WorkflowStepDefinition::get_id` is insensitive to parameter insertion
    //! order but sensitive to the step type and to parameter values.
    use super::*;
    /// Same type, same parameters inserted in different orders -> identical id.
    #[test]
    fn two_steps_with_identical_setups_have_same_id() {
        let mut step1 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test".to_string()),
            parameters: HashMap::new(),
        };
        step1
            .parameters
            .insert("a".to_string(), Some("b".to_string()));
        step1
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        let mut step2 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test".to_string()),
            parameters: HashMap::new(),
        };
        step2
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        step2
            .parameters
            .insert("a".to_string(), Some("b".to_string()));
        assert_eq!(step1.get_id(), step2.get_id());
    }
    /// Same parameters but a different step type -> different id.
    #[test]
    fn two_steps_with_different_types_do_not_have_same_id() {
        let mut step1 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test".to_string()),
            parameters: HashMap::new(),
        };
        step1
            .parameters
            .insert("a".to_string(), Some("b".to_string()));
        step1
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        let mut step2 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test2".to_string()),
            parameters: HashMap::new(),
        };
        step2
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        step2
            .parameters
            .insert("a".to_string(), Some("b".to_string()));
        assert_ne!(step1.get_id(), step2.get_id());
    }
    /// Different parameter values (and type) -> different id.
    #[test]
    fn two_steps_with_different_parameters_do_not_have_same_id() {
        let mut step1 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test".to_string()),
            parameters: HashMap::new(),
        };
        step1
            .parameters
            .insert("a".to_string(), Some("b".to_string()));
        step1
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        let mut step2 = WorkflowStepDefinition {
            step_type: WorkflowStepType("test2".to_string()),
            parameters: HashMap::new(),
        };
        step2
            .parameters
            .insert("c".to_string(), Some("d".to_string()));
        step2
            .parameters
            .insert("a".to_string(), Some("f".to_string()));
        assert_ne!(step1.get_id(), step2.get_id());
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/metadata/klv.rs | mmids-core/src/workflows/metadata/klv.rs | //! Key-length-value encoding of byte data. Allows storing a set of data in a single contiguous
//! `Bytes` collection, enabling the storing of different types of data in re-usable memory
//! arenas, and being cheap to clone.
use anyhow::{anyhow, Result};
use bytes::{Buf, BufMut, Bytes, BytesMut};
/// An individual Key-Length-Value item
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct KlvItem {
    /// Identifies the item in the KLV set. This should be unique for each logical type of
    /// data being stored, although the key can be re-used if multiple values for the same type
    /// of data exists (e.g. an array of values of the same type).
    pub key: u16,
    /// The raw value bytes; must be small enough for its length to fit in a u16
    pub value: Bytes,
}
/// Storage of the KLV data
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct KlvStore {
    // All items serialized back-to-back as key (u16) | length (u16) | value bytes.
    // `Bytes`-backed, so cloning the store is a cheap reference count bump.
    data: Bytes,
}
/// Iterates through KLV data in order. This is a separate structure from `KlvData` to allow a
/// single instance of `KlvData` to be iterated more than once.
pub struct KlvIterator {
    // A clone of the store's buffer; the iterator consumes it as it advances.
    data: Bytes,
}
impl KlvStore {
    /// Creates a new `KlvStore` structure from an iterator of items. Items are stored in the
    /// order they are returned in the iterator.
    ///
    /// This function takes in a buffer that it should use to fill. This enables re-use of an
    /// existing buffer arena to prevent allocations for this data if we can fit it in an existing
    /// and unused `BytesMut` storage.
    ///
    /// # Errors
    ///
    /// Returns an error if any item's value is too large for its length to be stored in the
    /// item's u16 length field.
    pub fn from_iter(
        buffer: &mut BytesMut,
        iterator: impl Iterator<Item = KlvItem>,
    ) -> Result<Self> {
        // Split off an empty tail so we write into the arena's unreserved capacity without
        // disturbing bytes the caller has already handed out.
        let mut buffer = buffer.split_off(buffer.len());
        for item in iterator {
            // The length field is a u16, so the value must fit in it.
            if item.value.len() >= u16::MAX as usize {
                return Err(anyhow!("Klv value was too large"));
            }
            buffer.put_u16(item.key);
            buffer.put_u16(item.value.len() as u16);
            buffer.put(item.value);
        }
        Ok(KlvStore {
            data: buffer.freeze(),
        })
    }
    /// Creates a new iterator that goes through the items in the KLV store. This is guaranteed
    /// to be in the same order that they were added in.
    pub fn iter(&self) -> KlvIterator {
        KlvIterator {
            // Cheap clone: the iterator advances its own view of the shared bytes.
            data: self.data.clone(),
        }
    }
}
impl Iterator for KlvIterator {
    type Item = KlvItem;
    /// Decodes the next key/length/value triple from the front of the buffer.
    ///
    /// Assumes the bytes are well-formed KLV as produced by `KlvStore::from_iter`;
    /// `get_u16`/`split_to` panic if the buffer is truncated mid-item.
    fn next(&mut self) -> Option<Self::Item> {
        if self.data.is_empty() {
            return None;
        }
        let key = self.data.get_u16();
        let length = self.data.get_u16();
        // split_to hands back the value bytes without copying and advances past them
        let value = self.data.split_to(length as usize);
        Some(KlvItem { key, value })
    }
}
#[cfg(test)]
mod tests {
    //! Round-trip test: items written through `from_iter` come back from `iter` in order,
    //! including a duplicated key (key 1 appears twice).
    use super::*;
    #[test]
    fn can_create_and_read_items_from_klv_store() {
        let items = [
            KlvItem {
                key: 1,
                value: Bytes::from_static(&[1, 2, 3]),
            },
            KlvItem {
                key: 202,
                value: Bytes::from_static(&[4, 5]),
            },
            KlvItem {
                key: 1,
                value: Bytes::from_static(&[6]),
            },
        ];
        let store = KlvStore::from_iter(&mut BytesMut::new(), items.iter().cloned()).unwrap();
        let mut iterator = store.iter();
        assert_eq!(
            iterator.next(),
            Some(items[0].clone()),
            "Unexpected first item"
        );
        assert_eq!(
            iterator.next(),
            Some(items[1].clone()),
            "Unexpected second item"
        );
        assert_eq!(
            iterator.next(),
            Some(items[2].clone()),
            "Unexpected third item"
        );
        assert_eq!(iterator.next(), None, "Expected no other items");
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/metadata/mod.rs | mmids-core/src/workflows/metadata/mod.rs | //! This module contains functionality for storing and retrieving metadata about individual
//! media payloads.
pub mod common_metadata;
mod keys;
mod klv;
use crate::workflows::metadata::klv::{KlvItem, KlvStore};
use bytes::{Buf, BufMut, Bytes, BytesMut};
use tracing::error;
pub use keys::{MetadataKey, MetadataKeyMap};
/// Allows storing arbitrary attributes and value pairs that can are relevant to an individual
/// media payload/packet. These are stored in a relatively efficient way to make it cheap to
/// clone and attempt to minimize per-packet heap allocations. Once a metadata collection has been
/// created it cannot be modified.
///
/// The metadata currently relies on being passed in a `BytesMut` buffer that it will use for
/// storage. This allows for the creator of media payloads to maintain an arena style memory
/// buffer that persists across media payloads, which should eventually cause each media payload
/// to no longer require its own heap allocation and efficiently re-use unreserved parts of the
/// memory buffer.
///
/// The trade off for cloning and allocation efficiency is that iterating through metadata is an
/// O(N) operation, which means if you need to look for a specific type of metadata you may have to
/// iterate through all other metadata items first. This tradeoff was deemed acceptable for now
/// with the idea that each payload would only have a small amount of metadata attached to it.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct MediaPayloadMetadataCollection {
    // Entries serialized as KLV; `Bytes`-backed, so cloning the collection is cheap.
    data: KlvStore,
}
/// Declares what type of data is being stored in a metadata entry.
/// Each variant corresponds 1:1 with a `MetadataValue` variant.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub enum MetadataValueType {
    U8,
    U16,
    U32,
    U64,
    I8,
    I16,
    I32,
    I64,
    Bytes,
    Bool,
}
/// An actual value stored in a metadata entry.
/// The variant must match the `MetadataValueType` the entry's key was registered with.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum MetadataValue {
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
    I8(i8),
    I16(i16),
    I32(i32),
    I64(i64),
    Bytes(Bytes),
    Bool(bool),
}
/// An individual key/value paired stored as metadata
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct MetadataEntry {
    /// Which registered attribute this entry stores a value for
    key: MetadataKey,
    /// The value serialized in the binary form dictated by the key's value type
    raw_value: Bytes,
}
/// Errors that can occur when creating a metadata entry
#[derive(thiserror::Error, Debug)]
pub enum MetadataEntryError {
    /// The `MetadataValue` variant did not match the type the key was registered with
    #[error(
        "Metadata entry's value was {value:?} but the type was expected to be {expected_type:?}"
    )]
    ValueDoesNotMatchType {
        value: MetadataValue,
        expected_type: MetadataValueType,
    },
    /// The serialized value cannot fit in a u16 KLV length field
    #[error("Entry's data was too large, and must be under 65,535")]
    ValueTooLarge,
}
impl MediaPayloadMetadataCollection {
    /// Creates a new collection of metadata based on the provided entries. A buffer is passed in
    /// which can allow the creators of the collection to maintain an arena to reduce allocations
    /// for each new metadata collection that is created.
    pub fn new(entries: impl Iterator<Item = MetadataEntry>, buffer: &mut BytesMut) -> Self {
        // Split off an empty tail so the KLV data is written into unreserved arena capacity
        // without disturbing bytes the caller already handed out.
        let mut klv_buffer = buffer.split_off(buffer.len());
        let klv_items = entries.map(|e| KlvItem {
            key: e.key.klv_id,
            value: e.raw_value,
        });
        // `MetadataEntry::new` already rejects values of u16::MAX bytes or more, so building
        // the KLV store cannot fail here; state that invariant instead of a bare unwrap.
        let klv_data = KlvStore::from_iter(&mut klv_buffer, klv_items)
            .expect("metadata entry values are pre-validated to fit in a KLV item");
        MediaPayloadMetadataCollection { data: klv_data }
    }
    /// Provides a non-consuming iterator that allows reading of entries within the collection
    pub fn iter(&self) -> impl Iterator<Item = MetadataEntry> {
        self.data.iter().map(|item| MetadataEntry {
            // The value type is recovered from the bits packed into the KLV id
            key: MetadataKey::from_klv_id(item.key),
            raw_value: item.value,
        })
    }
}
impl MetadataEntry {
/// Creates a new media metadata payload entry for the key and value pair
pub fn new(
key: MetadataKey,
value: MetadataValue,
buffer: &mut BytesMut,
) -> Result<Self, MetadataEntryError> {
let mut buffer = buffer.split_off(buffer.len());
match value {
MetadataValue::U8(num) => {
if key.value_type != MetadataValueType::U8 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::U8,
});
}
buffer.put_u8(num);
}
MetadataValue::U16(num) => {
if key.value_type != MetadataValueType::U16 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::U16,
});
}
buffer.put_u16(num);
}
MetadataValue::U32(num) => {
if key.value_type != MetadataValueType::U32 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::U32,
});
}
buffer.put_u32(num);
}
MetadataValue::U64(num) => {
if key.value_type != MetadataValueType::U64 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::U64,
});
}
buffer.put_u64(num);
}
MetadataValue::I8(num) => {
if key.value_type != MetadataValueType::I8 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::I8,
});
}
buffer.put_i8(num);
}
MetadataValue::I16(num) => {
if key.value_type != MetadataValueType::I16 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::I16,
});
}
buffer.put_i16(num);
}
MetadataValue::I32(num) => {
if key.value_type != MetadataValueType::I32 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::I32,
});
}
buffer.put_i32(num);
}
MetadataValue::I64(num) => {
if key.value_type != MetadataValueType::I64 {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::I64,
});
}
buffer.put_i64(num);
}
MetadataValue::Bool(boolean) => {
if key.value_type != MetadataValueType::Bool {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value,
expected_type: MetadataValueType::Bool,
});
}
buffer.put_u8(boolean.into());
}
MetadataValue::Bytes(bytes) => {
if key.value_type != MetadataValueType::Bytes {
return Err(MetadataEntryError::ValueDoesNotMatchType {
value: MetadataValue::Bytes(bytes),
expected_type: MetadataValueType::Bytes,
});
}
buffer.put(bytes);
}
}
if buffer.len() >= u16::MAX as usize {
return Err(MetadataEntryError::ValueTooLarge);
}
Ok(MetadataEntry {
key,
raw_value: buffer.freeze(),
})
}
/// Retrieves the key from the entry
pub fn key(&self) -> MetadataKey {
self.key
}
/// Retrieves the value from the entry
pub fn value(&self) -> MetadataValue {
// Clone to not advance the original buffer
let mut buffer = self.raw_value.clone();
// We shouldn't have to worry about validation as consumers should have only been able
// to create an entry via a `new()` call, and therefore we are sure the raw value is
// correct and matches.
match self.key.value_type {
MetadataValueType::U8 => MetadataValue::U8(buffer.get_u8()),
MetadataValueType::U16 => MetadataValue::U16(buffer.get_u16()),
MetadataValueType::U32 => MetadataValue::U32(buffer.get_u32()),
MetadataValueType::U64 => MetadataValue::U64(buffer.get_u64()),
MetadataValueType::I8 => MetadataValue::I8(buffer.get_i8()),
MetadataValueType::I16 => MetadataValue::I16(buffer.get_i16()),
MetadataValueType::I32 => MetadataValue::I32(buffer.get_i32()),
MetadataValueType::I64 => MetadataValue::I64(buffer.get_i64()),
MetadataValueType::Bytes => MetadataValue::Bytes(buffer),
MetadataValueType::Bool => match buffer.get_u8() {
0 => MetadataValue::Bool(false),
1 => MetadataValue::Bool(true),
x => panic!("Invalid boolean value of {}", x),
},
}
}
}
#[cfg(test)]
pub mod tests {
    //! Round-trip tests: every `MetadataValue` variant survives `MetadataEntry::new` ->
    //! `value()`, and `MediaPayloadMetadataCollection` preserves entry order and supports
    //! repeated iteration.
    use super::*;
    #[test]
    fn can_create_and_get_value_from_u8_metadata_entry() {
        let value = MetadataValue::U8(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::U8,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_u16_metadata_entry() {
        let value = MetadataValue::U16(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::U16,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_u32_metadata_entry() {
        let value = MetadataValue::U32(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::U32,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_u64_metadata_entry() {
        let value = MetadataValue::U64(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::U64,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_i8_metadata_entry() {
        let value = MetadataValue::I8(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::I8,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_i16_metadata_entry() {
        let value = MetadataValue::I16(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::I16,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_i32_metadata_entry() {
        let value = MetadataValue::I32(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::I32,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_i64_metadata_entry() {
        let value = MetadataValue::I64(5);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::I64,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_bool_metadata_entry() {
        let value = MetadataValue::Bool(true);
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::Bool,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    #[test]
    fn can_create_and_get_value_from_bytes_metadata_entry() {
        let value = MetadataValue::Bytes(Bytes::from_static(&[1, 2, 3, 4, 5]));
        let key = MetadataKey {
            klv_id: 15,
            value_type: MetadataValueType::Bytes,
        };
        let entry = MetadataEntry::new(key, value.clone(), &mut BytesMut::new()).unwrap();
        let returned_value = entry.value();
        assert_eq!(returned_value, value);
    }
    /// Entries come back from the collection in the same order they went in.
    #[test]
    fn can_create_and_retrieve_media_payload_metadata() {
        let mut buffer = BytesMut::new();
        let mut map = MetadataKeyMap::default();
        let keys = [
            map.register("first", MetadataValueType::U8),
            map.register("second", MetadataValueType::Bool),
            map.register("third", MetadataValueType::Bytes),
        ];
        let values = [
            MetadataValue::U8(5),
            MetadataValue::Bool(true),
            MetadataValue::Bytes(Bytes::from_static(&[1, 2, 3, 4])),
        ];
        let entries = vec![
            MetadataEntry::new(keys[0], values[0].clone(), &mut buffer).unwrap(),
            MetadataEntry::new(keys[1], values[1].clone(), &mut buffer).unwrap(),
            MetadataEntry::new(keys[2], values[2].clone(), &mut buffer).unwrap(),
        ];
        let metadata =
            MediaPayloadMetadataCollection::new(entries.clone().into_iter(), &mut buffer);
        let mut iterator = metadata.iter();
        assert_eq!(
            iterator.next(),
            Some(entries[0].clone()),
            "Unexpected first entry"
        );
        assert_eq!(
            iterator.next(),
            Some(entries[1].clone()),
            "Unexpected second entry"
        );
        assert_eq!(
            iterator.next(),
            Some(entries[2].clone()),
            "Unexpected third entry"
        );
        assert_eq!(iterator.next(), None, "Unexpected fourth entry");
    }
    /// `iter()` does not consume the collection, so it can be walked repeatedly.
    #[test]
    fn media_payload_metadata_can_be_iterated_multiple_times() {
        let mut buffer = BytesMut::new();
        let mut map = MetadataKeyMap::default();
        let keys = [
            map.register("first", MetadataValueType::U8),
            map.register("second", MetadataValueType::Bool),
        ];
        let values = [MetadataValue::U8(5), MetadataValue::Bool(true)];
        let entries = vec![
            MetadataEntry::new(keys[0], values[0].clone(), &mut buffer).unwrap(),
            MetadataEntry::new(keys[1], values[1].clone(), &mut buffer).unwrap(),
        ];
        let metadata = MediaPayloadMetadataCollection::new(entries.into_iter(), &mut buffer);
        assert_eq!(
            metadata.iter().count(),
            2,
            "Unexpected number of items in iterator"
        );
        assert_eq!(
            metadata.iter().count(),
            2,
            "Unexpected number of items in iterator"
        );
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/metadata/common_metadata.rs | mmids-core/src/workflows/metadata/common_metadata.rs | //! Common types of media payload metadata that may be used
use crate::workflows::metadata::{MetadataKey, MetadataKeyMap, MetadataValueType};
/// Returns the metadata key for a metadata entry describing if a media payload is a video
/// key frame.
///
/// Registering the same pair repeatedly on the same map returns the same key.
pub fn get_is_keyframe_metadata_key(metadata_map: &mut MetadataKeyMap) -> MetadataKey {
    metadata_map.register("is_keyframe", MetadataValueType::Bool)
}
/// Returns the metadata key for a metadata entry describing the number of milliseconds the
/// pts (presentation timestamp) value is offset from the dts (decoding timestamp).
///
/// Registering the same pair repeatedly on the same map returns the same key.
pub fn get_pts_offset_metadata_key(metadata_map: &mut MetadataKeyMap) -> MetadataKey {
    metadata_map.register("pts_offset", MetadataValueType::I32)
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/metadata/keys.rs | mmids-core/src/workflows/metadata/keys.rs | use crate::workflows::metadata::MetadataValueType;
use std::collections::HashMap;
/// How much to shift a u16 in order to store/read the value type identifier in the upper
/// bits of a KLV id (the bits below the shift hold the sequentially assigned id).
const VALUE_TYPE_SHIFT: u16 = 12;
/// Distinctly identifies a single metadata attribute that can have data stored for it.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct MetadataKey {
    /// Packed id used as the KLV key: value type in the top bits, sequential id below
    pub(super) klv_id: u16,
    /// The type of value entries for this key must carry
    pub(super) value_type: MetadataValueType,
}
/// Creates distinct metadata keys for each unique combination of metadata name and value type.
/// If the same name and type pair are registered on the same `MetadataKeyMap` instance, then those
/// keys returned are guaranteed to be equal to each other.
///
/// Keys created between different instances of `MetadataKeyMap` are not guaranteed (nor likely)
/// to be consistent. Therefore, keys should only be compared with keys created by the same map
/// instance, and in most cases you probably only want one instance for the whole process.
#[derive(Default)]
pub struct MetadataKeyMap {
    /// Previously registered pairs, so re-registration returns the same key
    name_to_key_map: HashMap<MetadataNameTypePair, MetadataKey>,
    /// Next sequential id to hand out (before the value type bits are packed in)
    next_id: u16,
}
// Map key pairing a metadata name with its value type; two attributes may share a
// name as long as their types differ.
#[derive(PartialEq, Eq, Hash)]
struct MetadataNameTypePair {
    name: &'static str,
    value_type: MetadataValueType,
}
impl MetadataKey {
    /// Rebuilds a key from a raw KLV id by decoding the value type packed into its
    /// upper bits. Panics (via `value_type_from_klv_id`) on ids this module never produced.
    pub(super) fn from_klv_id(klv_id: u16) -> Self {
        let value_type = value_type_from_klv_id(klv_id);
        MetadataKey { klv_id, value_type }
    }
}
impl MetadataKeyMap {
/// Creates a new `MetadataKeyMap` instance
pub fn new() -> Self {
Default::default()
}
/// Registers the supplied metadata name and value type pair with the key map. If this
/// pair has not been registered yet then a new `MetadataKey` will be generated and returned.
/// If the same pair has already been registered, then the pre-generated key will be returned
pub fn register(&mut self, name: &'static str, value_type: MetadataValueType) -> MetadataKey {
let name_type_pair = MetadataNameTypePair { name, value_type };
match self.name_to_key_map.get(&name_type_pair) {
Some(key) => *key,
None => {
let id = apply_value_type_to_klv_id(self.next_id, value_type);
self.next_id = match self.next_id.checked_add(1) {
Some(num) => num,
None => panic!("Too many metadata key name and type pairs added to key map, only 4,095 are allowed"),
};
let key = MetadataKey {
klv_id: id,
value_type,
};
self.name_to_key_map.insert(name_type_pair, key);
key
}
}
}
}
/// Packs the numeric tag for `value_type` into the bits above `VALUE_TYPE_SHIFT` of `id`.
/// `value_type_from_klv_id` performs the inverse mapping.
fn apply_value_type_to_klv_id(id: u16, value_type: MetadataValueType) -> u16 {
    let type_tag: u16 = match value_type {
        MetadataValueType::U8 => 1,
        MetadataValueType::U16 => 2,
        MetadataValueType::U32 => 3,
        MetadataValueType::U64 => 4,
        MetadataValueType::I8 => 5,
        MetadataValueType::I16 => 6,
        MetadataValueType::I32 => 7,
        MetadataValueType::I64 => 8,
        MetadataValueType::Bool => 9,
        MetadataValueType::Bytes => 10,
    };
    id | (type_tag << VALUE_TYPE_SHIFT)
}
/// Extracts the value type encoded in the upper bits of a KLV id; the inverse of
/// `apply_value_type_to_klv_id`.
///
/// # Panics
///
/// Panics on a type tag this module never produces, i.e. on ids not created by
/// `apply_value_type_to_klv_id`.
fn value_type_from_klv_id(klv_id: u16) -> MetadataValueType {
    let value_type_id = klv_id >> VALUE_TYPE_SHIFT;
    match value_type_id {
        1 => MetadataValueType::U8,
        2 => MetadataValueType::U16,
        3 => MetadataValueType::U32,
        4 => MetadataValueType::U64,
        5 => MetadataValueType::I8,
        6 => MetadataValueType::I16,
        7 => MetadataValueType::I32,
        8 => MetadataValueType::I64,
        9 => MetadataValueType::Bool,
        10 => MetadataValueType::Bytes,
        x => panic!("Unknown value type id of {}", x),
    }
}
#[cfg(test)]
mod tests {
    //! Round-trip tests for the type-tag packing in KLV ids, plus key-map identity rules
    //! (same name+type pair -> same key; different name or type -> different key).
    use super::*;
    #[test]
    fn can_apply_u8_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::U8);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::U8);
    }
    #[test]
    fn can_apply_u16_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::U16);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::U16);
    }
    #[test]
    fn can_apply_u32_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::U32);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::U32);
    }
    #[test]
    fn can_apply_u64_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::U64);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::U64);
    }
    #[test]
    fn can_apply_i8_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::I8);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::I8);
    }
    #[test]
    fn can_apply_i16_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::I16);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::I16);
    }
    #[test]
    fn can_apply_i32_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::I32);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::I32);
    }
    #[test]
    fn can_apply_i64_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::I64);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::I64);
    }
    #[test]
    fn can_apply_bool_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::Bool);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::Bool);
    }
    #[test]
    fn can_apply_bytes_value_type_to_klv_id() {
        let original_id = 5;
        let id = apply_value_type_to_klv_id(original_id, MetadataValueType::Bytes);
        assert_ne!(
            id, original_id,
            "Applied id should not have been the same as the original id"
        );
        let value_type = value_type_from_klv_id(id);
        assert_eq!(value_type, MetadataValueType::Bytes);
    }
    #[test]
    fn same_name_type_pair_gets_same_key_returned() {
        let name = "test123";
        let mut map = MetadataKeyMap::default();
        let key1 = map.register(name, MetadataValueType::U32);
        let key2 = map.register(name, MetadataValueType::U32);
        assert_eq!(key1, key2);
    }
    #[test]
    fn different_name_gets_different_keys_returned() {
        let name1 = "test123";
        let name2 = "3456";
        let mut map = MetadataKeyMap::default();
        let key1 = map.register(name1, MetadataValueType::Bool);
        let key2 = map.register(name2, MetadataValueType::Bool);
        assert_ne!(key1, key2);
    }
    #[test]
    fn different_type_gets_different_keys_returned() {
        let name = "test123";
        let mut map = MetadataKeyMap::default();
        let key1 = map.register(name, MetadataValueType::Bool);
        let key2 = map.register(name, MetadataValueType::U32);
        assert_ne!(key1, key2);
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/runner/test_steps.rs | mmids-core/src/workflows/runner/test_steps.rs | use crate::workflows::definitions::WorkflowStepDefinition;
use crate::workflows::steps::factory::StepGenerator;
use crate::workflows::steps::futures_channel::{
FuturesChannelInnerResult, WorkflowStepFuturesChannel,
};
use crate::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use crate::workflows::MediaNotification;
use std::sync::atomic::{AtomicU16, Ordering};
use std::sync::Arc;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::watch::Receiver;
/// Generator for a test input step whose media and status are driven externally by the
/// test through watch channels.
pub struct TestInputStepGenerator {
    /// Watch channel the test uses to feed media notifications into the step
    pub media_receiver: Receiver<MediaNotification>,
    /// Watch channel the test uses to change the step's reported status
    pub status_change: Receiver<StepStatus>,
    /// Watch channel whose media is forwarded to the step via its futures channel
    pub future_result_media_receiver: Receiver<MediaNotification>,
    /// Incremented by the step each time it receives a media notification
    pub media_received_count: Arc<AtomicU16>,
}
/// Generator for a test output step that forwards media it receives back to the test.
pub struct TestOutputStepGenerator {
    /// Channel the step uses to hand received media back to the test
    pub media_sender: UnboundedSender<MediaNotification>,
    /// Watch channel the test uses to change the step's reported status
    pub status_change: Receiver<StepStatus>,
}
struct TestInputStep {
    status: StepStatus,
    media_receiver: Receiver<MediaNotification>,
    status_receiver: Receiver<StepStatus>,
    media_received_count: Arc<AtomicU16>,
}
struct TestOutputStep {
    status: StepStatus,
    media: UnboundedSender<MediaNotification>,
    status_receiver: Receiver<StepStatus>,
}
impl StepFutureResult for InputFutureResult {}
// Notifications delivered back to the input step through its futures channel.
enum InputFutureResult {
    StatusChannelClosed,
    MediaChannelClosed,
    StatusReceived,
    MediaReceived,
    // Carries the media so the step does not have to re-borrow the watch channel
    FutureResultMediaReceived(MediaNotification),
    FutureResultMediaChannelClosed,
}
impl StepFutureResult for OutputFutureResult {}
// Notifications delivered back to the output step through its futures channel.
enum OutputFutureResult {
    StatusChannelClosed,
    StatusReceived,
}
impl StepGenerator for TestInputStepGenerator {
    /// Builds the test input step and wires its watch channels into the step's futures
    /// channel so channel activity comes back to the step as future-result notifications.
    fn generate(
        &self,
        _definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult {
        let step = TestInputStep {
            status: StepStatus::Created,
            media_receiver: self.media_receiver.clone(),
            status_receiver: self.status_change.clone(),
            media_received_count: self.media_received_count.clone(),
        };
        // Helpers (defined elsewhere in this file — not visible here) subscribe the
        // futures channel to the media and status watch channels.
        input_media_received(self.media_receiver.clone(), &futures_channel);
        input_status_received(self.status_change.clone(), &futures_channel);
        futures_channel.send_on_generic_watch_recv(
            self.future_result_media_receiver.clone(),
            |media| InputFutureResult::FutureResultMediaReceived(media.clone()),
            || InputFutureResult::FutureResultMediaChannelClosed,
        );
        Ok((Box::new(step), StepStatus::Created))
    }
}
impl StepGenerator for TestOutputStepGenerator {
fn generate(
&self,
_definition: WorkflowStepDefinition,
futures_channel: WorkflowStepFuturesChannel,
) -> StepCreationResult {
let step = TestOutputStep {
status: StepStatus::Created,
media: self.media_sender.clone(),
status_receiver: self.status_change.clone(),
};
output_status_received(self.status_change.clone(), &futures_channel);
Ok((Box::new(step), StepStatus::Created))
}
}
impl WorkflowStep for TestInputStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
for notification in inputs.notifications.drain(..) {
let future_result = match notification.downcast::<InputFutureResult>() {
Ok(result) => result,
Err(_) => panic!("Received future that wasn't an InputFutureResult"),
};
match *future_result {
InputFutureResult::MediaChannelClosed => {
self.status = StepStatus::Error {
message: "media channel closed".to_string(),
};
}
InputFutureResult::StatusChannelClosed => {
self.status = StepStatus::Error {
message: "status channel closed".to_string(),
};
}
InputFutureResult::MediaReceived => {
let media = (*self.media_receiver.borrow()).clone();
outputs.media.push(media);
self.media_received_count.fetch_add(1, Ordering::SeqCst);
}
InputFutureResult::StatusReceived => {
let status = (*self.status_receiver.borrow()).clone();
self.status = status;
}
InputFutureResult::FutureResultMediaReceived(media) => {
let _ = futures_channel.send(FuturesChannelInnerResult::Media(media));
}
InputFutureResult::FutureResultMediaChannelClosed => {
self.status = StepStatus::Error {
message: "futures media channel closed".to_string(),
};
}
}
}
for media in inputs.media.drain(..) {
outputs.media.push(media); // for workflow forwarding tests
self.media_received_count.fetch_add(1, Ordering::SeqCst);
}
self.status.clone()
}
}
impl WorkflowStep for TestOutputStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
_outputs: &mut StepOutputs,
_futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
for notification in inputs.notifications.drain(..) {
let future_result = match notification.downcast::<OutputFutureResult>() {
Ok(result) => result,
Err(_) => panic!("Received future that wasn't an OutputFutureResult"),
};
match *future_result {
OutputFutureResult::StatusChannelClosed => {
self.status = StepStatus::Error {
message: "status channel closed".to_string(),
};
}
OutputFutureResult::StatusReceived => {
self.status = (*self.status_receiver.borrow()).clone();
}
}
}
for media in inputs.media.drain(..) {
let _ = self.media.send(media);
}
self.status.clone()
}
}
fn input_media_received(
receiver: Receiver<MediaNotification>,
futures_channel: &WorkflowStepFuturesChannel,
) {
futures_channel.send_on_generic_watch_recv(
receiver,
|_| InputFutureResult::MediaReceived,
|| InputFutureResult::MediaChannelClosed,
);
}
fn input_status_received(
receiver: Receiver<StepStatus>,
futures_channel: &WorkflowStepFuturesChannel,
) {
futures_channel.send_on_generic_watch_recv(
receiver,
|_| InputFutureResult::StatusReceived,
|| InputFutureResult::StatusChannelClosed,
);
}
fn output_status_received(
receiver: Receiver<StepStatus>,
futures_channel: &WorkflowStepFuturesChannel,
) {
futures_channel.send_on_generic_watch_recv(
receiver,
|_| OutputFutureResult::StatusReceived,
|| OutputFutureResult::StatusChannelClosed,
);
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/runner/test_context.rs | mmids-core/src/workflows/runner/test_context.rs | use crate::workflows::definitions::{
WorkflowDefinition, WorkflowStepDefinition, WorkflowStepId, WorkflowStepType,
};
use crate::workflows::runner::test_steps::{TestInputStepGenerator, TestOutputStepGenerator};
use crate::workflows::steps::factory::WorkflowStepFactory;
use crate::workflows::steps::StepStatus;
use crate::workflows::{
start_workflow, MediaNotification, MediaNotificationContent, WorkflowRequest,
};
use crate::StreamId;
use std::collections::HashMap;
use std::sync::atomic::AtomicU16;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::watch::{channel, Sender};
pub struct TestContext {
pub workflow: UnboundedSender<WorkflowRequest>,
pub input_media_sender: Sender<MediaNotification>,
pub output_step_media_receiver: UnboundedReceiver<MediaNotification>,
pub input_status: Sender<StepStatus>,
pub output_status: Sender<StepStatus>,
pub input_step_id: WorkflowStepId,
pub output_step_id: WorkflowStepId,
pub input_future_media_sender: Sender<MediaNotification>,
pub input_step_media_received_count: Arc<AtomicU16>,
}
impl TestContext {
pub fn new() -> Self {
let (input_media_sender, input_media_receiver) = channel(MediaNotification {
stream_id: StreamId(Arc::new("invalid".to_string())),
content: MediaNotificationContent::StreamDisconnected,
});
let (output_media_sender, output_media_receiver) = unbounded_channel();
let (input_status_sender, input_status_receiver) = channel(StepStatus::Created);
let (output_status_sender, output_status_receiver) = channel(StepStatus::Created);
// let (input_media_sender, input_media_receiver) = unbounded_channel();
let (future_media_sender, future_media_receiver) = channel(MediaNotification {
stream_id: StreamId(Arc::new("bad".to_string())),
content: MediaNotificationContent::StreamDisconnected,
});
let input_received_counter = Arc::new(AtomicU16::new(0));
let input_step = TestInputStepGenerator {
media_receiver: input_media_receiver,
status_change: input_status_receiver,
future_result_media_receiver: future_media_receiver,
media_received_count: input_received_counter.clone(),
};
let output_step = TestOutputStepGenerator {
media_sender: output_media_sender,
status_change: output_status_receiver,
};
let mut factory = WorkflowStepFactory::new();
factory
.register(WorkflowStepType("input".to_string()), Box::new(input_step))
.expect("Failed to register input step");
factory
.register(
WorkflowStepType("output".to_string()),
Box::new(output_step),
)
.expect("Failed to register output step");
let definition = WorkflowDefinition {
name: Arc::new("abc".to_string()),
routed_by_reactor: false,
steps: vec![
WorkflowStepDefinition {
step_type: WorkflowStepType("input".to_string()),
parameters: HashMap::new(),
},
WorkflowStepDefinition {
step_type: WorkflowStepType("output".to_string()),
parameters: HashMap::new(),
},
],
};
let input_step_id = definition.steps[0].get_id();
let output_step_id = definition.steps[1].get_id();
let workflow = start_workflow(definition, Arc::new(factory));
TestContext {
workflow,
input_media_sender,
output_step_media_receiver: output_media_receiver,
input_status: input_status_sender,
output_status: output_status_sender,
input_step_id,
output_step_id,
input_future_media_sender: future_media_sender,
input_step_media_received_count: input_received_counter,
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/runner/tests.rs | mmids-core/src/workflows/runner/tests.rs | use crate::workflows::definitions::{WorkflowDefinition, WorkflowStepDefinition, WorkflowStepType};
use crate::workflows::runner::test_context::TestContext;
use crate::workflows::steps::factory::WorkflowStepFactory;
use crate::workflows::steps::StepStatus;
use crate::workflows::{
start_workflow, MediaNotification, MediaNotificationContent, WorkflowRequest,
WorkflowRequestOperation, WorkflowStatus,
};
use crate::{test_utils, StreamId};
use std::collections::HashMap;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::oneshot::channel;
use tokio::time::timeout;
#[tokio::test]
async fn workflow_created_with_steps_in_pending_state() {
let context = TestContext::new();
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(workflow.active_steps.len(), 0, "Expected zero active steps");
assert_eq!(
workflow.pending_steps.len(),
2,
"Expected two pending steps"
);
}
#[tokio::test]
async fn steps_pending_when_only_first_step_is_active() {
let context = TestContext::new();
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(workflow.active_steps.len(), 0, "Expected zero active steps");
assert_eq!(
workflow.pending_steps.len(),
2,
"Expected two pending steps"
);
}
#[tokio::test]
async fn steps_pending_when_only_second_step_is_active() {
let context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(workflow.active_steps.len(), 0, "Expected zero active steps");
assert_eq!(
workflow.pending_steps.len(),
2,
"Expected two pending steps"
);
}
#[tokio::test]
async fn steps_active_when_all_pending_steps_become_active() {
let context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(workflow.active_steps.len(), 0, "Expected zero active steps");
assert_eq!(
workflow.pending_steps.len(),
2,
"Expected two pending steps"
);
}
#[tokio::test]
async fn workflow_in_error_state_if_any_step_goes_to_error_state() {
let context = TestContext::new();
context
.output_status
.send(StepStatus::Error {
message: "hi".to_string(),
})
.expect("Failed to set output state");
tokio::time::sleep(Duration::from_millis(10)).await;
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
match workflow.status {
WorkflowStatus::Error {
message: _,
failed_step_id,
} => {
assert_eq!(
failed_step_id, context.output_step_id.0,
"Unexpected failed step id"
);
}
status => panic!("Unexpected workflow status: {:?}", status),
}
}
#[tokio::test]
async fn workflow_passes_media_from_one_step_to_the_next() {
let mut context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
context
.input_media_sender
.send(MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::StreamDisconnected,
})
.expect("Failed to send media notification to step");
let response = test_utils::expect_mpsc_response(&mut context.output_step_media_receiver).await;
assert_eq!(
response.stream_id,
StreamId(Arc::new("abc".to_string())),
"Unexpected stream id"
);
match response.content {
MediaNotificationContent::StreamDisconnected => (),
x => panic!("Unexpected media notification: {:?}", x),
}
}
#[tokio::test]
async fn media_sent_to_workflow_flows_through_steps() {
let mut context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::MediaNotification {
media: MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::StreamDisconnected,
},
},
})
.expect("Failed to send media to workflow");
let response = test_utils::expect_mpsc_response(&mut context.output_step_media_receiver).await;
assert_eq!(
response.stream_id,
StreamId(Arc::new("abc".to_string())),
"Unexpected stream id"
);
match response.content {
MediaNotificationContent::StreamDisconnected => (),
x => panic!("Unexpected media notification: {:?}", x),
}
}
#[tokio::test]
async fn steps_in_active_workflow_are_pending() {
let context = TestContext::new();
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
tokio::time::sleep(Duration::from_millis(10)).await;
// Otherwise pending step will immediately get a resolved future as active
context
.output_status
.send(StepStatus::Created)
.expect("Failed to set output state");
let mut params = HashMap::new(); // parameters will give it a new id
params.insert("a".to_string(), Some("b".to_string()));
let definition = WorkflowDefinition {
name: Arc::new("abc".to_string()),
routed_by_reactor: false,
steps: vec![WorkflowStepDefinition {
step_type: WorkflowStepType("output".to_string()),
parameters: params,
}],
};
let new_step_id = definition.steps[0].get_id();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::UpdateDefinition {
new_definition: definition,
},
})
.expect("Failed ot send update request");
tokio::time::sleep(Duration::from_millis(10)).await;
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(workflow.active_steps.len(), 2, "Expected two active steps");
assert_eq!(
workflow.pending_steps.len(),
1,
"Expected one pending steps"
);
assert_eq!(
workflow.pending_steps[0].step_id, new_step_id,
"Unexpected pending step id"
);
}
#[tokio::test]
async fn new_pending_steps_replace_active_steps_when_pending_steps_get_active_status() {
let context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
// Otherwise pending step will immediately get a resolved future as active
context
.output_status
.send(StepStatus::Created)
.expect("Failed to set output state");
let mut params1 = HashMap::new(); // parameters will give it a new id
params1.insert("a".to_string(), Some("b".to_string()));
let mut params2 = HashMap::new();
params2.insert("c".to_string(), None);
let definition = WorkflowDefinition {
name: Arc::new("abc".to_string()),
routed_by_reactor: false,
steps: vec![
WorkflowStepDefinition {
step_type: WorkflowStepType("output".to_string()),
parameters: params1,
},
WorkflowStepDefinition {
step_type: WorkflowStepType("output".to_string()),
parameters: params2,
},
],
};
let step1_id = definition.steps[0].get_id();
let step2_id = definition.steps[1].get_id();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::UpdateDefinition {
new_definition: definition,
},
})
.expect("Failed ot send update request");
tokio::time::sleep(Duration::from_millis(10)).await;
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
tokio::time::sleep(Duration::from_millis(10)).await;
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request to workflow");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected workflow state returned");
let workflow = response.unwrap();
assert_eq!(
workflow.status,
WorkflowStatus::Running,
"Expected workflow to be running"
);
assert_eq!(
workflow.active_steps.len(),
2,
"Unexpected number of active steps"
);
assert_eq!(
workflow.active_steps[0].step_id, step1_id,
"Unexpected active step 1 id"
);
assert_eq!(
workflow.active_steps[1].step_id, step2_id,
"Unexpected active step 2 id"
);
assert_eq!(
workflow.pending_steps.len(),
0,
"Unexpected number of pending steps"
);
}
#[tokio::test]
async fn channel_closed_after_shutdown() {
let context = TestContext::new();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::StopWorkflow,
})
.expect("Failed to send shutdown message");
match timeout(Duration::from_millis(10), context.workflow.closed()).await {
Ok(_) => (),
Err(_) => panic!("Workflow channel didn't close"),
}
}
#[tokio::test]
async fn workflow_in_error_state_if_factory_cant_find_step() {
let factory = Arc::new(WorkflowStepFactory::new());
let definition = WorkflowDefinition {
name: Arc::new("abc".to_string()),
routed_by_reactor: false,
steps: vec![WorkflowStepDefinition {
step_type: WorkflowStepType("input".to_string()),
parameters: HashMap::new(),
}],
};
let step_id = definition.steps[0].get_id();
let workflow = start_workflow(definition, factory);
tokio::time::sleep(Duration::from_millis(10)).await;
let (sender, receiver) = channel();
workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected valid response");
match response.unwrap().status {
WorkflowStatus::Error {
message: _,
failed_step_id,
} => {
assert_eq!(failed_step_id, step_id.0, "Unexpected failed step id");
}
status => panic!("Unexpected workflow status: {:?}", status),
}
}
#[tokio::test]
async fn workflow_in_error_state_if_updated_steps_arent_registered_with_factory() {
let context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
let definition = WorkflowDefinition {
name: Arc::new("abc".to_string()),
routed_by_reactor: false,
steps: vec![WorkflowStepDefinition {
step_type: WorkflowStepType("output2".to_string()),
parameters: HashMap::new(),
}],
};
let step1_id = definition.steps[0].get_id();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::UpdateDefinition {
new_definition: definition,
},
})
.expect("Failed to send update request");
tokio::time::sleep(Duration::from_millis(10)).await;
let (sender, receiver) = channel();
context
.workflow
.send(WorkflowRequest {
request_id: "".to_string(),
operation: WorkflowRequestOperation::GetState {
response_channel: sender,
},
})
.expect("Failed to send get state request");
let response = test_utils::expect_oneshot_response(receiver).await;
assert!(response.is_some(), "Expected valid response");
match response.unwrap().status {
WorkflowStatus::Error {
message: _,
failed_step_id,
} => {
assert_eq!(failed_step_id, step1_id.0, "Unexpected failed step id");
}
status => panic!("Unexpected workflow status: {:?}", status),
}
}
#[tokio::test]
async fn media_future_result_from_active_step_immediately_goes_to_next_step() {
let mut context = TestContext::new();
context
.output_status
.send(StepStatus::Active)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Active)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
};
context
.input_future_media_sender
.send(media.clone())
.expect("Failed to send media notification via futures channel");
let response = test_utils::expect_mpsc_response(&mut context.output_step_media_receiver).await;
assert_eq!(media, response, "Unexpected media packet");
let count = context
.input_step_media_received_count
.load(Ordering::Acquire);
assert_eq!(count, 0, "Expected no media received by first step")
}
#[tokio::test]
async fn media_future_result_from_pending_step_does_not_go_to_next_step() {
let mut context = TestContext::new();
context
.output_status
.send(StepStatus::Created)
.expect("Failed to set output state");
context
.input_status
.send(StepStatus::Created)
.expect("Failed to set input state");
tokio::time::sleep(Duration::from_millis(10)).await;
let media = MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
};
context
.input_future_media_sender
.send(media.clone())
.expect("Failed to send media notification via futures channel");
test_utils::expect_mpsc_timeout(&mut context.output_step_media_receiver).await;
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/runner/mod.rs | mmids-core/src/workflows/runner/mod.rs | #[cfg(test)]
mod test_context;
#[cfg(test)]
mod test_steps;
#[cfg(test)]
mod tests;
use crate::actor_utils::notify_on_unbounded_recv;
use crate::workflows::definitions::{WorkflowDefinition, WorkflowStepDefinition, WorkflowStepId};
use crate::workflows::steps::factory::WorkflowStepFactory;
use crate::workflows::steps::futures_channel::{
FuturesChannelInnerResult, FuturesChannelResult, WorkflowStepFuturesChannel,
};
use crate::workflows::steps::{
StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use crate::workflows::{MediaNotification, MediaNotificationContent};
use crate::StreamId;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::Sender;
use tracing::{error, info, instrument, span, warn, Level};
/// A request to the workflow to perform an action
#[derive(Debug)]
pub struct WorkflowRequest {
/// An identifier that can be used to correlate this request with its
pub request_id: String,
pub operation: WorkflowRequestOperation,
}
/// Operations that can be made to an actively running workflow
#[derive(Debug)]
pub enum WorkflowRequestOperation {
/// Requests the workflow update with a new definition. The workflow will take shape to look
/// exactly as the specified definition has. Any existing steps that aren't specified will
/// be removed, any new steps will be created, and any steps that stay will reflect the order
/// specified.
UpdateDefinition { new_definition: WorkflowDefinition },
/// Requests the workflow to return a snapshot of its current state
GetState {
response_channel: Sender<Option<WorkflowState>>,
},
/// Requests the workflow stop operating
StopWorkflow,
/// Sends a media notification to this stream
MediaNotification { media: MediaNotification },
}
#[derive(Debug)]
pub struct WorkflowState {
pub status: WorkflowStatus,
pub active_steps: Vec<WorkflowStepState>,
pub pending_steps: Vec<WorkflowStepState>,
}
#[derive(Debug)]
pub struct WorkflowStepState {
pub step_id: WorkflowStepId,
pub definition: WorkflowStepDefinition,
pub status: StepStatus,
}
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum WorkflowStatus {
Running,
Error {
failed_step_id: u64,
message: String,
},
}
/// Starts the execution of a workflow with the specified definition
pub fn start_workflow(
definition: WorkflowDefinition,
step_factory: Arc<WorkflowStepFactory>,
) -> UnboundedSender<WorkflowRequest> {
let (sender, receiver) = unbounded_channel();
let (actor_sender, actor_receiver) = unbounded_channel();
let actor = Actor::new(&definition, step_factory, receiver, actor_sender);
tokio::spawn(actor.run(definition, actor_receiver));
sender
}
enum FutureResult {
AllConsumersGone,
WorkflowRequestReceived(WorkflowRequest),
StepFutureSendersGone,
StepFutureResolved(FuturesChannelResult),
}
struct StreamDetails {
/// The step that first sent a new stream media notification. We know that if this step is
/// removed, the stream no longer has a source of video and should be considered disconnected
originating_step_id: WorkflowStepId,
}
struct TrackedWorkflowStep {
instance: Option<Box<dyn WorkflowStep + Send>>,
status: StepStatus,
}
struct Actor {
name: Arc<String>,
steps_by_definition_id: HashMap<WorkflowStepId, TrackedWorkflowStep>,
active_steps: Vec<WorkflowStepId>,
pending_steps: Vec<WorkflowStepId>,
step_inputs: StepInputs,
step_outputs: StepOutputs,
cached_step_media: HashMap<WorkflowStepId, HashMap<StreamId, Vec<MediaNotification>>>,
cached_inbound_media: HashMap<StreamId, Vec<MediaNotification>>,
active_streams: HashMap<StreamId, StreamDetails>,
step_factory: Arc<WorkflowStepFactory>,
step_definitions: HashMap<WorkflowStepId, WorkflowStepDefinition>,
status: WorkflowStatus,
step_futures_sender: UnboundedSender<FuturesChannelResult>,
}
impl Actor {
#[instrument(skip_all, fields(workflow_name = %definition.name))]
fn new(
definition: &WorkflowDefinition,
step_factory: Arc<WorkflowStepFactory>,
receiver: UnboundedReceiver<WorkflowRequest>,
actor_sender: UnboundedSender<FutureResult>,
) -> Self {
notify_on_unbounded_recv(
receiver,
actor_sender.clone(),
FutureResult::WorkflowRequestReceived,
|| FutureResult::AllConsumersGone,
);
let (futures_sender, futures_receiver) = unbounded_channel();
notify_on_unbounded_recv(
futures_receiver,
actor_sender,
FutureResult::StepFutureResolved,
|| FutureResult::StepFutureSendersGone,
);
Actor {
name: definition.name.clone(),
steps_by_definition_id: HashMap::new(),
active_steps: Vec::new(),
pending_steps: Vec::new(),
step_inputs: StepInputs::new(),
step_outputs: StepOutputs::new(),
cached_step_media: HashMap::new(),
cached_inbound_media: HashMap::new(),
active_streams: HashMap::new(),
step_factory,
step_definitions: HashMap::new(),
status: WorkflowStatus::Running,
step_futures_sender: futures_sender,
}
}
#[instrument(name = "Workflow Execution", skip_all, fields(workflow_name = %self.name))]
async fn run(
mut self,
initial_definition: WorkflowDefinition,
mut receiver: UnboundedReceiver<FutureResult>,
) {
info!("Starting workflow");
self.apply_new_definition(initial_definition);
while let Some(future) = receiver.recv().await {
match future {
FutureResult::AllConsumersGone => {
warn!("All channel owners gone");
break;
}
FutureResult::StepFutureSendersGone => {
panic!(
"All workflow runner future senders are gone, but should be impossible \
since the actor holds a sender."
);
}
FutureResult::WorkflowRequestReceived(request) => {
let mut stop_workflow = false;
self.handle_workflow_request(request, &mut stop_workflow);
if stop_workflow {
break;
}
}
FutureResult::StepFutureResolved(value) => {
let step_id = value.step_id;
match value.result {
FuturesChannelInnerResult::Generic(result) => {
self.execute_steps(step_id, Some(result), false, true);
}
FuturesChannelInnerResult::Media(media) => {
// Handle the media as if it came as an output of a normal step execution
self.step_outputs.clear();
self.step_outputs.media.push(media);
self.handle_executed_step_outputs(step_id);
// If this came from an active step, then we need to start executing
// inputs from the *next* step after the one that produced this media.
// This works because `handle_executed_step_outputs` takes outputs
// and puts them properly into inputs, thus allowing us to just
// execute the next step normally.
if let Some(step_index) = self.get_active_step_index(step_id) {
let next_step_index = step_index + 1;
if let Some(next_step_id) = self.active_steps.get(next_step_index) {
self.execute_steps(*next_step_id, None, true, false);
}
}
}
}
}
}
}
info!("Workflow closing");
}
    /// Handles a single request sent to this workflow from an external caller: definition
    /// updates, state queries, stop requests, and inbound media notifications. Sets
    /// `stop_workflow` to true when the caller asked for this workflow to shut down.
    #[instrument(skip(self, request, stop_workflow), fields(request_id = %request.request_id))]
    fn handle_workflow_request(&mut self, request: WorkflowRequest, stop_workflow: &mut bool) {
        match request.operation {
            WorkflowRequestOperation::UpdateDefinition { new_definition } => {
                self.apply_new_definition(new_definition);
            }
            WorkflowRequestOperation::GetState { response_channel } => {
                info!("Workflow state requested by external caller");
                let mut state = WorkflowState {
                    status: self.status.clone(),
                    pending_steps: Vec::new(),
                    active_steps: Vec::new(),
                };
                // Snapshot every pending step. A step that has a definition but no tracked
                // instance is reported as an error entry rather than silently omitted.
                for id in &self.pending_steps {
                    if let Some(definition) = self.step_definitions.get(id) {
                        if let Some(step) = self.steps_by_definition_id.get(id) {
                            state.pending_steps.push(WorkflowStepState {
                                step_id: *id,
                                definition: definition.clone(),
                                status: step.status.clone(),
                            });
                        } else {
                            state.pending_steps.push(WorkflowStepState {
                                step_id: *id,
                                definition: definition.clone(),
                                status: StepStatus::Error {
                                    message: "Step not instantiated".to_string(),
                                },
                            });
                        }
                    } else {
                        error!(step_id = %id, "No definition was found for step id {}", id.0);
                    }
                }
                // Same snapshot logic for the active steps.
                for id in &self.active_steps {
                    if let Some(definition) = self.step_definitions.get(id) {
                        if let Some(step) = self.steps_by_definition_id.get(id) {
                            state.active_steps.push(WorkflowStepState {
                                step_id: *id,
                                definition: definition.clone(),
                                status: step.status.clone(),
                            });
                        } else {
                            state.active_steps.push(WorkflowStepState {
                                step_id: *id,
                                definition: definition.clone(),
                                status: StepStatus::Error {
                                    message: "Step not instantiated".to_string(),
                                },
                            });
                        }
                    } else {
                        error!(step_id = %id, "No definition was found for step id {}", id.0);
                    }
                }
                // The caller may have given up waiting; a send failure is not an error here.
                let _ = response_channel.send(Some(state));
            }
            WorkflowRequestOperation::StopWorkflow => {
                info!("Closing workflow as requested");
                *stop_workflow = true;
                // Drop every step instance (active and pending) so they shut down. Steps
                // already in an error state keep that status for reporting purposes.
                for id in &self.active_steps {
                    if let Some(step) = self.steps_by_definition_id.get_mut(id) {
                        step.instance.take(); // drop it to shut it down
                        if !matches!(&step.status, &StepStatus::Error { .. }) {
                            step.status = StepStatus::Shutdown;
                        }
                    }
                }
                for id in &self.pending_steps {
                    if let Some(step) = self.steps_by_definition_id.get_mut(id) {
                        step.instance.take(); // drop it to shut it down
                        if !matches!(&step.status, &StepStatus::Error { .. }) {
                            step.status = StepStatus::Shutdown;
                        }
                    }
                }
            }
            WorkflowRequestOperation::MediaNotification { media } => {
                // Cache replayable media (stream starts, sequence headers) before handing the
                // notification to the first active step and letting it flow down the chain.
                self.update_inbound_media_cache(&media);
                self.step_inputs.clear();
                self.step_inputs.media.push(media);
                if let Some(id) = self.active_steps.first() {
                    let id = *id;
                    self.execute_steps(id, None, true, true);
                }
            }
        }
    }
    /// Applies an updated workflow definition: detects whether anything actually changed,
    /// clears out an errored workflow's old steps, instantiates any brand new steps, and
    /// queues the new step set as pending until every step reports itself ready.
    fn apply_new_definition(&mut self, definition: WorkflowDefinition) {
        let new_step_ids = definition
            .steps
            .iter()
            .map(|x| x.get_id())
            .collect::<HashSet<_>>();
        // If the workflow is healthy, nothing is pending, and the new definition contains
        // exactly the currently active step ids, there is nothing to apply.
        if self.status == WorkflowStatus::Running
            && self.pending_steps.is_empty()
            && self.active_steps.len() == new_step_ids.len()
            && self.active_steps.iter().all(|x| new_step_ids.contains(x))
        {
            // No actual changes to this workflow
            return;
        }
        info!(
            "Applying a new workflow definition with {} steps",
            definition.steps.len()
        );
        // If the workflow is in an errored state, clear out all the existing steps, as they've
        // been shut down anyway. So start this from a clean state
        if let WorkflowStatus::Error {
            message: _,
            failed_step_id: _,
        } = &self.status
        {
            self.active_steps.clear();
            self.steps_by_definition_id.clear();
            self.status = WorkflowStatus::Running;
        }
        self.pending_steps.clear();
        for step_definition in definition.steps {
            let id = step_definition.get_id();
            let step_type = step_definition.step_type.clone();
            self.step_definitions
                .insert(step_definition.get_id(), step_definition.clone());
            self.pending_steps.push(id);
            // Only instantiate steps we don't already track; steps that survive the
            // definition change keep their existing instances.
            if let Entry::Vacant(entry) = self.steps_by_definition_id.entry(id) {
                let span = span!(Level::INFO, "Step Creation", step_id = %id);
                let _enter = span.enter();
                // Build a human readable "type: key=value ..." summary for the log line.
                let mut details = format!("{}: ", step_definition.step_type.0);
                for (key, value) in &step_definition.parameters {
                    match value {
                        Some(value) => details.push_str(&format!("{}={} ", key, value)),
                        None => details.push_str(&format!("{} ", key)),
                    };
                }
                info!("Creating step {}", details);
                let step_result = self
                    .step_factory
                    .create_step(step_definition, &self.step_futures_sender);
                // Outer failure: the factory itself could not dispatch this step type.
                let step_result = match step_result {
                    Ok(step_result) => step_result,
                    Err(error) => {
                        error!("Step factory failed to generate step instance: {:?}", error);
                        self.set_status_to_error(
                            id,
                            format!("Failed to generate step instance: {:?}", error),
                        );
                        return;
                    }
                };
                // Inner failure: the registered generator rejected this definition.
                let (step, status) = match step_result {
                    Ok(step) => step,
                    Err(error) => {
                        error!("Step could not be generated: {}", error);
                        self.set_status_to_error(id, format!("Failed to generate step: {}", error));
                        return;
                    }
                };
                let tracked_step = TrackedWorkflowStep {
                    instance: Some(step),
                    status,
                };
                entry.insert(tracked_step);
                info!("Step type '{}' created", step_type);
            }
        }
        // Force the swap check so an intentionally empty definition tears everything down.
        self.check_if_all_pending_steps_are_active(true);
    }
fn execute_steps(
&mut self,
initial_step_id: WorkflowStepId,
future_result: Option<Box<dyn StepFutureResult>>,
preserve_current_step_inputs: bool,
perform_pending_check: bool,
) {
if self.status != WorkflowStatus::Running {
return;
}
if !preserve_current_step_inputs {
self.step_inputs.clear();
}
self.step_outputs.clear();
if let Some(future_result) = future_result {
self.step_inputs.notifications.push(future_result);
}
// If we have a start_index, that means the step we want to execute is an active step. So
// execute that step and all active steps after it. If it's not an active step, then we
// only want to execute that one step and none others.
let start_index = self.get_active_step_index(initial_step_id);
if let Some(start_index) = start_index {
for x in start_index..self.active_steps.len() {
self.execute_step(self.active_steps[x]);
}
} else {
self.execute_step(initial_step_id);
}
if perform_pending_check {
self.check_if_all_pending_steps_are_active(false);
}
}
fn execute_step(&mut self, step_id: WorkflowStepId) {
if self.status != WorkflowStatus::Running {
return;
}
let span = span!(Level::INFO, "Step Execution", step_id = %step_id);
let _enter = span.enter();
let step = match self.steps_by_definition_id.get_mut(&step_id) {
Some(x) => x,
None => {
let is_active = self.active_steps.contains(&step_id);
error!(
"Attempted to execute step id {} but we it has no definition (is active: {})",
step_id.0, is_active
);
return;
}
};
let step_instance = match step.instance.as_mut() {
Some(instance) => instance,
None => return, // We have no step instance to run. Might need to check status here?
};
let channel = WorkflowStepFuturesChannel::new(step_id, self.step_futures_sender.clone());
let new_status =
step_instance.execute(&mut self.step_inputs, &mut self.step_outputs, channel);
step.status = new_status;
if let StepStatus::Error { message } = &step.status {
let message = message.clone();
self.set_status_to_error(step_id, message);
return;
}
self.handle_executed_step_outputs(step_id);
}
fn check_if_all_pending_steps_are_active(&mut self, swap_if_pending_is_empty: bool) {
let mut all_are_active = true;
for id in &self.pending_steps {
let step = match self.steps_by_definition_id.get(id) {
Some(x) => Some(x),
None => {
error!(
step_id = %id,
"Workflow had step id {} pending but this step was not defined", id.0
);
let id = *id;
self.set_status_to_error(id, "workflow step not defined".to_string());
return;
}
};
if let Some(step) = step {
match &step.status {
StepStatus::Created => all_are_active = false,
StepStatus::Active => (),
StepStatus::Error { message } => {
let id = *id;
let message = message.clone();
self.set_status_to_error(id, message);
return;
}
StepStatus::Shutdown => return,
}
} else {
// the step is still waiting to be instantiated by the factory
}
}
if (!self.pending_steps.is_empty() && all_are_active)
|| (self.pending_steps.is_empty() && swap_if_pending_is_empty)
{
// Since we have pending steps and all are now ready to become active, we need to
// swap all active steps for pending steps to make them active.
// In the case of `swap_if_pending_is_empty`, this is usually the case if the user
// updates this workflow with a definition that contains no workflow steps, then that
// means the user specifically wants this workflow empty. So we need to tear down all
// active steps.
// Note: there's a possibility that a pending swap can trigger a new set
// of sequence headers to fall through. An example of this happening is if
// a transcoding step is placed in between an existing playback step. This
// will probably cause playback issues unless the client supports changing
// decoding parameters mid-stream, which isn't certain. We either need to
// leave this up to mmids operators to realize, or need to come up with a
// solution to remove the footgun (such as disconnecting playback clients
// upon a new sequence header being seen). Unsure if that's the best
// approach though.
for index in (0..self.active_steps.len()).rev() {
let step_id = self.active_steps[index];
if !self.pending_steps.contains(&step_id) {
// Since this step is currently active but not pending, the swap will make this
// step go away for good. Therefore, we need to clean up its definition and
// raise disconnection notices for any streams originating from this step, so
// that latter steps that will survive will know not to expect more media
// from these streams.
info!(step_id = %step_id, "Removing now unused step id {}", step_id.0);
self.step_definitions.remove(&step_id);
if let Some(mut step) = self.steps_by_definition_id.remove(&step_id) {
let span = span!(Level::INFO, "Step Shutdown", step_id = %step_id);
let _enter = span.enter();
step.instance.take();
}
if let Some(cache) = self.cached_step_media.remove(&step_id) {
for key in cache.keys() {
if let Some(stream) = self.active_streams.get(key) {
if stream.originating_step_id == step_id {
for x in (index + 1)..self.active_steps.len() {
self.step_outputs.clear();
self.step_inputs.clear();
self.step_inputs.media.push(MediaNotification {
stream_id: key.clone(),
content: MediaNotificationContent::StreamDisconnected,
});
self.execute_step(self.active_steps[x]);
}
self.active_streams.remove(key);
}
}
}
}
}
}
// Since some pending steps may not have been around previously, they would not have
// gotten stream started notifications and missing sequence headers. So we need to
// find its parent step's cache and replay any required media notifications
for index in 0..self.pending_steps.len() {
let current_step_id = self.pending_steps[index];
if !self.active_steps.contains(¤t_step_id) {
// This is a new step
let notifications = if index == 0 {
// The first step uses the inbound cache, not step based cache
self.cached_inbound_media
.values()
.flatten()
.cloned()
.collect::<Vec<_>>()
} else {
let previous_step_id = self.pending_steps[index - 1];
if let Some(cache) = self.cached_step_media.get(&previous_step_id) {
cache.values().flatten().cloned().collect::<Vec<_>>()
} else {
Vec::new()
}
};
self.step_inputs.clear();
self.step_inputs.media.extend(notifications);
self.execute_steps(current_step_id, None, true, false);
// TODO: This is probably going to cause duplicate stream started notifications.
// Not sure a way around that and we probably need to remove those warnings.
// TODO: The current code only handles notifications raised by parents of
// new steps. There's the possibility that a change of order of existing
// steps could cause steps to be tracking streams that come in after the step,
// or not know about steps that were created in steps that used to be after but
// is now before. It also means it may have outdated sequence headers if
// a transcoding step was removed.
}
}
std::mem::swap(&mut self.pending_steps, &mut self.active_steps);
self.pending_steps.clear();
info!("All pending steps moved to active");
}
}
fn update_stream_details(&mut self, current_step_id: WorkflowStepId) {
for media in &self.step_outputs.media {
match &media.content {
MediaNotificationContent::Metadata { .. } => (),
MediaNotificationContent::MediaPayload { .. } => (),
MediaNotificationContent::NewIncomingStream { .. } => {
if !self.active_streams.contains_key(&media.stream_id) {
// Since this is the first time we've gotten a new incoming stream
// notification for this stream, assume this this stream originates from
// the current step
self.active_streams.insert(
media.stream_id.clone(),
StreamDetails {
originating_step_id: current_step_id,
},
);
}
}
MediaNotificationContent::StreamDisconnected => {
if let Some(details) = self.active_streams.get(&media.stream_id) {
if details.originating_step_id == current_step_id {
self.active_streams.remove(&media.stream_id);
}
}
}
}
}
}
fn update_inbound_media_cache(&mut self, media: &MediaNotification) {
match media.content {
MediaNotificationContent::NewIncomingStream { .. } => {
let collection = vec![media.clone()];
self.cached_inbound_media
.insert(media.stream_id.clone(), collection);
}
MediaNotificationContent::StreamDisconnected => {
self.cached_inbound_media.remove(&media.stream_id);
}
MediaNotificationContent::MediaPayload {
is_required_for_decoding: true,
..
} => {
if let Some(collection) = self.cached_inbound_media.get_mut(&media.stream_id) {
collection.push(media.clone());
}
}
_ => (),
}
}
fn update_media_cache_from_outputs(&mut self, step_id: WorkflowStepId) {
let step_cache = self.cached_step_media.entry(step_id).or_default();
for media in &self.step_outputs.media {
enum Operation {
Add,
Remove,
Ignore,
}
let operation = match &media.content {
MediaNotificationContent::StreamDisconnected => {
// Stream has ended so no reason to keep the cache around
Operation::Remove
}
MediaNotificationContent::NewIncomingStream { .. } => Operation::Add,
MediaNotificationContent::Metadata { .. } => {
// I *think* we can ignore these, since the sequence headers are really
// what's important to replay
Operation::Ignore
}
MediaNotificationContent::MediaPayload {
is_required_for_decoding,
..
} => {
if *is_required_for_decoding {
Operation::Add
} else {
Operation::Ignore
}
}
};
match operation {
Operation::Ignore => (),
Operation::Remove => {
step_cache.remove(&media.stream_id);
}
Operation::Add => {
let collection = step_cache.entry(media.stream_id.clone()).or_default();
collection.push(media.clone());
}
}
}
}
fn set_status_to_error(&mut self, step_id: WorkflowStepId, message: String) {
error!(
"Workflow set to error state due to step id {}: {}",
step_id.0, message
);
self.status = WorkflowStatus::Error {
failed_step_id: step_id.0,
message,
};
let all_step_ids = self.active_steps.iter().chain(self.pending_steps.iter());
for step_id in all_step_ids {
if let Some(step) = self.steps_by_definition_id.get_mut(step_id) {
step.instance.take(); // drop the step instance
}
}
}
fn get_active_step_index(&self, step_id: WorkflowStepId) -> Option<usize> {
(0..self.active_steps.len()).find(|&index| self.active_steps[index] == step_id)
}
    /// Post-processes the outputs of a step that has just executed: records stream lifecycle
    /// changes, caches replayable media, and then moves the step's media outputs into
    /// `step_inputs` so the next step in the chain can be executed with them.
    fn handle_executed_step_outputs(&mut self, step_id: WorkflowStepId) {
        self.update_stream_details(step_id);
        self.update_media_cache_from_outputs(step_id);
        // Outputs become the next step's inputs. `append` moves the elements while leaving
        // the reusable output buffer empty.
        self.step_inputs.clear();
        self.step_inputs.media.append(&mut self.step_outputs.media);
        self.step_outputs.clear();
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/futures_channel.rs | mmids-core/src/workflows/steps/futures_channel.rs | //! This module provides abstractions over MPSC channels, which make it easy for workflow steps
//! to execute a future and send the results of those futures back to the correct workflow runner
//! with minimal allocations.
use crate::workflows::definitions::WorkflowStepId;
use crate::workflows::steps::StepFutureResult;
use crate::workflows::MediaNotification;
use std::future::Future;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::sync::CancellationToken;
/// A channel which can be used by workflow steps to send future completion results to the
/// workflow runner. Handles are cheap to clone, and every clone reports results for the
/// same step.
#[derive(Clone)]
pub struct WorkflowStepFuturesChannel {
    // Identifies the step that owns this handle; stamped onto every outgoing message so the
    // workflow runner can route the result back to the correct step.
    step_id: WorkflowStepId,
    // Sending half of the workflow runner's futures channel.
    sender: UnboundedSender<FuturesChannelResult>,
}
/// The type of information that's returned to the workflow upon a future's completion
pub struct FuturesChannelResult {
    /// The step the completed future belongs to.
    pub step_id: WorkflowStepId,
    /// The payload produced by the completed future.
    pub result: FuturesChannelInnerResult,
}
/// The type of result being sent over the future channel
pub enum FuturesChannelInnerResult {
    /// Declares the result is a type that implements the `StepFutureResult` trait, and therefore
    /// is only readable by the raising step itself (the step downcasts it back to its own
    /// concrete type).
    Generic(Box<dyn StepFutureResult>),
    /// The result of the future is a strongly typed media notification that is ready to be passed
    /// on to the next step in the workflow. Media notifications raised in this manner *will not*
    /// be passed back to the step whose future produced it.
    Media(MediaNotification),
}
impl WorkflowStepFuturesChannel {
    /// Creates a channel handle that stamps every message with the given step id, so the
    /// workflow runner can route results back to that step.
    pub fn new(step_id: WorkflowStepId, sender: UnboundedSender<FuturesChannelResult>) -> Self {
        WorkflowStepFuturesChannel { step_id, sender }
    }
    /// Sends the workflow step's future result over the channel. Returns an error if the channel
    /// is closed.
    pub fn send(
        &self,
        message: FuturesChannelInnerResult,
    ) -> Result<(), FuturesChannelInnerResult> {
        let message = FuturesChannelResult {
            step_id: self.step_id,
            result: message,
        };
        // On failure, unwrap tokio's SendError and hand the undelivered payload back to the
        // caller.
        self.sender.send(message).map_err(|e| e.0.result)
    }
    /// Completes when the channel is closed due to there being no receiver
    pub async fn closed(&self) {
        self.sender.closed().await
    }
    /// Helper function for workflow steps to watch a receiver for messages, and send them back
    /// to the workflow step for processing.
    ///
    /// This can send any type of future result.
    pub fn send_on_unbounded_recv<ReceiverMessage>(
        &self,
        mut receiver: UnboundedReceiver<ReceiverMessage>,
        on_recv: impl Fn(ReceiverMessage) -> FuturesChannelInnerResult + Send + 'static,
        on_closed: impl FnOnce() -> FuturesChannelInnerResult + Send + 'static,
    ) where
        ReceiverMessage: Send + 'static,
    {
        let channel = self.clone();
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    message = receiver.recv() => {
                        match message {
                            Some(message) => {
                                let future_result = on_recv(message);
                                let _ = channel.send(future_result);
                            }
                            None => {
                                // Watched channel closed; report it once and stop.
                                let future_result = on_closed();
                                let _ = channel.send(future_result);
                                break;
                            }
                        }
                    }
                    _ = channel.closed() => {
                        // The workflow runner is gone, so there's nowhere to send results.
                        break;
                    }
                }
            }
        });
    }
    /// Helper function for workflow steps to watch a receiver for messages, and send them back
    /// to the workflow step for processing.
    ///
    /// This only sends a generic `StepFutureResult` value.
    pub fn send_on_generic_unbounded_recv<ReceiverMessage, FutureResult>(
        &self,
        mut receiver: UnboundedReceiver<ReceiverMessage>,
        on_recv: impl Fn(ReceiverMessage) -> FutureResult + Send + 'static,
        on_closed: impl FnOnce() -> FutureResult + Send + 'static,
    ) where
        ReceiverMessage: Send + 'static,
        FutureResult: StepFutureResult + Send + 'static,
    {
        let channel = self.clone();
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    message = receiver.recv() => {
                        match message {
                            Some(message) => {
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_recv(message))
                                );
                                let _ = channel.send(future_result);
                            }
                            None => {
                                // Watched channel closed; report it once and stop.
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_closed())
                                );
                                let _ = channel.send(future_result);
                                break;
                            }
                        }
                    }
                    _ = channel.closed() => {
                        // The workflow runner is gone, so there's nowhere to send results.
                        break;
                    }
                }
            }
        });
    }
    /// Helper function for workflow steps to watch a receiver for messages, and send them back
    /// to the workflow step for processing. Cancellable via a token.
    ///
    /// This only sends a generic `StepFutureResult` value.
    pub fn send_on_generic_unbounded_recv_cancellable<ReceiverMessage, FutureResult>(
        &self,
        mut receiver: UnboundedReceiver<ReceiverMessage>,
        cancellation_token: CancellationToken,
        on_recv: impl Fn(ReceiverMessage) -> FutureResult + Send + 'static,
        on_closed: impl FnOnce() -> FutureResult + Send + 'static,
        on_cancelled: impl FnOnce() -> FutureResult + Send + 'static,
    ) where
        ReceiverMessage: Send + 'static,
        FutureResult: StepFutureResult + Send + 'static,
    {
        let channel = self.clone();
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    message = receiver.recv() => {
                        match message {
                            Some(message) => {
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_recv(message))
                                );
                                let _ = channel.send(future_result);
                            }
                            None => {
                                // Watched channel closed; report it once and stop.
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_closed())
                                );
                                let _ = channel.send(future_result);
                                break;
                            }
                        }
                    }
                    _ = cancellation_token.cancelled() => {
                        // Cancellation still notifies the step so it can clean up.
                        let future_result = FuturesChannelInnerResult::Generic(
                            Box::new(on_cancelled())
                        );
                        let _ = channel.send(future_result);
                        break;
                    }
                    _ = channel.closed() => {
                        // Nothing to send since the channel is closed
                        break;
                    }
                }
            }
        });
    }
    /// Helper function for workflow steps to track a tokio watch receiver for messages, and send
    /// them back to the workflow step for processing.
    ///
    /// This only sends a generic `StepFutureResult` value.
    pub fn send_on_generic_watch_recv<ReceiverMessage, FutureResult>(
        &self,
        mut receiver: tokio::sync::watch::Receiver<ReceiverMessage>,
        on_recv: impl Fn(&ReceiverMessage) -> FutureResult + Send + 'static,
        on_closed: impl FnOnce() -> FutureResult + Send + 'static,
    ) where
        ReceiverMessage: Send + Sync + 'static,
        FutureResult: StepFutureResult + Send + 'static,
    {
        let channel = self.clone();
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    message = receiver.changed() => {
                        match message {
                            Ok(_) => {
                                // `changed` only signals; the current value must be read
                                // out of the watch cell via `borrow`.
                                let value = receiver.borrow();
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_recv(&value))
                                );
                                let _ = channel.send(future_result);
                            }
                            Err(_) => {
                                // The watch sender was dropped; report it once and stop.
                                let future_result = FuturesChannelInnerResult::Generic(
                                    Box::new(on_closed())
                                );
                                let _ = channel.send(future_result);
                                break;
                            }
                        }
                    }
                    _ = channel.closed() => {
                        // The workflow runner is gone, so there's nowhere to send results.
                        break;
                    }
                }
            }
        });
    }
    /// Helper function for workflow steps to easily send a message upon future completion.
    ///
    /// This only sends a generic `StepFutureResult` value.
    pub fn send_on_generic_future_completion(
        &self,
        future: impl Future<Output = impl StepFutureResult + Send> + Send + 'static,
    ) {
        let channel = self.clone();
        tokio::spawn(async move {
            tokio::select! {
                result = future => {
                    let _ = channel.send(FuturesChannelInnerResult::Generic(Box::new(result)));
                }
                _ = channel.closed() => {
                    // No where to send the result, so cancel the future by exiting
                }
            }
        });
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/test_utils.rs | mmids-core/src/workflows/steps/test_utils.rs | use crate::workflows::steps::factory::StepGenerator;
use crate::workflows::steps::futures_channel::FuturesChannelResult;
use crate::workflows::steps::futures_channel::{
FuturesChannelInnerResult, WorkflowStepFuturesChannel,
};
use crate::workflows::definitions::WorkflowStepDefinition;
use crate::workflows::steps::{
StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use crate::workflows::MediaNotification;
use anyhow::{anyhow, Result};
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::time::timeout;
/// Test harness for exercising a single workflow step in isolation: it owns the step
/// instance, tracks the most recent execution results, and drains the step's futures
/// channel so spawned futures get fed back into the step.
pub struct StepTestContext {
    // The step instance under test.
    pub step: Box<dyn WorkflowStep>,
    // Status returned by the step's most recent generation or execution.
    pub status: StepStatus,
    // Media produced by the most recent execution (or by resolved futures).
    pub media_outputs: Vec<MediaNotification>,
    // Handle given to the step for reporting future results.
    pub futures_channel_sender: WorkflowStepFuturesChannel,
    // Receiving half, drained by the harness to simulate the workflow runner.
    futures_channel_receiver: UnboundedReceiver<FuturesChannelResult>,
}
impl StepTestContext {
    /// Builds the harness by running the supplied generator against the definition. Fails if
    /// the generator rejects the definition.
    pub fn new(
        generator: Box<dyn StepGenerator>,
        definition: WorkflowStepDefinition,
    ) -> Result<Self> {
        let (sender, receiver) = unbounded_channel();
        let channel = WorkflowStepFuturesChannel::new(definition.get_id(), sender);
        let (step, status) = generator
            .generate(definition, channel.clone())
            .map_err(|error| anyhow!("Failed to generate workflow step: {:?}", error))?;
        Ok(StepTestContext {
            step,
            status,
            media_outputs: Vec::new(),
            futures_channel_sender: channel,
            futures_channel_receiver: receiver,
        })
    }
    /// Executes the step with a single media notification as input, storing the resulting
    /// media outputs and status on the context.
    pub fn execute_with_media(&mut self, media: MediaNotification) {
        let mut outputs = StepOutputs::new();
        let mut inputs = StepInputs::new();
        inputs.media.push(media);
        let status = self.step.execute(
            &mut inputs,
            &mut outputs,
            self.futures_channel_sender.clone(),
        );
        self.media_outputs = outputs.media;
        self.status = status;
    }
    /// Executes the step with a single future-result notification as input, then drains any
    /// futures the execution spawned.
    pub async fn execute_notification(&mut self, notification: Box<dyn StepFutureResult>) {
        let mut outputs = StepOutputs::new();
        let mut inputs = StepInputs::new();
        inputs.notifications.push(notification);
        let status = self.step.execute(
            &mut inputs,
            &mut outputs,
            self.futures_channel_sender.clone(),
        );
        self.media_outputs = outputs.media;
        self.status = status;
        self.execute_pending_futures().await;
    }
    /// Repeatedly pulls resolved futures off the channel and feeds them back into the step,
    /// accumulating any media produced, until no further future resolves within the timeout.
    pub async fn execute_pending_futures(&mut self) {
        self.media_outputs.clear();
        loop {
            let duration = Duration::from_millis(10);
            let future = self.futures_channel_receiver.recv();
            // We explicitly want to do a timeout instead of `try_recv` to ensure that any futures
            // that are triggered get a chance to execute and complete. This is important since it's
            // single threaded. A `yield()` may work but this is probably more reliable to give us
            // enough time.
            let notification = match timeout(duration, future).await {
                Ok(Some(notification)) => notification,
                _ => break,
            };
            let mut outputs = StepOutputs::new();
            let mut inputs = StepInputs::new();
            match notification.result {
                FuturesChannelInnerResult::Generic(notification) => {
                    inputs.notifications.push(notification)
                }
                FuturesChannelInnerResult::Media(media) => {
                    // Media raised as a result goes to the next step, not the current step, so
                    // it just gets added directly as a step output.
                    self.media_outputs.push(media);
                    continue;
                }
            };
            let status = self.step.execute(
                &mut inputs,
                &mut outputs,
                self.futures_channel_sender.clone(),
            );
            self.media_outputs.extend(outputs.media);
            self.status = status;
        }
    }
    /// Asserts that executing the step with `media` yields exactly that same notification as
    /// the step's only output.
    pub fn assert_media_passed_through(&mut self, media: MediaNotification) {
        self.execute_with_media(media.clone());
        assert_eq!(
            self.media_outputs.len(),
            1,
            "Unexpected number of media outputs"
        );
        assert_eq!(self.media_outputs[0], media, "Unexpected media message");
    }
    /// Asserts that executing the step with `media` yields no media outputs at all.
    pub fn assert_media_not_passed_through(&mut self, media: MediaNotification) {
        self.execute_with_media(media);
        assert!(self.media_outputs.is_empty(), "Expected no media outputs");
    }
    /// Gets the first future that was resolved on the workflow step futures channel. If no future
    /// is resolved, then a panic will ensue.
    pub async fn expect_future_resolved(&mut self) -> FuturesChannelInnerResult {
        let future = self.futures_channel_receiver.recv();
        match timeout(Duration::from_millis(10), future).await {
            Ok(Some(response)) => response.result,
            _ => panic!("No future resolved within timeout period"),
        }
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/mod.rs | mmids-core/src/workflows/steps/mod.rs | //! Workflow steps are individual actions that can be taken on media as part of a media pipeline.
pub mod factory;
pub mod futures_channel;
pub mod workflow_forwarder;
#[cfg(feature = "test-utils")]
pub mod test_utils;
use super::MediaNotification;
use crate::workflows::definitions::WorkflowStepDefinition;
use crate::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use downcast_rs::{impl_downcast, Downcast};
/// Represents the result of a future for a workflow step. It is expected that the workflow step
/// will downcast this result into a struct that it owns.
pub trait StepFutureResult: Downcast + Send {}

// Generates the downcasting support (`downcast`, `downcast_ref`, etc. from `downcast_rs`)
// that steps use to recover their concrete result types.
impl_downcast!(StepFutureResult);
/// The type that is returned by workflow step generators. On the successful generation of
/// workflow steps, the usable instance of the step is returned as well as its initial status.
pub type StepCreationResult = Result<
    (Box<dyn WorkflowStep + Sync + Send>, StepStatus),
    Box<dyn std::error::Error + Sync + Send>,
>;

/// Signature for boxed factory functions that build a workflow step from its definition.
pub type CreateFactoryFnResult =
    Box<dyn Fn(&WorkflowStepDefinition) -> StepCreationResult + Send + Sync>;
/// Various statuses of an individual step over its lifecycle
/// (`Created` -> `Active`, terminating in `Error` or `Shutdown`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum StepStatus {
    /// The step has been created but it is not yet ready to handle media
    Created,
    /// The step is fully active and ready for handling media
    Active,
    /// The step has encountered an unrecoverable error and can no longer handle media or
    /// notifications. `message` is a human readable description of the failure.
    Error { message: String },
    /// The step has been shut down and is not expected to be invoked anymore. Workflow steps
    /// that have been shut down must be regenerated to be used.
    Shutdown,
}
/// Inputs to be passed in for execution of a workflow step.
#[derive(Default)]
pub struct StepInputs {
    /// Media notifications that the step may be interested in
    pub media: Vec<MediaNotification>,
    /// Any resolved futures that are specific to this step
    pub notifications: Vec<Box<dyn StepFutureResult>>,
}
impl StepInputs {
pub fn new() -> Self {
Default::default()
}
pub fn clear(&mut self) {
self.media.clear();
self.notifications.clear();
}
}
/// Resulting outputs that come from executing a workflow step.
#[derive(Default)]
pub struct StepOutputs {
    /// Media notifications that the workflow step intends to pass to the next workflow step
    pub media: Vec<MediaNotification>,
}
impl StepOutputs {
pub fn new() -> Self {
Default::default()
}
pub fn clear(&mut self) {
self.media.clear();
}
}
/// Represents a workflow step that can be executed
pub trait WorkflowStep {
    /// Executes the workflow step with the specified media and future resolution inputs. Any outputs
    /// that are generated as a result of this execution will be placed in the `outputs` parameter,
    /// to allow vectors to be re-used.
    ///
    /// The `futures_channel` gives the step a handle for reporting the results of any futures
    /// it spawns back to the workflow runner.
    ///
    /// This function returns the status the step should be considered in after its execution.
    /// An error state being returned will cause the workflow step to be dropped.
    fn execute(
        &mut self,
        inputs: &mut StepInputs,
        outputs: &mut StepOutputs,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepStatus;
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/factory.rs | mmids-core/src/workflows/steps/factory.rs | use crate::workflows::definitions::{WorkflowStepDefinition, WorkflowStepType};
use crate::workflows::steps::futures_channel::{FuturesChannelResult, WorkflowStepFuturesChannel};
use crate::workflows::steps::StepCreationResult;
use std::collections::HashMap;
use thiserror::Error;
use tokio::sync::mpsc::UnboundedSender;
/// Represents a type that can generate an instance of a workflow step
pub trait StepGenerator {
    /// Creates a brand new instance of a workflow step based on the supplied definition. Generating
    /// a workflow step returns both the workflow step itself as well as the initial status the
    /// step should be in.
    ///
    /// The `futures_channel` is the handle the new step should use to report the results of
    /// any futures it spawns back to the workflow runner.
    fn generate(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult;
}
/// The workflow step factory allows consumers to register different workflow step generation
/// instances to use for specific workflow step types. Consumers can then request the factory
/// to generate workflow steps based on the passed in step definition.
#[derive(Default)]
pub struct WorkflowStepFactory {
    // Maps each step type to the single generator registered for it.
    generators: HashMap<WorkflowStepType, Box<dyn StepGenerator + Sync + Send>>,
}
/// Errors that can occur when an attempt to register a generator fails
#[derive(Error, Debug)]
pub enum FactoryRegistrationError {
    #[error(
        "The workflow step factory already has a step generator registered with the type '{0}'"
    )]
    DuplicateName(WorkflowStepType),
}
/// Errors that can occur when an attempt to generate a workflow step fails
#[derive(Error, Debug)]
pub enum FactoryCreateError {
    /// No generator was ever registered for the requested step type.
    #[error("No workflow step generator is registered for the type '{0}'")]
    NoRegisteredStep(WorkflowStepType),
}
impl WorkflowStepFactory {
    /// Creates a new workflow step factory with no generators registered.
    pub fn new() -> Self {
        Self::default()
    }

    /// Attempts to register a generator for the given step type. Fails when another generator
    /// has already claimed that type.
    pub fn register(
        &mut self,
        step_type: WorkflowStepType,
        generator: Box<dyn StepGenerator + Sync + Send>,
    ) -> Result<(), FactoryRegistrationError> {
        if self.generators.contains_key(&step_type) {
            Err(FactoryRegistrationError::DuplicateName(step_type))
        } else {
            self.generators.insert(step_type, generator);
            Ok(())
        }
    }

    /// Attempts to create a new instance of a workflow step based on a specified definition.
    /// The outer `Result` reports factory-level dispatch failures; the inner
    /// `StepCreationResult` carries the generator's own success or failure.
    pub(crate) fn create_step(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: &UnboundedSender<FuturesChannelResult>,
    ) -> Result<StepCreationResult, FactoryCreateError> {
        let generator = match self.generators.get(&definition.step_type) {
            None => return Err(FactoryCreateError::NoRegisteredStep(definition.step_type)),
            Some(generator) => generator,
        };

        // Each step gets a channel handle stamped with its own id, so results of futures it
        // spawns are routed back to it.
        let channel =
            WorkflowStepFuturesChannel::new(definition.get_id(), futures_channel.clone());

        Ok(generator.generate(definition, channel))
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/workflow_forwarder/tests.rs | mmids-core/src/workflows/steps/workflow_forwarder/tests.rs | use super::*;
use crate::test_utils;
use crate::workflows::definitions::WorkflowStepType;
use crate::workflows::metadata::MediaPayloadMetadataCollection;
use crate::workflows::steps::futures_channel::FuturesChannelInnerResult;
use crate::workflows::steps::test_utils::StepTestContext;
use crate::workflows::MediaType;
use anyhow::{anyhow, Result};
use bytes::{Bytes, BytesMut};
use std::iter;
use std::sync::Arc;
use std::time::Duration;
// Bundles every channel end the tests need to drive a workflow forwarder step
// and to observe the traffic it produces.
struct TestContext {
    // Receives the requests the step sends to the reactor manager
    reactor_manager: UnboundedReceiver<ReactorManagerRequest>,
    // Kept alive so the step's event hub subscription request channel stays open
    _event_hub: UnboundedReceiver<SubscriptionRequest>,
    // Harness that executes the step under test and collects its outputs
    step_context: StepTestContext,
    // Default sender handed to the step when a workflow "starts"
    workflow_sender: UnboundedSender<WorkflowRequest>,
    // Receives the media the step forwards to the default target workflow
    workflow_receiver: UnboundedReceiver<WorkflowRequest>,
    // Used to fake workflow started/stopped notifications from the event hub
    workflow_event_channel: UnboundedSender<WorkflowStartedOrStoppedEvent>,
}
impl TestContext {
    /// Builds a forwarder step configured with either a fixed target workflow
    /// or a reactor name (exactly one must be supplied), and captures every
    /// channel needed to interact with it.
    async fn new(specific_workflow: Option<&str>, reactor: Option<&str>) -> Result<Self> {
        // Exactly one routing mode must be selected
        match (specific_workflow, reactor) {
            (Some(_), Some(_)) => {
                return Err(anyhow!(
                    "Both workflow and reactor names specified. Only one should be"
                ))
            }
            (None, None) => {
                return Err(anyhow!(
                    "Neither workflow or reactor name specified. One must be"
                ))
            }
            _ => (),
        }

        let (reactor_sender, reactor_receiver) = unbounded_channel();
        let (workflow_sender, workflow_receiver) = unbounded_channel();
        let (sub_sender, mut sub_receiver) = unbounded_channel();
        let generator = WorkflowForwarderStepGenerator::new(sub_sender, reactor_sender);

        let mut definition = WorkflowStepDefinition {
            step_type: WorkflowStepType("".to_string()),
            parameters: HashMap::new(),
        };

        if let Some(name) = reactor {
            definition
                .parameters
                .insert(REACTOR_NAME.to_string(), Some(name.to_string()));
        }

        if let Some(name) = specific_workflow {
            definition
                .parameters
                .insert(TARGET_WORKFLOW.to_string(), Some(name.to_string()));
        }

        let step_context = StepTestContext::new(Box::new(generator), definition)?;

        // It must send a subscription event on startup; grab the channel it
        // handed to the event hub so tests can fake lifecycle events.
        let channel = match test_utils::expect_mpsc_response(&mut sub_receiver).await {
            SubscriptionRequest::WorkflowStartedOrStopped { channel } => channel,
            event => panic!("Unexpected event: {:?}", event),
        };

        Ok(TestContext {
            step_context,
            workflow_sender,
            workflow_receiver,
            _event_hub: sub_receiver,
            reactor_manager: reactor_receiver,
            workflow_event_channel: channel,
        })
    }

    /// Awaits the step's pending generic future and feeds the resulting
    /// notification back into the step for execution.
    async fn run_resolved_generic_future(&mut self) {
        match self.step_context.expect_future_resolved().await {
            FuturesChannelInnerResult::Generic(notification) => {
                self.step_context.execute_notification(notification).await;
            }
            FuturesChannelInnerResult::Media(_) => {
                panic!("Expected a generic step future result but instead got media packet");
            }
        }
    }

    /// Fakes an event hub notification that the named workflow started, then
    /// drives the step so it processes the event.
    async fn send_workflow_started_event(
        &mut self,
        name: &str,
        sender: Option<UnboundedSender<WorkflowRequest>>,
    ) {
        let channel = sender.unwrap_or_else(|| self.workflow_sender.clone());
        self.workflow_event_channel
            .send(WorkflowStartedOrStoppedEvent::WorkflowStarted {
                name: Arc::new(name.to_string()),
                channel,
            })
            .expect("Failed to send workflow started event");

        self.run_resolved_generic_future().await;
    }

    /// Fakes an event hub notification that the named workflow stopped, then
    /// drives the step so it processes the event.
    async fn send_workflow_stopped_event(&mut self, name: &str) {
        self.workflow_event_channel
            .send(WorkflowStartedOrStoppedEvent::WorkflowEnded {
                name: Arc::new(name.to_string()),
            })
            .expect("Failed to send workflow ended event");

        self.run_resolved_generic_future().await;
    }
}
// A new incoming stream must be relayed to the globally-configured target
// workflow once that workflow is known to be running.
#[tokio::test]
async fn new_stream_message_sent_to_global_workflow() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.send_workflow_started_event("test", None).await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => {
            assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
            match media.content {
                MediaNotificationContent::NewIncomingStream { stream_name } => {
                    assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
                }
                content => panic!("Unexpected media content: {:?}", content),
            }
        }
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
}
// If media arrives before the target workflow starts, the step must buffer the
// required media and replay it once the workflow comes up.
#[tokio::test]
async fn new_stream_message_sent_if_workflow_started_after_message_comes_in() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    // Nothing should be forwarded yet - the workflow is not running
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
    context.send_workflow_started_event("test", None).await;
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => {
            assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
            match media.content {
                MediaNotificationContent::NewIncomingStream { stream_name } => {
                    assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
                }
                content => panic!("Unexpected media content: {:?}", content),
            }
        }
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
}
// Media must not be forwarded to a workflow whose name does not match the
// configured global target workflow name.
#[tokio::test]
async fn no_message_passed_if_workflow_has_different_name_than_global_name() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.send_workflow_started_event("test2", None).await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
}
// Once the target workflow reports as stopped, subsequent media must not be
// forwarded to its (now stale) channel.
#[tokio::test]
async fn no_message_passed_if_workflow_stopped_before_media_sent() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.send_workflow_started_event("test", None).await;
    context.send_workflow_stopped_event("test").await;
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
}
// A stream that connects and disconnects while the workflow is down must not
// have any buffered media replayed when the workflow later starts.
#[tokio::test]
async fn no_message_passed_if_stream_disconnected_before_workflow_started() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::StreamDisconnected,
    });
    context.send_workflow_started_event("test", None).await;
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
}
// Regardless of workflow state, every media notification must also flow to the
// next step in the pipeline immediately. These four tests cover each content
// variant: new stream, disconnection, media payload, and metadata.
#[tokio::test]
async fn new_stream_media_passed_as_output_immediately() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    assert_eq!(
        context.step_context.media_outputs.len(),
        1,
        "Unexpected number of media outputs"
    );
    let media = &context.step_context.media_outputs[0];
    assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
    match &media.content {
        MediaNotificationContent::NewIncomingStream { stream_name } => {
            assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
        }
        content => panic!("Unexpected media content: {:?}", content),
    }
}
#[tokio::test]
async fn stream_disconnected_media_passed_as_output_immediately() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::StreamDisconnected,
    });
    assert_eq!(
        context.step_context.media_outputs.len(),
        1,
        "Unexpected number of media outputs"
    );
    let media = &context.step_context.media_outputs[0];
    assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
    match &media.content {
        MediaNotificationContent::StreamDisconnected => (),
        content => panic!("Unexpected media content: {:?}", content),
    }
}
#[tokio::test]
async fn media_passed_as_output_immediately() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    let expected_content = MediaNotificationContent::MediaPayload {
        data: Bytes::from(vec![1, 2, 3]),
        payload_type: Arc::new("test".to_string()),
        media_type: MediaType::Audio,
        is_required_for_decoding: true,
        timestamp: Duration::from_millis(10),
        metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
    };
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: expected_content.clone(),
    });
    assert_eq!(
        context.step_context.media_outputs.len(),
        1,
        "Unexpected number of media outputs"
    );
    let media = &context.step_context.media_outputs[0];
    assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
    assert_eq!(media.content, expected_content, "Unexpected media content");
}
#[tokio::test]
async fn metadata_media_passed_as_output_immediately() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    let mut metadata = HashMap::new();
    metadata.insert("a".to_string(), "b".to_string());
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::Metadata {
            data: metadata.clone(),
        },
    });
    assert_eq!(
        context.step_context.media_outputs.len(),
        1,
        "Unexpected number of media outputs"
    );
    let media = &context.step_context.media_outputs[0];
    assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
    match &media.content {
        MediaNotificationContent::Metadata { data } => {
            assert_eq!(data, &metadata, "Unexpected metadata");
        }
        content => panic!("Unexpected media content: {:?}", content),
    }
}
// Payloads flagged as required for decoding (e.g. stream headers) must be
// buffered and replayed to a workflow that starts after they arrived.
#[tokio::test]
async fn required_media_payload_sent_to_workflow_when_received_before_workflow_starts() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let expected_content = MediaNotificationContent::MediaPayload {
        data: Bytes::from(vec![1, 2, 3]),
        payload_type: Arc::new("test".to_string()),
        media_type: MediaType::Other,
        is_required_for_decoding: true,
        timestamp: Duration::from_millis(10),
        metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
    };
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: expected_content.clone(),
    });
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
    context.send_workflow_started_event("test", None).await;
    // Replay order: the NewIncomingStream notification first, then the
    // buffered required payload.
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => match media.content {
            MediaNotificationContent::NewIncomingStream { .. } => (),
            content => panic!("Unexpected media content: {:?}", content),
        },
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => {
            assert_eq!(media.content, expected_content, "Unexpected media content");
        }
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
}
// Payloads NOT required for decoding are transient and must not be buffered
// for late-starting workflows.
#[tokio::test]
async fn non_required_payload_not_sent_to_workflow_when_received_before_workflow_starts() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let expected_content = MediaNotificationContent::MediaPayload {
        data: Bytes::from(vec![1, 2, 3]),
        payload_type: Arc::new("test".to_string()),
        media_type: MediaType::Other,
        is_required_for_decoding: false,
        timestamp: Duration::from_millis(10),
        metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
    };
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: expected_content.clone(),
    });
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
    context.send_workflow_started_event("test", None).await;
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => match media.content {
            MediaNotificationContent::NewIncomingStream { .. } => (),
            content => panic!("Unexpected media content: {:?}", content),
        },
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
}
// Metadata is likewise transient and must not be replayed to late starters.
#[tokio::test]
async fn metadata_not_sent_when_received_before_workflow_starts() {
    let mut context = TestContext::new(Some("test"), None).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::Metadata {
            data: HashMap::new(),
        },
    });
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
    context.send_workflow_started_event("test", None).await;
    let response = test_utils::expect_mpsc_response(&mut context.workflow_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => match media.content {
            MediaNotificationContent::NewIncomingStream { .. } => (),
            content => panic!("Unexpected media content: {:?}", content),
        },
        operation => panic!("Unexpected workflow operation: {:?}", operation),
    }
    test_utils::expect_mpsc_timeout(&mut context.workflow_receiver).await;
}
// In reactor mode, a new incoming stream must trigger a workflow-creation
// query to the configured reactor.
#[tokio::test]
async fn new_stream_triggers_reactor_query() {
    let mut context = TestContext::new(None, Some("test")).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let response = test_utils::expect_mpsc_response(&mut context.reactor_manager).await;
    match response {
        ReactorManagerRequest::CreateWorkflowForStreamName {
            reactor_name,
            stream_name,
            ..
        } => {
            assert_eq!(reactor_name.as_str(), "test", "Unexpected reactor name");
            assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
        }
        response => panic!("Unexpected request: {:?}", response),
    }
}
// When the reactor reports multiple routable workflows for a stream, the
// buffered stream notification must be replayed to each of them as they start.
#[tokio::test]
async fn new_stream_passed_to_all_specified_routable_workflow() {
    let mut context = TestContext::new(None, Some("test")).await.unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });
    let response = test_utils::expect_mpsc_response(&mut context.reactor_manager).await;
    match response {
        ReactorManagerRequest::CreateWorkflowForStreamName {
            response_channel, ..
        } => {
            let mut workflows = HashSet::new();
            workflows.insert(Arc::new("first".to_string()));
            workflows.insert(Arc::new("second".to_string()));
            response_channel
                .send(ReactorWorkflowUpdate {
                    is_valid: true,
                    routable_workflow_names: workflows,
                })
                .expect("Failed to send reactor response");
        }
        response => panic!("Unexpected request: {:?}", response),
    }
    // Give the step's reactor-response future time to resolve
    tokio::time::sleep(Duration::from_millis(10)).await;
    let (w1_sender, mut w1_receiver) = unbounded_channel();
    let (w2_sender, mut w2_receiver) = unbounded_channel();
    context
        .send_workflow_started_event("first", Some(w1_sender))
        .await;
    context
        .send_workflow_started_event("second", Some(w2_sender))
        .await;
    let response = test_utils::expect_mpsc_response(&mut w1_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => {
            assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
            match media.content {
                MediaNotificationContent::NewIncomingStream { stream_name } => {
                    assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
                }
                content => panic!("Unexpected media content: {:?}", content),
            }
        }
        operation => panic!("Unexpected operation: {:?}", operation),
    }
    let response = test_utils::expect_mpsc_response(&mut w2_receiver).await;
    match response.operation {
        WorkflowRequestOperation::MediaNotification { media } => {
            assert_eq!(media.stream_id.0.as_str(), "abc", "Unexpected stream id");
            match media.content {
                MediaNotificationContent::NewIncomingStream { stream_name } => {
                    assert_eq!(stream_name.as_str(), "def", "Unexpected stream name");
                }
                content => panic!("Unexpected media content: {:?}", content),
            }
        }
        operation => panic!("Unexpected operation: {:?}", operation),
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/workflows/steps/workflow_forwarder/mod.rs | mmids-core/src/workflows/steps/workflow_forwarder/mod.rs | //! The workflow forwarder step takes all media notifications it receives and sends them to the
//! specified workflow, using the workflow media relay. All media notifications are also passed
//! to subsequent steps.
#[cfg(test)]
mod tests;
use crate::event_hub::{SubscriptionRequest, WorkflowStartedOrStoppedEvent};
use crate::reactors::manager::ReactorManagerRequest;
use crate::reactors::ReactorWorkflowUpdate;
use crate::workflows::definitions::WorkflowStepDefinition;
use crate::workflows::steps::factory::StepGenerator;
use crate::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use crate::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use crate::workflows::{
MediaNotification, MediaNotificationContent, WorkflowRequest, WorkflowRequestOperation,
};
use crate::StreamId;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, span, Level};
/// Step parameter naming a single workflow that every stream is forwarded to
pub const TARGET_WORKFLOW: &str = "target_workflow";
/// Step parameter naming a reactor that decides, per stream, which workflows to forward to
pub const REACTOR_NAME: &str = "reactor";
/// Generates a new workflow forwarder step
pub struct WorkflowForwarderStepGenerator {
    /// Used to subscribe each generated step to workflow started/stopped events
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
    /// Handed to each generated step so it can request reactor-managed workflows
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
}
// Per-stream bookkeeping for a stream the step is currently relaying.
struct StreamDetails {
    /// Workflows that media for this stream should currently be sent to
    target_workflow_names: HashSet<Arc<String>>,
    /// Media that must be replayed to any workflow that starts (or becomes
    /// routable) later, e.g. the stream notification and decoding headers
    required_media: Vec<MediaNotification>,
    // Used to cancel the reactor update future. When a stream disconnects, this cancellation
    // channel will be dropped causing the future waiting for reactor updates to be closed. This
    // will inform the reactor that this step is no longer interested in whatever workflow it was
    // managing for it.
    cancellation_token: Option<CancellationToken>,
}
// Dropping a stream's details cancels any outstanding reactor-update future,
// which signals the reactor that this step no longer needs its workflow.
impl Drop for StreamDetails {
    fn drop(&mut self) {
        match self.cancellation_token.take() {
            Some(token) => token.cancel(),
            None => (),
        }
    }
}
// The step itself: relays all media it receives to one or more workflows while
// also passing everything through to subsequent steps.
struct WorkflowForwarderStep {
    /// Fixed target workflow name, when not operating in reactor mode
    global_workflow_name: Option<Arc<String>>,
    /// Reactor consulted per stream, when not using a fixed target workflow
    reactor_name: Option<Arc<String>>,
    reactor_manager: UnboundedSender<ReactorManagerRequest>,
    /// Streams currently being relayed, keyed by stream id
    active_streams: HashMap<StreamId, StreamDetails>,
    /// Reverse index: which streams target each workflow name
    stream_for_workflow_name: HashMap<Arc<String>, HashSet<StreamId>>,
    /// Request channel for every workflow currently known to be running
    known_workflows: HashMap<Arc<String>, UnboundedSender<WorkflowRequest>>,
}
// Results of the asynchronous work the step schedules on its futures channel.
enum FutureResult {
    /// The event hub closed its subscription channel
    EventHubGone,
    /// The reactor manager's request channel closed
    ReactorManagerGone,
    /// The reactor handling a stream closed its update channel
    ReactorGone,
    /// A known workflow's request channel closed
    WorkflowGone {
        workflow_name: Arc<String>,
    },
    /// The event hub reported a workflow starting or stopping
    WorkflowStartedOrStopped(WorkflowStartedOrStoppedEvent),
    /// First reactor response for a newly seen stream; carries the channel
    /// back so the step can keep listening for further updates
    ReactorResponseReceived {
        stream_id: StreamId,
        stream_name: Arc<String>,
        update: ReactorWorkflowUpdate,
        reactor_update_channel: UnboundedReceiver<ReactorWorkflowUpdate>,
    },
    /// A follow-up update from the reactor for an already-known stream
    ReactorUpdateReceived {
        stream_id: StreamId,
        update: ReactorWorkflowUpdate,
    },
    /// The stream's reactor-update future was cancelled (stream went away)
    ReactorCancellationReceived {
        stream_id: StreamId,
    },
}
impl StepFutureResult for FutureResult {}
// Configuration errors that prevent the step from being generated.
#[derive(Error, Debug)]
enum StepStartupError {
    /// Neither a target workflow nor a reactor parameter was provided
    #[error("A {} or {} value must be specified", TARGET_WORKFLOW, REACTOR_NAME)]
    NoTargetWorkflowSpecified,
    /// Both routing modes were provided, which is ambiguous
    #[error("A target workflow and reactor were specified. Only one can be used at a time")]
    ReactorAndTargetWorkflowBothSpecified,
}
impl WorkflowForwarderStepGenerator {
pub fn new(
event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
reactor_manager: UnboundedSender<ReactorManagerRequest>,
) -> Self {
WorkflowForwarderStepGenerator {
event_hub_subscriber,
reactor_manager,
}
}
}
impl StepGenerator for WorkflowForwarderStepGenerator {
    /// Builds a workflow forwarder step from its definition. Exactly one of
    /// the `target_workflow` or `reactor` parameters must carry a value.
    fn generate(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult {
        // A parameter only counts when it is present with a value
        let target_workflow_name = definition
            .parameters
            .get(TARGET_WORKFLOW)
            .and_then(|value| value.as_ref())
            .map(|name| Arc::new(name.clone()));

        let reactor_name = definition
            .parameters
            .get(REACTOR_NAME)
            .and_then(|value| value.as_ref())
            .map(|name| Arc::new(name.clone()));

        // Exactly one routing mode must be configured
        match (&target_workflow_name, &reactor_name) {
            (None, None) => return Err(Box::new(StepStartupError::NoTargetWorkflowSpecified)),
            (Some(_), Some(_)) => {
                return Err(Box::new(
                    StepStartupError::ReactorAndTargetWorkflowBothSpecified,
                ))
            }
            _ => (),
        }

        // Subscribe to workflow lifecycle events so the step learns each
        // workflow's request channel as it starts
        let (event_sender, event_receiver) = unbounded_channel();
        let _ = self
            .event_hub_subscriber
            .send(SubscriptionRequest::WorkflowStartedOrStopped {
                channel: event_sender,
            });

        let step = WorkflowForwarderStep {
            global_workflow_name: target_workflow_name,
            reactor_name,
            stream_for_workflow_name: HashMap::new(),
            active_streams: HashMap::new(),
            reactor_manager: self.reactor_manager.clone(),
            known_workflows: HashMap::new(),
        };

        notify_on_workflow_event(event_receiver, &futures_channel);
        notify_reactor_manager_gone(self.reactor_manager.clone(), &futures_channel);

        Ok((Box::new(step), StepStatus::Active))
    }
}
impl WorkflowForwarderStep {
    /// Reacts to a workflow starting or stopping: caches/evicts its request
    /// channel, watches for the channel closing, and replays buffered required
    /// media to a workflow that starts while one of its streams is active.
    fn handle_workflow_event(
        &mut self,
        event: WorkflowStartedOrStoppedEvent,
        futures_channel: &WorkflowStepFuturesChannel,
    ) {
        match event {
            WorkflowStartedOrStoppedEvent::WorkflowStarted { name, channel } => {
                // We need to track all workflows started, in case we need the channel of a workflow
                // that starts after the reactor lets us know its relevant to a stream
                self.known_workflows.insert(name.clone(), channel.clone());
                {
                    // Schedule a future that fires when this workflow's
                    // channel closes, so the stale channel can be evicted
                    let channel = channel.clone();
                    let name = name.clone();
                    futures_channel.send_on_generic_future_completion(async move {
                        channel.closed().await;
                        FutureResult::WorkflowGone {
                            workflow_name: name,
                        }
                    })
                }
                // Replay buffered required media for any active stream that
                // targets this freshly started workflow
                if let Some(stream_ids) = self.stream_for_workflow_name.get(&name) {
                    info!(
                        workflow_name = %name,
                        "Received notification that workflow {} has started", name
                    );
                    for stream_id in stream_ids {
                        if let Some(stream) = self.active_streams.get_mut(stream_id) {
                            for media in &stream.required_media {
                                let _ = channel.send(WorkflowRequest {
                                    request_id: "sourced-from-workflow-forwarder".to_string(),
                                    operation: WorkflowRequestOperation::MediaNotification {
                                        media: media.clone(),
                                    },
                                });
                            }
                        }
                    }
                }
            }
            WorkflowStartedOrStoppedEvent::WorkflowEnded { name } => {
                self.known_workflows.remove(&name);
                if self.stream_for_workflow_name.contains_key(&name) {
                    info!(
                        workflow_name = %name,
                        "Received notification that workflow {} has stopped", name
                    );
                }
            }
        }
    }
    /// Processes one media notification: updates per-stream bookkeeping,
    /// relays the notification to every target workflow currently running,
    /// and always passes it through to the next step via `outputs`.
    fn handle_media(
        &mut self,
        media: MediaNotification,
        outputs: &mut StepOutputs,
        futures_channel: &WorkflowStepFuturesChannel,
    ) {
        match &media.content {
            MediaNotificationContent::NewIncomingStream { stream_name } => {
                if !self.active_streams.contains_key(&media.stream_id) {
                    // The stream notification itself is always replay-required
                    let mut stream_details = StreamDetails {
                        target_workflow_names: HashSet::new(),
                        required_media: vec![media.clone()],
                        cancellation_token: None,
                    };
                    if let Some(workflow) = &self.global_workflow_name {
                        // Fixed-target mode: every stream targets that workflow
                        stream_details
                            .target_workflow_names
                            .insert(workflow.clone());
                        let entry = self
                            .stream_for_workflow_name
                            .entry(workflow.clone())
                            .or_default();
                        entry.insert(media.stream_id.clone());
                    }
                    if let Some(reactor) = &self.reactor_name {
                        // Reactor mode: ask the reactor which workflows should
                        // receive this stream; the answer arrives as a future
                        let (sender, mut receiver) = unbounded_channel();
                        let _ = self.reactor_manager.send(
                            ReactorManagerRequest::CreateWorkflowForStreamName {
                                reactor_name: reactor.clone(),
                                stream_name: stream_name.clone(),
                                response_channel: sender,
                            },
                        );
                        let stream_id = media.stream_id.clone();
                        let stream_name = stream_name.clone();
                        futures_channel.send_on_generic_future_completion(async move {
                            // A dropped channel is treated as "not valid"
                            let result = match receiver.recv().await {
                                Some(response) => response,
                                None => ReactorWorkflowUpdate {
                                    is_valid: false,
                                    routable_workflow_names: HashSet::new(),
                                },
                            };
                            FutureResult::ReactorResponseReceived {
                                stream_id,
                                stream_name,
                                update: result,
                                reactor_update_channel: receiver,
                            }
                        });
                    }
                    self.active_streams
                        .insert(media.stream_id.clone(), stream_details);
                }
            }
            MediaNotificationContent::StreamDisconnected => {
                // Removing the stream here means the pass-along loop below
                // will not fire; the disconnect is relayed explicitly instead
                if let Some(stream) = self.active_streams.remove(&media.stream_id) {
                    for workflow in &stream.target_workflow_names {
                        if let Some(channel) = self.known_workflows.get(workflow) {
                            let _ = channel.send(WorkflowRequest {
                                request_id: "from-workflow-forwarder_disconnection".to_string(),
                                operation: WorkflowRequestOperation::MediaNotification {
                                    media: media.clone(),
                                },
                            });
                        }
                        if let Some(stream_ids) = self.stream_for_workflow_name.get_mut(workflow) {
                            stream_ids.remove(&media.stream_id);
                            if stream_ids.is_empty() {
                                self.stream_for_workflow_name.remove(workflow);
                            }
                        }
                    }
                }
            }
            MediaNotificationContent::Metadata { .. } => {
                // I don't think this can be considered required, as I think closed captions and
                // other data will come down as metadata that we don't want to permanently store.
            }
            MediaNotificationContent::MediaPayload {
                is_required_for_decoding: true,
                ..
            } => {
                // Buffer decoding-required payloads (e.g. headers) so they can
                // be replayed to workflows that start later
                if let Some(stream) = self.active_streams.get_mut(&media.stream_id) {
                    stream.required_media.push(media.clone());
                }
            }
            _ => (),
        }
        // Relay to every currently-running target workflow of this stream
        if let Some(stream) = self.active_streams.get(&media.stream_id) {
            for workflow_name in &stream.target_workflow_names {
                if let Some(channel) = self.known_workflows.get(workflow_name) {
                    let _ = channel.send(WorkflowRequest {
                        request_id: "sourced-from-workflow_forwarder".to_string(),
                        operation: WorkflowRequestOperation::MediaNotification {
                            media: media.clone(),
                        },
                    });
                }
            }
        }
        // Always pass the media through to the next step
        outputs.media.push(media);
    }
    /// Applies a reactor's routing decision for a stream: diffs the reactor's
    /// workflow set against the current targets, sending required media to new
    /// targets and disconnect notifications to removed (or all, if the stream
    /// is no longer valid) targets.
    fn handle_reactor_update(&mut self, stream_id: StreamId, update: ReactorWorkflowUpdate) {
        if let Some(stream) = self.active_streams.get_mut(&stream_id) {
            if update.is_valid {
                let new_workflows = update
                    .routable_workflow_names
                    .iter()
                    .filter(|x| !stream.target_workflow_names.contains(*x))
                    .cloned()
                    .collect::<Vec<_>>();
                let removed_workflows = stream
                    .target_workflow_names
                    .iter()
                    .filter(|x| !update.routable_workflow_names.contains(*x))
                    .cloned()
                    .collect::<Vec<_>>();
                if !new_workflows.is_empty() || !removed_workflows.is_empty() {
                    info!(
                        stream_id = ?stream_id,
                        new_workflows = %new_workflows.len(),
                        removed_workflows = %removed_workflows.len(),
                        "Reactor sent update for stream {:?} with {} new workflows and {} removed workflows",
                        stream_id, new_workflows.len(), removed_workflows.len(),
                    );
                }
                // Remove target workflows and send disconnection message to removed workflows
                for workflow in removed_workflows {
                    stream.target_workflow_names.remove(&workflow);
                    if let Some(channel) = self.known_workflows.get(&workflow) {
                        let _ = channel.send(WorkflowRequest {
                            request_id: "workflow_forwarder_reactor_update".to_string(),
                            operation: WorkflowRequestOperation::MediaNotification {
                                media: MediaNotification {
                                    stream_id: stream_id.clone(),
                                    content: MediaNotificationContent::StreamDisconnected,
                                },
                            },
                        });
                    }
                    if let Some(stream_ids) = self.stream_for_workflow_name.get_mut(&workflow) {
                        stream_ids.remove(&stream_id);
                        if stream_ids.is_empty() {
                            self.stream_for_workflow_name.remove(&workflow);
                        }
                    }
                }
                // Add new target workflows and send required media to new workflows
                for workflow in new_workflows {
                    stream.target_workflow_names.insert(workflow.clone());
                    if let Some(channel) = self.known_workflows.get(&workflow) {
                        for media in &stream.required_media {
                            let _ = channel.send(WorkflowRequest {
                                request_id: "workflow_forwarder_reactor_update".to_string(),
                                operation: WorkflowRequestOperation::MediaNotification {
                                    media: media.clone(),
                                },
                            });
                        }
                    }
                }
                // Keep the reverse index in sync with the reactor's full set
                for workflow in update.routable_workflow_names {
                    let entry = self
                        .stream_for_workflow_name
                        .entry(workflow.clone())
                        .or_default();
                    entry.insert(stream_id.clone());
                }
            } else {
                // This stream is no longer valid according to the reactor
                for workflow in stream.target_workflow_names.drain() {
                    // Send disconnection message to workflow
                    if let Some(channel) = self.known_workflows.get(&workflow) {
                        let _ = channel.send(WorkflowRequest {
                            request_id: "workflow_forwarder_reactor_update".to_string(),
                            operation: WorkflowRequestOperation::MediaNotification {
                                media: MediaNotification {
                                    stream_id: stream_id.clone(),
                                    content: MediaNotificationContent::StreamDisconnected,
                                },
                            },
                        });
                    }
                    if let Some(stream_ids) = self.stream_for_workflow_name.get_mut(&workflow) {
                        stream_ids.remove(&stream_id);
                        if stream_ids.is_empty() {
                            self.stream_for_workflow_name.remove(&workflow);
                        }
                    }
                }
            }
        }
    }
}
impl WorkflowStep for WorkflowForwarderStep {
    /// Drains and dispatches all resolved future notifications, then relays
    /// every incoming media notification. Returns `StepStatus::Error` when a
    /// critical dependency (event hub, reactor manager, reactor) disappears.
    fn execute(
        &mut self,
        inputs: &mut StepInputs,
        outputs: &mut StepOutputs,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepStatus {
        for notification in inputs.notifications.drain(..) {
            let notification: Box<dyn StepFutureResult> = notification;
            let future_result = match notification.downcast::<FutureResult>() {
                Ok(x) => *x,
                Err(_) => {
                    error!(
                        "Workflow forwarder step received a notification that is not a known type"
                    );
                    return StepStatus::Error {
                        message: "Received future result of unknown type".to_string(),
                    };
                }
            };
            match future_result {
                FutureResult::EventHubGone => {
                    error!("Received a notification that the event hub is gone");
                    return StepStatus::Error {
                        message: "Event hub gone".to_string(),
                    };
                }
                FutureResult::ReactorManagerGone => {
                    error!("Reactor manager is gone");
                    return StepStatus::Error {
                        message: "Reactor manager gone".to_string(),
                    };
                }
                FutureResult::ReactorGone => {
                    if let Some(name) = &self.reactor_name {
                        error!("Reactor {} is gone", name);
                    } else {
                        error!("Received notice that a reactor is gone but we aren't using one");
                    }
                    return StepStatus::Error {
                        message: "Reactor gone".to_string(),
                    };
                }
                FutureResult::ReactorResponseReceived {
                    stream_id,
                    stream_name,
                    update,
                    reactor_update_channel,
                } => {
                    if let Some(stream) = self.active_streams.get_mut(&stream_id) {
                        let span = span!(Level::INFO, "Reactor response received", stream_name = %stream_name);
                        let _enter = span.enter();
                        // Keep listening for further reactor updates for this
                        // stream, cancellable when the stream goes away
                        let cancellation_token = CancellationToken::new();
                        let cancellation_receiver = cancellation_token.child_token();
                        stream.cancellation_token = Some(cancellation_token);
                        notify_on_reactor_update(
                            stream_id.clone(),
                            reactor_update_channel,
                            cancellation_receiver,
                            &futures_channel,
                        );
                        self.handle_reactor_update(stream_id, update);
                    }
                }
                FutureResult::ReactorUpdateReceived { stream_id, update } => {
                    self.handle_reactor_update(stream_id, update);
                }
                FutureResult::WorkflowGone { workflow_name } => {
                    // A "gone" future fires when a workflow's request channel
                    // closes. If the workflow restarted, a newer
                    // WorkflowStarted event may have already replaced the
                    // cached channel; only evict the entry when the channel we
                    // currently hold really is closed, so a stale notification
                    // for the previous incarnation cannot remove the
                    // replacement.
                    let currently_closed = self
                        .known_workflows
                        .get(&workflow_name)
                        .map(|channel| channel.is_closed())
                        .unwrap_or(false);

                    if currently_closed {
                        self.known_workflows.remove(&workflow_name);
                    }

                    if self.stream_for_workflow_name.contains_key(&workflow_name) {
                        info!(
                            workflow_name = %workflow_name,
                            "Workflow {} is gone", workflow_name,
                        );
                    }
                }
                FutureResult::ReactorCancellationReceived { stream_id } => {
                    // The reactor-update future for this stream was cancelled;
                    // detach the stream from all of its target workflows
                    if let Some(stream) = self.active_streams.get_mut(&stream_id) {
                        for workflow_name in stream.target_workflow_names.drain() {
                            // Send disconnection message to old workflow
                            if let Some(channel) = self.known_workflows.get(&workflow_name) {
                                let _ = channel.send(WorkflowRequest {
                                    request_id: "workflow_forwarder_reactor_update".to_string(),
                                    operation: WorkflowRequestOperation::MediaNotification {
                                        media: MediaNotification {
                                            stream_id: stream_id.clone(),
                                            content: MediaNotificationContent::StreamDisconnected,
                                        },
                                    },
                                });
                            }
                            if let Some(stream_ids) =
                                self.stream_for_workflow_name.get_mut(&workflow_name)
                            {
                                stream_ids.remove(&stream_id);
                                if stream_ids.is_empty() {
                                    self.stream_for_workflow_name.remove(&workflow_name);
                                }
                            }
                        }
                        stream.cancellation_token = None;
                    }
                }
                FutureResult::WorkflowStartedOrStopped(event) => {
                    self.handle_workflow_event(event, &futures_channel);
                }
            }
        }
        for media in inputs.media.drain(..) {
            self.handle_media(media, outputs, &futures_channel);
        }
        StepStatus::Active
    }
}
impl Drop for WorkflowForwarderStep {
    // Runs when the forwarder step is torn down (e.g. its owning workflow stops).
    // Every target workflow that has been receiving media for one of our active
    // streams is told the stream disconnected, so it does not wait for more media.
    fn drop(&mut self) {
        // Send a disconnect signal for any active streams we are tracking, so the target workflow
        // knows not to expect more media from them.
        for (stream_id, mut stream) in self.active_streams.drain() {
            for workflow_name in stream.target_workflow_names.drain() {
                if let Some(channel) = self.known_workflows.get(&workflow_name) {
                    // Send failures are ignored: if the target workflow is already
                    // gone there is no one left to notify.
                    let _ = channel.send(WorkflowRequest {
                        request_id: "workflow-forwarder-shutdown".to_string(),
                        operation: WorkflowRequestOperation::MediaNotification {
                            media: MediaNotification {
                                stream_id: stream_id.clone(),
                                content: MediaNotificationContent::StreamDisconnected,
                            },
                        },
                    });
                }
            }
        }
    }
}
// Forwards workflow started/stopped events from the event hub into this step's
// futures channel. If the event hub's sender side goes away, an `EventHubGone`
// result is raised instead so the step can error out.
fn notify_on_workflow_event(
    receiver: UnboundedReceiver<WorkflowStartedOrStoppedEvent>,
    futures_channel: &WorkflowStepFuturesChannel,
) {
    futures_channel.send_on_generic_unbounded_recv(
        receiver,
        FutureResult::WorkflowStartedOrStopped,
        || FutureResult::EventHubGone,
    );
}
// Subscribes the step's futures channel to reactor updates for a single stream.
// Each received update is tagged with the stream's id; if the reactor goes away a
// `ReactorGone` result is raised, and if the cancellation token fires the stream's
// id is reported via `ReactorCancellationReceived`.
fn notify_on_reactor_update(
    stream_id: StreamId,
    update_receiver: UnboundedReceiver<ReactorWorkflowUpdate>,
    cancellation_token: CancellationToken,
    futures_channel: &WorkflowStepFuturesChannel,
) {
    // The update closure needs its own copy, since the cancellation closure takes
    // ownership of the original id.
    let update_stream_id = stream_id.clone();

    futures_channel.send_on_generic_unbounded_recv_cancellable(
        update_receiver,
        cancellation_token,
        move |update| FutureResult::ReactorUpdateReceived {
            stream_id: update_stream_id.clone(),
            update,
        },
        || FutureResult::ReactorGone,
        move || FutureResult::ReactorCancellationReceived { stream_id },
    );
}
// Watches the reactor manager's request channel and raises `ReactorManagerGone`
// once every receiver for it has been dropped (i.e. the manager actor has exited).
fn notify_reactor_manager_gone(
    sender: UnboundedSender<ReactorManagerRequest>,
    futures_channel: &WorkflowStepFuturesChannel,
) {
    futures_channel.send_on_generic_future_completion(async move {
        // `closed()` resolves when the receiving half of the channel is dropped
        sender.closed().await;
        FutureResult::ReactorManagerGone
    });
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/net/mod.rs | mmids-core/src/net/mod.rs | //! Networking layer for Mmids applications
use cidr_utils::cidr::{IpCidr, Ipv4Cidr};
use std::fmt::Formatter;
use std::hash::{Hash, Hasher};
use std::net::Ipv4Addr;
use std::sync::Arc;
use thiserror::Error;
pub mod tcp;
/// A unique identifier for any given TCP connection, or unique UDP client. If a TCP client
/// disconnects and reconnects it will be seen with a brand new connection id
#[derive(Clone, Debug, Eq)]
pub struct ConnectionId(pub Arc<String>);

impl std::fmt::Display for ConnectionId {
    /// Renders the inner identifier string as-is.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.0.as_str())
    }
}

impl PartialEq<Self> for ConnectionId {
    /// Two ids are equal when their underlying strings match, regardless of
    /// whether they share the same `Arc` allocation.
    fn eq(&self, other: &Self) -> bool {
        self.0.as_str() == other.0.as_str()
    }
}

impl Hash for ConnectionId {
    /// Hashes the inner string so hashing stays consistent with equality.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state)
    }
}
/// Enumeration to make handling ip addresses vs subnets easier
#[derive(Debug, PartialEq, Eq)]
pub enum IpAddress {
    /// A single, fully specified IPv4 address
    Exact(Ipv4Addr),

    /// An IPv4 CIDR block describing a whole subnet
    Cidr(Ipv4Cidr),
}

/// Error when a given ip address or subnet could not be parsed from a given input
#[derive(Error, Debug)]
pub enum IpAddressParseError {
    /// Contains the original input value that failed to parse
    #[error("The value '{0}' was not a valid ip address or cidr value")]
    InvalidValue(String),
}
impl IpAddress {
    /// Checks if the other exact ip address is a match for the current ip address specification.
    /// An address is a match if the current ip address is an exact one and both are exactly equal,
    /// or if the current ip address is a CIDR subnet mask and the other ip address is contained
    /// within.
    pub fn matches(&self, other_address: &Ipv4Addr) -> bool {
        match self {
            IpAddress::Exact(address) => address == other_address,
            IpAddress::Cidr(cidr) => cidr.contains(other_address),
        }
    }

    /// Attempts to parse a string supposedly containing a comma delimited list of ip addresses
    /// and cidr values. A `None` input returns an empty collection; the first entry that is
    /// neither a valid IPv4 address nor an IPv4 CIDR aborts parsing with an error.
    pub fn parse_comma_delimited_list(
        input: Option<&String>,
    ) -> Result<Vec<IpAddress>, IpAddressParseError> {
        let raw = match input {
            Some(raw) => raw,
            None => return Ok(Vec::new()),
        };

        raw.split(',')
            .map(|entry| {
                // Try a plain IPv4 address first, then fall back to a CIDR block.
                if let Ok(address) = entry.parse::<Ipv4Addr>() {
                    return Ok(IpAddress::Exact(address));
                }

                // IPv6 CIDR values parse successfully but are not supported, so they
                // fall through to the error case just like unparseable input.
                if let Ok(IpCidr::V4(cidr)) = IpCidr::from_str(entry) {
                    return Ok(IpAddress::Cidr(cidr));
                }

                Err(IpAddressParseError::InvalidValue(entry.to_string()))
            })
            .collect()
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/net/tcp/listener.rs | mmids-core/src/net/tcp/listener.rs | use super::TcpSocketResponse;
use crate::net::tcp::TlsOptions;
use crate::net::ConnectionId;
use bytes::{Bytes, BytesMut};
use futures::future::FutureExt;
use std::collections::VecDeque;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio_native_tls::TlsAcceptor;
use tracing::{error, info, instrument, warn};
use uuid::Uuid;
/// Set of bytes that should be sent over a TCP socket
pub struct OutboundPacket {
    /// The bytes to send over the network
    pub bytes: Bytes,

    /// If the connection to the client is backlogged, then any packet that's marked as
    /// droppable will be dropped (see the backlog thresholds in `socket_writer`)
    pub can_be_dropped: bool,
}

/// Parameters to start listening on a TCP port
pub struct ListenerParams {
    /// The port to listen on
    pub port: u16,

    /// Should this port accept TLS connections
    pub use_tls: bool,

    /// Options for TLS. Required if use_tls is true
    pub tls_options: Arc<Option<TlsOptions>>,

    /// The channel in which to send notifications of port activity to. The listener
    /// shuts down once every receiver for this channel has been dropped.
    pub response_channel: UnboundedSender<TcpSocketResponse>,
}
// Read half of an accepted socket: either a plain TCP stream or a TLS stream whose
// handshake has already completed. Both flavors are read uniformly via `read_buf`.
enum ReadSocket {
    Bare(ReadHalf<TcpStream>),
    Tls(ReadHalf<tokio_native_tls::TlsStream<TcpStream>>),
}

// Write half matching `ReadSocket`, written uniformly via `write_packet`.
enum WriteSocket {
    Bare(WriteHalf<TcpStream>),
    Tls(WriteHalf<tokio_native_tls::TlsStream<TcpStream>>),
}
/// Starts listening for TCP connections on the specified port. It returns a channel which
/// callers can use to know if the listener has shut down unexpectedly.
///
/// The listener task owns the receiving half of the returned channel and never reads from
/// it; when the listener task exits (for any reason) the receiver is dropped, which the
/// caller can observe via `closed()` on the returned sender.
pub fn start(params: ListenerParams) -> UnboundedSender<()> {
    let (self_disconnect_sender, self_disconnect_receiver) = unbounded_channel();
    tokio::spawn(listen(params, self_disconnect_receiver));

    self_disconnect_sender
}
/// Core listener loop for a single port: binds the socket, accepts connections, and
/// spawns a handler task per connection. Exits when binding/accepting fails or when
/// the port owner drops every receiver of `response_channel`.
#[instrument(skip(params, _self_disconnection_signal), fields(port = params.port, use_tls = params.use_tls))]
async fn listen(params: ListenerParams, _self_disconnection_signal: UnboundedReceiver<()>) {
    info!("Socket listener for port started");
    let ListenerParams {
        port,
        response_channel,
        use_tls,
        tls_options,
    } = params;

    // Bug fix: only build a TLS acceptor when this port was actually requested with
    // TLS. Previously the acceptor was constructed whenever TLS options were present,
    // so an invalid certificate killed listeners for plain (non-TLS) ports as well.
    let tls = if use_tls {
        match tls_options.as_ref() {
            Some(tls) => {
                let identity = tls.certificate.clone();
                match native_tls::TlsAcceptor::builder(identity).build() {
                    Ok(acceptor) => Some(tokio_native_tls::TlsAcceptor::from(acceptor)),
                    Err(e) => {
                        error!("Failed to build tls acceptor: {:?}", e);
                        return;
                    }
                }
            }

            // Preserves the original behavior: a TLS port with no TLS options falls
            // back to plain connections. The socket manager rejects this combination
            // up front, so this arm should be unreachable in practice.
            None => None,
        }
    } else {
        None
    };

    let tls = Arc::new(tls);

    let bind_address = format!("0.0.0.0:{}", port);
    let listener = match TcpListener::bind(bind_address.as_str()).await {
        Ok(x) => x,
        Err(e) => {
            error!("Error occurred binding socket to {}: {:?}", bind_address, e);
            return;
        }
    };

    loop {
        let disconnect = response_channel.clone();
        tokio::select! {
            result = listener.accept() => {
                let (socket, client_info) = match result {
                    Ok(x) => x,
                    Err(e) => {
                        error!("Error accepting connection for listener on port {}: {:?}", port, e);
                        return;
                    }
                };

                // Every accepted connection gets its own id and handler task
                let connection_id = ConnectionId(Arc::new(Uuid::new_v4().to_string()));
                tokio::spawn(handle_new_connection(socket, client_info, response_channel.clone(), port, connection_id, tls.clone()));
            },

            // Port owner dropped all receivers; shut the listener down
            _ = disconnect.closed() => {
                break;
            }
        }
    }

    info!("Socket listener for port {} closing", port);
}
/// Sets up channels for a freshly accepted connection, announces it to the port owner,
/// performs the TLS handshake when required, and spawns the reader/writer tasks.
#[instrument(skip(tls_acceptor, response_channel, socket, client_info))]
async fn handle_new_connection(
    socket: TcpStream,
    client_info: SocketAddr,
    response_channel: UnboundedSender<TcpSocketResponse>,
    port: u16,
    connection_id: ConnectionId,
    tls_acceptor: Arc<Option<TlsAcceptor>>,
) {
    info!(
        ip = %client_info.ip(),
        "Tcp Listener: new connection from {}, given id {}",
        client_info.ip(),
        connection_id
    );

    let (incoming_sender, incoming_receiver) = unbounded_channel();
    let (outgoing_sender, outgoing_receiver) = unbounded_channel();
    let message = TcpSocketResponse::NewConnection {
        port,
        connection_id: connection_id.clone(),
        incoming_bytes: incoming_receiver,
        outgoing_bytes: outgoing_sender,
        socket_address: client_info,
    };

    if response_channel.send(message).is_err() {
        info!("Port owner disconnected before connection was handled");
        return;
    }

    let (reader, writer) = match split_socket(socket, tls_acceptor).await {
        Ok(x) => x,
        Err(e) => {
            error!("Error splitting socket: {:?}", e);

            // Bug fix: the owner was already told about this connection via
            // `NewConnection` above. Without a matching `Disconnection` message a
            // failed TLS handshake left the owner tracking a connection that would
            // never produce traffic or a disconnect notification.
            let _ = response_channel.send(TcpSocketResponse::Disconnection { connection_id });
            return;
        }
    };

    tokio::spawn(socket_reader(
        connection_id.clone(),
        reader,
        incoming_sender,
        response_channel,
    ));

    tokio::spawn(socket_writer(connection_id, writer, outgoing_receiver));
}
/// Reads bytes off the connection and forwards them to the connection's owner.
///
/// Exits when the peer closes the connection (0-byte read), when a read error occurs,
/// or when the owner drops the receiving end of `incoming_sender`. All exit paths send
/// a `Disconnection` message so the port owner can clean up.
#[instrument(skip(reader, incoming_sender, tcp_response_sender))]
async fn socket_reader(
    connection_id: ConnectionId,
    mut reader: ReadSocket,
    incoming_sender: UnboundedSender<Bytes>,
    tcp_response_sender: UnboundedSender<TcpSocketResponse>,
) {
    let mut buffer = BytesMut::with_capacity(4096);
    loop {
        tokio::select! {
            bytes_read = read_buf(&mut reader, &mut buffer) => {
                let bytes_read = match bytes_read {
                    Ok(x) => x,
                    Err(e) => {
                        // Bug fix: this arm previously `return`ed, which skipped the
                        // `Disconnection` notification below and left the port owner
                        // unaware the connection died (e.g. after a connection
                        // reset). Breaking instead routes every exit through the
                        // same cleanup path.
                        error!("Error reading from byte buffer: {:?}", e);
                        break;
                    }
                };

                if bytes_read == 0 {
                    // Peer closed the connection
                    break;
                }

                // `buffer` was empty before this read, so it now holds exactly the
                // bytes just read. Splitting at `bytes_read` leaves the freezable
                // front chunk in `buffer` and an empty tail (with the remaining
                // capacity) in `bytes` for reuse on the next iteration.
                let bytes = buffer.split_off(bytes_read);
                let received_bytes = buffer.freeze();
                if incoming_sender.send(received_bytes).is_err() {
                    break;
                }

                buffer = bytes;
            },

            // Owner no longer wants bytes from this connection
            () = incoming_sender.closed() => {
                break;
            },
        }
    }

    info!("reader task closed");
    let _ = tcp_response_sender.send(TcpSocketResponse::Disconnection { connection_id });
}
/// Writes queued outbound packets to the connection, with backlog protection.
///
/// Exits when the owner drops every sender of `outgoing_receiver`, when a write
/// fails, or when the backlog exceeds the lethal threshold.
#[instrument(skip(writer, outgoing_receiver))]
async fn socket_writer(
    connection_id: ConnectionId,
    mut writer: WriteSocket,
    mut outgoing_receiver: UnboundedReceiver<OutboundPacket>,
) {
    // Above this many queued packets, droppable packets are discarded
    const INITIAL_BACKLOG_THRESHOLD: usize = 100;
    // Above this many queued packets, the writer gives up entirely
    const LETHAL_BACKLOG_THRESHOLD: usize = 1000;

    let mut send_queue = VecDeque::new();
    loop {
        // Block until at least one packet is available (None = all senders dropped)
        let packet = outgoing_receiver.recv().await;
        if packet.is_none() {
            break;
        }

        let packet = packet.unwrap();

        // Since this is a TCP connection, we can only send so many packets before we have to wait
        // for acknowledgements.  If we don't have enough bandwidth to the client for the current
        // batch of packets it's possible we get backlogged.  The end result of this is the
        // outgoing packet channel constantly filling with new packets.  Left uncontrolled we'll
        // run out of memory.
        //
        // We can't actually see how many items are in a MPSC, we need continually need to read
        // items from the channel and place them in a queue, then when the channel is empty see
        // how many packets we have in the queue.  If we are above the lethal amount assume we
        // will never catch up and kill the writer.  If we are only above an initial threshold then
        // only send packets not marked as droppable.
        send_queue.push_back(packet);
        // Drain whatever else is immediately available without awaiting
        while let Some(Some(packet)) = outgoing_receiver.recv().now_or_never() {
            send_queue.push_back(packet);
        }

        if send_queue.len() >= LETHAL_BACKLOG_THRESHOLD {
            warn!(
                "{} outbound packets in the queue. Killing writer",
                send_queue.len()
            );

            break;
        }

        let queue_length = send_queue.len();
        let drop_optional_packets = send_queue.len() >= INITIAL_BACKLOG_THRESHOLD;
        let mut dropped_packet_count = 0;
        for packet in send_queue.drain(..) {
            if !packet.can_be_dropped || !drop_optional_packets {
                if let Err(e) = write_packet(&mut writer, packet).await {
                    // NOTE(review): this `return` skips the "writer task closed" log
                    // below, unlike the other exit paths — confirm whether that is
                    // intentional
                    error!("Error when writing packet bytes: {:?}", e);
                    return;
                }
            } else {
                dropped_packet_count += 1;
            }
        }

        if drop_optional_packets {
            warn!(
                "send queue was backlogged with {} packets ({} dropped)",
                queue_length, dropped_packet_count
            );
        }
    }

    info!("writer task closed");
}
// Splits an accepted socket into read/write halves, first completing the TLS
// handshake when an acceptor is present. Handshake failures are returned as errors.
async fn split_socket(
    socket: TcpStream,
    tls_acceptor: Arc<Option<TlsAcceptor>>,
) -> Result<(ReadSocket, WriteSocket), Box<dyn std::error::Error + Sync + Send>> {
    if let Some(tls) = tls_acceptor.as_ref() {
        let tls_stream = tls.accept(socket).await?;
        let (read_half, write_half) = tokio::io::split(tls_stream);
        Ok((ReadSocket::Tls(read_half), WriteSocket::Tls(write_half)))
    } else {
        let (read_half, write_half) = tokio::io::split(socket);
        Ok((ReadSocket::Bare(read_half), WriteSocket::Bare(write_half)))
    }
}
// Reads from whichever socket flavor we have, appending to `buffer` and returning the
// number of bytes read (0 means the peer closed the connection).
async fn read_buf(reader: &mut ReadSocket, buffer: &mut BytesMut) -> std::io::Result<usize> {
    match reader {
        ReadSocket::Bare(socket) => socket.read_buf(buffer).await,
        ReadSocket::Tls(socket) => socket.read_buf(buffer).await,
    }
}

// Writes the packet's complete byte payload to whichever socket flavor we have.
async fn write_packet(writer: &mut WriteSocket, packet: OutboundPacket) -> std::io::Result<()> {
    match writer {
        WriteSocket::Bare(socket) => socket.write_all(packet.bytes.as_ref()).await,
        WriteSocket::Tls(socket) => socket.write_all(packet.bytes.as_ref()).await,
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/net/tcp/mod.rs | mmids-core/src/net/tcp/mod.rs | //! A TCP socket manager actor that allows other systems to request TCP connections. The socket
//! manager will manage listeners for different ports, accept connections, unwrap SSL sessions (if
//! requested), and pass networked data to requesters.
mod listener;
mod socket_manager;
use super::ConnectionId;
use bytes::Bytes;
use native_tls::Identity;
use std::net::SocketAddr;
use tokio::sync::mpsc;
pub use listener::OutboundPacket;
pub use socket_manager::start as start_socket_manager;
/// Reasons why the request to listen for TCP connections can fail
#[derive(Debug)]
pub enum RequestFailureReason {
    /// The port being requested has already been opened for another requester
    PortInUse,

    /// A TLS port was requested to be opened, but the certificate could not be opened
    InvalidCertificate(String),

    /// A TLS port was requested to be opened, but the password provided did not unlock the
    /// provided certificate.
    CertPasswordIncorrect,

    /// A TLS port was requested to be opened, but the TCP socket manager was not given any
    /// details required to accept TLS sessions.
    NoTlsDetailsGiven,
}

/// Options required for TLS session handling
pub struct TlsOptions {
    /// The certificate (with private key) used to accept TLS sessions
    pub certificate: Identity,
}
/// Requests by callers to the TCP socket manager
#[derive(Debug)]
pub enum TcpSocketRequest {
    /// Request for the server to start listening on a specific TCP port
    OpenPort {
        /// TCP port to be opened
        port: u16,

        /// If the port should be accepting TLS connections or not
        use_tls: bool,

        /// The channel in which responses should be sent. If the port is successfully opened
        /// then all state changes for the port (such as new connections) will use this channel
        /// for notifications. Dropping every receiver of this channel shuts the port down.
        response_channel: mpsc::UnboundedSender<TcpSocketResponse>,
    },
}
#[derive(Debug)]
/// Response messages that the TCP socket manager may send back
pub enum TcpSocketResponse {
    /// Notification that the specified request that was previously made was accepted
    RequestAccepted {},

    /// Notification that the specified request that was previously made was denied
    RequestDenied {
        /// Reason why the request was denied
        reason: RequestFailureReason,
    },

    /// Notification to system that requested a port be opened that the port has been
    /// forced closed.  This is mostly due to an error listening onto the socket.
    PortForciblyClosed { port: u16 },

    /// Notification that a client has connected to a TCP port opened by the receiver of this
    /// notification.
    NewConnection {
        /// The port the TCP connection came in on
        port: u16,

        /// Unique identifier for this new connection
        connection_id: ConnectionId,

        /// Channel the owner can use to receive bytes sent from the client
        incoming_bytes: mpsc::UnboundedReceiver<Bytes>,

        /// Channel the owner can use to send bytes to the client
        outgoing_bytes: mpsc::UnboundedSender<OutboundPacket>,

        /// The socket address the client connected from
        socket_address: SocketAddr,
    },

    /// Notification that a client has disconnected from a TCP port
    Disconnection {
        /// Unique identifier of the connection that disconnected
        connection_id: ConnectionId,
    },
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/net/tcp/socket_manager.rs | mmids-core/src/net/tcp/socket_manager.rs | use super::listener::{start as start_listener, ListenerParams};
use super::{TcpSocketRequest, TcpSocketResponse};
use crate::actor_utils::{notify_on_unbounded_closed, notify_on_unbounded_recv};
use crate::net::tcp::{RequestFailureReason, TlsOptions};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tracing::{debug, error, info};
/// Starts a new instance of a socket manager task.  A socket manager can be requested to open
/// ports on behalf of another system.  If the port is successfully opened it will begin listening
/// for TCP connections on that port, and then manage the reading and writing of network traffic
/// for that connection.
///
/// Returns the channel used to submit requests; the manager shuts down once every
/// clone of this sender has been dropped.
pub fn start(tls_options: Option<TlsOptions>) -> UnboundedSender<TcpSocketRequest> {
    let (request_sender, request_receiver) = unbounded_channel();
    let (actor_sender, actor_receiver) = unbounded_channel();
    let manager = SocketManager::new(request_receiver, actor_sender);
    tokio::spawn(manager.run(actor_receiver, tls_options));

    request_sender
}
// Internal mailbox messages the socket manager actor reacts to
enum SocketManagerFutureResult {
    // Every request sender has been dropped; no more work can arrive
    AllRequestSendersGone,
    // A caller submitted a new request
    IncomingRequest(TcpSocketRequest),
    // The listener task for the given port has shut down
    ListenerShutdown { port: u16 },
}

// State tracked per successfully opened port
struct OpenPort {
    // Channel used to notify the port's owner of state changes
    response_channel: UnboundedSender<TcpSocketResponse>,
}

// Actor state for the socket manager
struct SocketManager {
    // Sender feeding results of spawned futures back into our own mailbox
    internal_sender: UnboundedSender<SocketManagerFutureResult>,
    // Currently open ports, keyed by port number
    open_ports: HashMap<u16, OpenPort>,
}
impl SocketManager {
    /// Wires the public request channel into the actor mailbox: each incoming
    /// `TcpSocketRequest` becomes an `IncomingRequest` message, and a terminal
    /// `AllRequestSendersGone` message is queued once every sender is dropped.
    fn new(
        request_receiver: UnboundedReceiver<TcpSocketRequest>,
        actor_sender: UnboundedSender<SocketManagerFutureResult>,
    ) -> Self {
        notify_on_unbounded_recv(
            request_receiver,
            actor_sender.clone(),
            SocketManagerFutureResult::IncomingRequest,
            || SocketManagerFutureResult::AllRequestSendersGone,
        );

        SocketManager {
            internal_sender: actor_sender,
            open_ports: HashMap::new(),
        }
    }

    /// Main actor loop: processes mailbox messages until all requesters are gone.
    async fn run(
        mut self,
        mut actor_receiver: UnboundedReceiver<SocketManagerFutureResult>,
        tls_options: Option<TlsOptions>,
    ) {
        info!("Starting TCP socket manager");

        // Shared with every listener that gets started, since TLS listeners all need
        // the same certificate details
        let tls_options = Arc::new(tls_options);

        while let Some(future_result) = actor_receiver.recv().await {
            match future_result {
                SocketManagerFutureResult::AllRequestSendersGone => {
                    info!("All TCP socket manager requesters gone");
                    break;
                }

                SocketManagerFutureResult::IncomingRequest(request) => {
                    self.handle_request(request, tls_options.clone());
                }

                SocketManagerFutureResult::ListenerShutdown { port } => {
                    // Forget the port and tell its owner the port closed out from
                    // under them
                    match self.open_ports.remove(&port) {
                        None => (),
                        Some(details) => {
                            let _ = details
                                .response_channel
                                .send(TcpSocketResponse::PortForciblyClosed { port });
                        }
                    }
                }
            }
        }

        info!("Socket manager closing");
    }

    /// Handles a single `OpenPort` request: validates TLS availability, rejects
    /// duplicate ports, otherwise starts a listener and tracks it.
    fn handle_request(&mut self, request: TcpSocketRequest, tls_options: Arc<Option<TlsOptions>>) {
        match request {
            TcpSocketRequest::OpenPort {
                port,
                response_channel,
                use_tls,
            } => {
                // A TLS port can't be opened if the manager was started without TLS
                // options
                if use_tls && tls_options.as_ref().is_none() {
                    error!(
                        port = port,
                        "Request to open port with tls, but we have no tls options"
                    );

                    let _ = response_channel.send(TcpSocketResponse::RequestDenied {
                        reason: RequestFailureReason::NoTlsDetailsGiven,
                    });

                    return;
                }

                if let Entry::Vacant(entry) = self.open_ports.entry(port) {
                    debug!(port = port, use_tls = use_tls, "TCP port being opened");
                    let details = OpenPort {
                        response_channel: response_channel.clone(),
                    };

                    entry.insert(details);

                    let listener_shutdown = start_listener(ListenerParams {
                        port,
                        response_channel: response_channel.clone(),
                        use_tls,
                        tls_options,
                    });

                    // When the listener task drops its half of this channel we get a
                    // `ListenerShutdown` message back into the actor loop
                    notify_on_unbounded_closed(
                        listener_shutdown,
                        self.internal_sender.clone(),
                        move || SocketManagerFutureResult::ListenerShutdown { port },
                    );

                    let _ = response_channel.send(TcpSocketResponse::RequestAccepted {});
                } else {
                    debug!(port = port, "Port is already in use!");
                    let message = TcpSocketResponse::RequestDenied {
                        reason: RequestFailureReason::PortInUse,
                    };

                    let _ = response_channel.send(message);
                }
            }
        }
    }
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/reactors/manager.rs | mmids-core/src/reactors/manager.rs | //! The reactor manager creates new reactors and allows relaying requests to the correct reactor
//! based on names.
use crate::actor_utils::notify_on_unbounded_recv;
use crate::event_hub::SubscriptionRequest;
use crate::reactors::executors::{GenerationError, ReactorExecutorFactory};
use crate::reactors::reactor::ReactorWorkflowUpdate;
use crate::reactors::{start_reactor, ReactorDefinition, ReactorRequest};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::Sender;
use tracing::{error, info, instrument, warn};
/// Requests that can be made to the reactor manager
#[derive(Debug)]
pub enum ReactorManagerRequest {
    /// Requests a reactor to be created based on the specified definition
    CreateReactor {
        definition: ReactorDefinition,
        /// One-shot channel for the creation outcome
        response_channel: Sender<CreateReactorResult>,
    },

    /// Requests that the specified reactor start a workflow based on the specified stream name
    CreateWorkflowForStreamName {
        /// The name of the reactor to send this request to
        reactor_name: Arc<String>,

        /// The name of the stream to look up a workflow for
        stream_name: Arc<String>,

        /// Channel that will be used to keep the created workflow alive.  When the sender end of
        /// the channel is closed, that will be a signal to the reactor to remove the created
        /// workflow.
        response_channel: UnboundedSender<ReactorWorkflowUpdate>,
    },
}

/// Outcome of a `CreateReactor` request
#[derive(Debug)]
pub enum CreateReactorResult {
    Success,
    /// A reactor with the requested name already exists
    DuplicateReactorName,
    /// The factory had no generator registered for the requested executor name
    ExecutorGeneratorError(GenerationError),
    /// The generator was found but failed to produce an executor
    ExecutorReturnedError(Box<dyn std::error::Error + Sync + Send>),
}
/// Starts the reactor manager actor and returns the channel used to send it requests.
/// The actor shuts down once every clone of the returned sender has been dropped.
pub fn start_reactor_manager(
    executor_factory: ReactorExecutorFactory,
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
) -> UnboundedSender<ReactorManagerRequest> {
    let (sender, receiver) = unbounded_channel();
    let (actor_sender, actor_receiver) = unbounded_channel();
    let actor = Actor::new(
        executor_factory,
        receiver,
        event_hub_subscriber,
        actor_sender,
    );

    tokio::spawn(actor.run(actor_receiver));

    sender
}

// Internal mailbox messages for the reactor manager actor loop
enum FutureResult {
    // Every request sender has been dropped
    AllConsumersGone,
    // A new request arrived
    RequestReceived(ReactorManagerRequest),
}

// Actor state for the reactor manager
struct Actor {
    executor_factory: ReactorExecutorFactory,
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
    // Running reactors, keyed by reactor name
    reactors: HashMap<Arc<String>, UnboundedSender<ReactorRequest>>,
}
impl Actor {
    /// Wires the public request channel into the actor mailbox and builds the
    /// initial (empty) reactor registry.
    fn new(
        executor_factory: ReactorExecutorFactory,
        receiver: UnboundedReceiver<ReactorManagerRequest>,
        event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
        actor_sender: UnboundedSender<FutureResult>,
    ) -> Self {
        notify_on_unbounded_recv(
            receiver,
            actor_sender,
            FutureResult::RequestReceived,
            || FutureResult::AllConsumersGone,
        );

        Actor {
            executor_factory,
            event_hub_subscriber,
            reactors: HashMap::new(),
        }
    }

    /// Main actor loop: processes requests until every requester has gone away.
    #[instrument(name = "Reactor Manager Execution", skip(self))]
    async fn run(mut self, mut receiver: UnboundedReceiver<FutureResult>) {
        info!("Starting reactor manager");

        while let Some(result) = receiver.recv().await {
            match result {
                FutureResult::AllConsumersGone => {
                    info!("All consumers gone");
                    break;
                }

                FutureResult::RequestReceived(request) => {
                    self.handle_request(request);
                }
            }
        }

        info!("Reactor manager closing");
    }

    /// Dispatches a single request: either creates a new reactor (rejecting
    /// duplicates and executor failures) or relays a workflow-creation request to
    /// an existing reactor by name.
    fn handle_request(&mut self, request: ReactorManagerRequest) {
        match request {
            ReactorManagerRequest::CreateReactor {
                definition,
                response_channel,
            } => {
                if self.reactors.contains_key(&definition.name) {
                    let _ = response_channel.send(CreateReactorResult::DuplicateReactorName);
                    return;
                }

                // Look up the generator registered for this executor name
                let generator = match self.executor_factory.get_generator(&definition.executor) {
                    Ok(generator) => generator,
                    Err(error) => {
                        warn!(
                            reactor_name = %definition.name,
                            executor_name = %definition.executor,
                            "Reactor {} is configured to use executor {}, but the factory \
                            returned an error when trying to get it: {:?}",
                            definition.name, definition.executor, error
                        );

                        let _ = response_channel
                            .send(CreateReactorResult::ExecutorGeneratorError(error));

                        return;
                    }
                };

                // Build the executor from the definition's parameters
                let executor = match generator.generate(&definition.parameters) {
                    Ok(executor) => executor,
                    Err(error) => {
                        warn!(
                            reactor_name = %definition.name,
                            "Executor failed to be generated for reactor {}: {:?}",
                            definition.name, error
                        );

                        let _ = response_channel
                            .send(CreateReactorResult::ExecutorReturnedError(error));

                        return;
                    }
                };

                let reactor = start_reactor(
                    definition.name.clone(),
                    executor,
                    self.event_hub_subscriber.clone(),
                    definition.update_interval,
                );

                self.reactors.insert(definition.name, reactor);
                let _ = response_channel.send(CreateReactorResult::Success);
            }

            ReactorManagerRequest::CreateWorkflowForStreamName {
                reactor_name,
                stream_name,
                response_channel,
            } => {
                let reactor = match self.reactors.get(&reactor_name) {
                    Some(reactor) => reactor,
                    None => {
                        error!(
                            reactor_name = %reactor_name,
                            "Request received for reactor {}, but no reactor exists with that name",
                            reactor_name,
                        );

                        // Reply with an invalid update so the caller doesn't hang
                        // waiting for a reactor that doesn't exist
                        let _ = response_channel.send(ReactorWorkflowUpdate {
                            is_valid: false,
                            routable_workflow_names: HashSet::new(),
                        });

                        return;
                    }
                };

                let _ = reactor.send(ReactorRequest::CreateWorkflowNameForStream {
                    stream_name,
                    response_channel,
                });
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::reactors::executors::{
ReactorExecutionResult, ReactorExecutor, ReactorExecutorGenerator,
};
use crate::test_utils;
use crate::workflows::definitions::WorkflowDefinition;
use futures::future::BoxFuture;
use futures::FutureExt;
use std::error::Error;
use std::time::Duration;
use tokio::sync::oneshot::channel;
#[tokio::test]
async fn successful_result_for_new_reactor() {
let context = TestContext::new();
let mut parameters = HashMap::new();
parameters.insert("abc".to_string(), None);
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters,
executor: "exe".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
CreateReactorResult::Success => (),
response => panic!("Expected a success response, instead got {:?}", response),
}
}
#[tokio::test]
async fn duplicate_name_error_when_multiple_reactors_have_same_name() {
let context = TestContext::new();
let mut parameters = HashMap::new();
parameters.insert("abc".to_string(), None);
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters: parameters.clone(),
executor: "exe".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let _ = test_utils::expect_oneshot_response(receiver).await;
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters: parameters.clone(),
executor: "exe".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
CreateReactorResult::DuplicateReactorName => (),
response => panic!("Expected a success response, instead got {:?}", response),
}
}
#[tokio::test]
async fn error_when_generator_fails() {
let context = TestContext::new();
let mut parameters = HashMap::new();
parameters.insert("abcd".to_string(), None);
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters,
executor: "exe".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
CreateReactorResult::ExecutorReturnedError(_) => (),
response => panic!("Expected a success response, instead got {:?}", response),
}
}
#[tokio::test]
async fn error_when_generator_not_found() {
let context = TestContext::new();
let mut parameters = HashMap::new();
parameters.insert("abc".to_string(), None);
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters,
executor: "exe2".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
CreateReactorResult::ExecutorGeneratorError(
GenerationError::NoRegisteredGenerator(name),
) => {
assert_eq!(&name, "exe2", "Error contained an unexpected name");
}
response => panic!("Expected a success response, instead got {:?}", response),
}
}
#[tokio::test]
async fn create_workflow_request_sends_to_correct_reactor() {
let context = TestContext::new();
let mut parameters = HashMap::new();
parameters.insert("abc".to_string(), None);
let (sender, receiver) = channel();
context
.manager
.send(ReactorManagerRequest::CreateReactor {
definition: ReactorDefinition {
name: Arc::new("reactor".to_string()),
update_interval: Duration::new(0, 0),
parameters,
executor: "exe".to_string(),
},
response_channel: sender,
})
.expect("Failed to send create request");
let response = test_utils::expect_oneshot_response(receiver).await;
match response {
CreateReactorResult::Success => (),
response => panic!("Expected a success response, instead got {:?}", response),
}
let (sender, mut receiver) = unbounded_channel();
context
.manager
.send(ReactorManagerRequest::CreateWorkflowForStreamName {
reactor_name: Arc::new("reactor".to_string()),
stream_name: Arc::new("def".to_string()),
response_channel: sender,
})
.expect("Failed to send create workflow request");
let response = test_utils::expect_mpsc_response(&mut receiver).await;
assert!(
response.is_valid,
"Expected response to have an is_valid flag of true"
);
}
#[tokio::test]
async fn create_workflow_request_returns_not_valid_when_no_reactor_has_specified_name() {
    let context = TestContext::new();

    let mut parameters = HashMap::new();
    parameters.insert("abc".to_string(), None);

    // Register a reactor under the name "reactor".
    let (create_sender, create_receiver) = channel();
    context
        .manager
        .send(ReactorManagerRequest::CreateReactor {
            definition: ReactorDefinition {
                name: Arc::new("reactor".to_string()),
                update_interval: Duration::new(0, 0),
                parameters,
                executor: "exe".to_string(),
            },
            response_channel: create_sender,
        })
        .expect("Failed to send create request");

    let create_response = test_utils::expect_oneshot_response(create_receiver).await;
    match create_response {
        CreateReactorResult::Success => (),
        response => panic!("Expected a success response, instead got {:?}", response),
    }

    // Target a reactor name that was never registered.
    let (workflow_sender, mut workflow_receiver) = unbounded_channel();
    context
        .manager
        .send(ReactorManagerRequest::CreateWorkflowForStreamName {
            reactor_name: Arc::new("reactor2".to_string()),
            stream_name: Arc::new("def".to_string()),
            response_channel: workflow_sender,
        })
        .expect("Failed to send create workflow request");

    // An unknown reactor name must produce a not-valid update.
    let update = test_utils::expect_mpsc_response(&mut workflow_receiver).await;
    assert!(
        !update.is_valid,
        "Expected response to have an is_valid flag of false"
    );
}
/// Shared wiring for reactor-manager tests: the manager request handle plus the event-hub
/// subscription receiver (held so that channel stays open for the test's lifetime).
struct TestContext {
    manager: UnboundedSender<ReactorManagerRequest>,
    _event_receiver: UnboundedReceiver<SubscriptionRequest>,
}

/// Generator registered under the "exe" executor name in these tests; only succeeds when the
/// parameter map contains an "abc" key.
struct TestExecutorGenerator;

/// Executor stub whose `get_workflow` always returns a single non-routed "test" workflow.
struct TestExecutor;
impl TestContext {
    /// Builds a reactor manager with a single registered executor generator ("exe") and
    /// captures the event-hub subscription channel so it stays open.
    fn new() -> Self {
        let mut factory = ReactorExecutorFactory::new();
        factory
            .register("exe".to_string(), Box::new(TestExecutorGenerator))
            .expect("Registration failed");

        let (event_sender, event_receiver) = unbounded_channel();

        Self {
            manager: start_reactor_manager(factory, event_sender),
            _event_receiver: event_receiver,
        }
    }
}
impl ReactorExecutor for TestExecutor {
    /// Always resolves to a valid result containing one non-routed workflow named "test",
    /// regardless of the requested stream name.
    fn get_workflow(
        &self,
        _stream_name: Arc<String>,
    ) -> BoxFuture<'static, ReactorExecutionResult> {
        let workflows = vec![WorkflowDefinition {
            name: Arc::new("test".to_string()),
            routed_by_reactor: false,
            steps: Vec::new(),
        }];

        async move { ReactorExecutionResult::valid(workflows) }.boxed()
    }
}
impl ReactorExecutorGenerator for TestExecutorGenerator {
    /// Succeeds only when the parameter map contains an "abc" key, mirroring how a real
    /// generator validates its configuration.
    fn generate(
        &self,
        parameters: &HashMap<String, Option<String>>,
    ) -> Result<Box<dyn ReactorExecutor + Send>, Box<dyn Error + Sync + Send>> {
        if !parameters.contains_key("abc") {
            return Err("Test".into());
        }

        Ok(Box::new(TestExecutor))
    }
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/reactors/mod.rs | mmids-core/src/reactors/mod.rs | //! Reactors are actors that are used to manage workflows for specific stream names. This is a
//! pull mechanism for dynamic workflow capabilities in mmids. When a reactor is asked for a
//! workflow for a stream name, the reactor will reach out to an external service (configured
//! by a reactor executor) to obtain a workflow definition for the requested stream name. If none
//! is returned then that normally means the stream name is not allowed. If a valid workflow
//! definition is returned, the reactor will ensure that the workflow is created so media can be
//! routed to it.
pub mod executors;
pub mod manager;
mod reactor;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
pub use reactor::{start_reactor, ReactorRequest, ReactorWorkflowUpdate};
/// How reactors are defined
#[derive(Clone, Debug)]
pub struct ReactorDefinition {
    /// The name of the reactor. Used by endpoints and workflow steps to identify which reactor
    /// they want to interact with.
    pub name: Arc<String>,

    /// The name of the query executor this reactor should use to perform queries
    pub executor: String,

    /// How many seconds the reactor should wait before it re-runs the executor and gets the latest
    /// version of the corresponding workflow definition. An update interval of 0 (or a value not
    /// specified) means it will never update.
    pub update_interval: Duration,

    /// Key value pairs used to instruct the reactor's executor. Valid values here are specific
    /// to the executor that was picked.
    pub parameters: HashMap<String, Option<String>>,
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/reactors/reactor.rs | mmids-core/src/reactors/reactor.rs | use crate::actor_utils::{
notify_on_future_completion, notify_on_unbounded_closed, notify_on_unbounded_recv,
};
use crate::event_hub::{SubscriptionRequest, WorkflowManagerEvent};
use crate::reactors::executors::{ReactorExecutionResult, ReactorExecutor};
use crate::workflows::definitions::WorkflowDefinition;
use crate::workflows::manager::{WorkflowManagerRequest, WorkflowManagerRequestOperation};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tracing::{info, instrument, warn};
/// Requests that can be made to a reactor
#[derive(Debug)]
pub enum ReactorRequest {
    /// Requests that the reactor creates and manages a workflow for the specified stream name
    CreateWorkflowNameForStream {
        /// Name of the stream to get a workflow for
        stream_name: Arc<String>,

        /// The channel to send a response for. This channel will not only be used for the
        /// initial response, but updates will be sent any time the reactor detects changes.
        /// When every response channel registered for a stream has closed, the reactor stops
        /// the workflows it created for that stream.
        response_channel: UnboundedSender<ReactorWorkflowUpdate>,
    },
}
/// Contains information about a workflow from a reactor
#[derive(Debug)]
pub struct ReactorWorkflowUpdate {
    /// If the reactor considers the stream name valid and workflows have been created for it.
    pub is_valid: bool,

    /// The names of workflows that the reactor expects streams to be routed to. Only workflows
    /// whose definitions are flagged `routed_by_reactor` are included here.
    pub routable_workflow_names: HashSet<Arc<String>>,
}
/// Spawns a reactor actor and returns the channel used to submit requests to it.
pub fn start_reactor(
    name: Arc<String>,
    executor: Box<dyn ReactorExecutor + Send>,
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
    update_interval: Duration,
) -> UnboundedSender<ReactorRequest> {
    let (request_sender, request_receiver) = unbounded_channel();
    let (internal_sender, internal_receiver) = unbounded_channel();

    let actor = Actor::new(
        name,
        request_receiver,
        executor,
        event_hub_subscriber,
        update_interval,
        internal_sender,
    );

    tokio::spawn(actor.run(internal_receiver));
    request_sender
}
/// Internal notifications processed by the reactor actor's main loop. Each variant is produced
/// by a channel watcher or spawned future set up through the `actor_utils` helpers.
enum FutureResult {
    // Every sender of ReactorRequests has been dropped
    AllRequestConsumersGone,

    // The event hub's subscription channel closed
    EventHubGone,

    // The registered workflow manager's channel closed
    WorkflowManagerGone,

    // A new request arrived for this reactor
    RequestReceived(ReactorRequest),

    // The executor finished a lookup for the given stream name
    ExecutorResponseReceived {
        stream_name: Arc<String>,
        result: ReactorExecutionResult,
    },

    // The event hub broadcast a workflow manager event
    WorkflowManagerEventReceived(WorkflowManagerEvent),

    // A client's response channel for the given stream closed
    ClientResponseChannelClosed {
        stream_name: Arc<String>,
    },

    // The update interval elapsed and the stream should be re-queried
    UpdateStreamNameRequested {
        stream_name: Arc<String>,
    },
}
/// The workflow definitions most recently returned by the executor for one stream name.
struct CachedWorkflows {
    definitions: Vec<WorkflowDefinition>,
}

struct Actor {
    // Sender for this actor's own internal notification channel; cloned into watchers/futures
    internal_sender: UnboundedSender<FutureResult>,

    // Name of this reactor, used in logs and workflow-manager request ids
    name: Arc<String>,

    // Executor used to look up workflow definitions for stream names
    executor: Box<dyn ReactorExecutor + Send>,

    // Channel to the workflow manager, once one registers via the event hub
    workflow_manager: Option<UnboundedSender<WorkflowManagerRequest>>,

    // Last known workflow definitions per stream name
    cached_workflows_for_stream_name: HashMap<Arc<String>, CachedWorkflows>,

    // How long to wait before re-querying the executor; zero disables periodic updates
    update_interval: Duration,

    // Response channels per stream name; every registered client gets every update
    stream_response_channels: HashMap<Arc<String>, Vec<UnboundedSender<ReactorWorkflowUpdate>>>,
}
impl Actor {
/// Wires up the actor: watches the public request channel, subscribes to workflow-manager
/// events from the event hub, and routes both into the internal `FutureResult` channel that
/// `run` consumes.
fn new(
    name: Arc<String>,
    receiver: UnboundedReceiver<ReactorRequest>,
    executor: Box<dyn ReactorExecutor + Send>,
    event_hub_subscriber: UnboundedSender<SubscriptionRequest>,
    update_interval: Duration,
    actor_sender: UnboundedSender<FutureResult>,
) -> Self {
    // Forward incoming requests (and the channel's closure) onto the internal channel.
    notify_on_unbounded_recv(
        receiver,
        actor_sender.clone(),
        FutureResult::RequestReceived,
        || FutureResult::AllRequestConsumersGone,
    );

    // Subscribe to workflow manager events. A failed send is deliberately ignored here;
    // the dropped receiver will surface later as `EventHubGone`.
    let (manager_sender, manager_receiver) = unbounded_channel();
    let _ = event_hub_subscriber.send(SubscriptionRequest::WorkflowManagerEvents {
        channel: manager_sender,
    });

    notify_on_unbounded_recv(
        manager_receiver,
        actor_sender.clone(),
        FutureResult::WorkflowManagerEventReceived,
        || FutureResult::EventHubGone,
    );

    Actor {
        internal_sender: actor_sender,
        name,
        executor,
        workflow_manager: None,
        cached_workflows_for_stream_name: HashMap::new(),
        update_interval,
        stream_response_channels: HashMap::new(),
    }
}
/// Main actor loop: processes internal notifications until any critical channel closes.
#[instrument(name = "Reactor Execution", skip(self, receiver), fields(name = %self.name))]
async fn run(mut self, mut receiver: UnboundedReceiver<FutureResult>) {
    info!("Starting reactor");

    while let Some(result) = receiver.recv().await {
        match result {
            // Any of these three means the reactor can no longer do useful work, so it
            // shuts down.
            FutureResult::AllRequestConsumersGone => {
                info!("All consumers gone");
                break;
            }
            FutureResult::EventHubGone => {
                info!("Event manager gone");
                break;
            }
            FutureResult::WorkflowManagerGone => {
                info!("Workflow manager gone");
                break;
            }
            FutureResult::ClientResponseChannelClosed { stream_name } => {
                self.handle_response_channel_closed(stream_name);
            }
            FutureResult::RequestReceived(request) => {
                self.handle_request(request);
            }
            FutureResult::ExecutorResponseReceived {
                stream_name,
                result: workflow,
            } => {
                self.handle_executor_response(stream_name, workflow);
            }
            FutureResult::UpdateStreamNameRequested { stream_name } => {
                // Only re-query streams that still have cached workflows; streams that were
                // invalidated or whose clients all left are skipped.
                if self
                    .cached_workflows_for_stream_name
                    .contains_key(&stream_name)
                {
                    let future = self.executor.get_workflow(stream_name.clone());
                    notify_on_future_completion(
                        future,
                        self.internal_sender.clone(),
                        move |result| FutureResult::ExecutorResponseReceived {
                            stream_name,
                            result,
                        },
                    );
                }
            }
            FutureResult::WorkflowManagerEventReceived(event) => {
                self.handle_workflow_manager_event(event);
            }
        }
    }

    info!("Reactor closing");
}
/// Handles a client request for a workflow for a stream name. The response channel is
/// registered for ongoing updates; a cached result is answered immediately, otherwise the
/// executor is queried asynchronously.
fn handle_request(&mut self, request: ReactorRequest) {
    match request {
        ReactorRequest::CreateWorkflowNameForStream {
            stream_name,
            response_channel,
        } => {
            info!(
                stream_name = %stream_name,
                "Received request to get workflow for stream '{}'", stream_name
            );

            // Track this client so it receives every future update for the stream.
            let channels = self
                .stream_response_channels
                .entry(stream_name.clone())
                .or_default();

            channels.push(response_channel.clone());

            if let Some(cache) = self.cached_workflows_for_stream_name.get_mut(&stream_name) {
                // We already know the answer; respond with the cached routable names now.
                let _ = response_channel.send(ReactorWorkflowUpdate {
                    is_valid: true,
                    routable_workflow_names: cache
                        .definitions
                        .iter()
                        .filter(|w| w.routed_by_reactor)
                        .map(|w| w.name.clone())
                        .collect::<HashSet<_>>(),
                });
            } else {
                // First request for this stream; ask the executor and respond when done.
                let future = self.executor.get_workflow(stream_name.clone());
                let stream_name = stream_name.clone();
                notify_on_future_completion(
                    future,
                    self.internal_sender.clone(),
                    move |result| FutureResult::ExecutorResponseReceived {
                        stream_name,
                        result,
                    },
                );
            }

            // Watch for the client going away so its channel can be pruned later.
            notify_on_unbounded_closed(
                response_channel,
                self.internal_sender.clone(),
                move || FutureResult::ClientResponseChannelClosed { stream_name },
            );
        }
    }
}
/// Processes the executor's answer for a stream: notifies every waiting client, syncs the
/// workflow manager (upserting returned workflows and stopping ones that disappeared),
/// updates the cache, and schedules the next refresh when an update interval is configured.
fn handle_executor_response(
    &mut self,
    stream_name: Arc<String>,
    result: ReactorExecutionResult,
) {
    // If no clients remain for this stream the result is dropped entirely.
    if let Some(channels) = self.stream_response_channels.get(&stream_name) {
        let routed_workflow_names = result
            .workflows_returned
            .iter()
            .filter(|w| w.routed_by_reactor)
            .map(|w| w.name.clone())
            .collect::<HashSet<_>>();

        info!(
            stream_name = %stream_name,
            workflow_count = %result.workflows_returned.len(),
            routed_count = %routed_workflow_names.len(),
            "Executor returned {} workflows ({} routed) for the stream '{}'",
            result.workflows_returned.len(), routed_workflow_names.len(), stream_name,
        );

        if !result.stream_is_valid {
            if let Some(cache) = self.cached_workflows_for_stream_name.remove(&stream_name) {
                // Since we had some workflows cached, and now the external service isn't giving us
                // any workflows, that means this stream name is no longer valid.
                if let Some(manager) = &self.workflow_manager {
                    for workflow in cache.definitions {
                        let _ = manager.send(WorkflowManagerRequest {
                            request_id: format!(
                                "reactor_{}_stream_{}_ended",
                                self.name, stream_name
                            ),
                            operation: WorkflowManagerRequestOperation::StopWorkflow {
                                name: workflow.name,
                            },
                        });
                    }
                }
            }
        } else {
            if routed_workflow_names.is_empty() {
                warn!(
                    stream_name = %stream_name,
                    "Zero routed workflows returned for stream '{}'. Any workflow router steps \
                    will not forward media to these workflows", stream_name
                );
            }

            // Upsert all returned workflows
            if let Some(manager) = &self.workflow_manager {
                for workflow in &result.workflows_returned {
                    let _ = manager.send(WorkflowManagerRequest {
                        request_id: format!(
                            "reactor_{}_stream_{}_update",
                            self.name, stream_name
                        ),
                        operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                            definition: workflow.clone(),
                        },
                    });
                }
            }

            let current_workflow_names = result
                .workflows_returned
                .iter()
                .map(|w| w.name.clone())
                .collect::<HashSet<_>>();

            let new_cache = CachedWorkflows {
                definitions: result.workflows_returned,
            };

            if let Some(old_cache) = self
                .cached_workflows_for_stream_name
                .insert(stream_name.clone(), new_cache)
            {
                // Stop any workflows that were not returned by the executor this time
                if let Some(manager) = &self.workflow_manager {
                    for workflow in old_cache.definitions {
                        if !current_workflow_names.contains(&workflow.name) {
                            let _ = manager.send(WorkflowManagerRequest {
                                request_id: format!(
                                    "reactor_{}_stream_{}_partially_ended",
                                    self.name, stream_name
                                ),
                                operation: WorkflowManagerRequestOperation::StopWorkflow {
                                    name: workflow.name,
                                },
                            });
                        }
                    }
                }
            }
        }

        // Notify every client waiting on this stream, whether the result was valid or not.
        for channel in channels {
            let _ = channel.send(ReactorWorkflowUpdate {
                is_valid: result.stream_is_valid,
                routable_workflow_names: routed_workflow_names.clone(),
            });
        }

        // Schedule the next refresh; a zero interval disables periodic updates.
        if !self.update_interval.is_zero() {
            notify_after_update_interval(
                stream_name,
                self.update_interval,
                self.internal_sender.clone(),
            );
        }
    }
}
/// Reacts to workflow manager lifecycle events. When a manager registers, every cached
/// workflow is re-upserted so the (possibly new) manager matches this reactor's state.
fn handle_workflow_manager_event(&mut self, event: WorkflowManagerEvent) {
    match event {
        WorkflowManagerEvent::WorkflowManagerRegistered { channel } => {
            info!("Reactor received a workflow manager channel");

            // Shut the reactor down if the manager's channel ever closes.
            notify_on_unbounded_closed(channel.clone(), self.internal_sender.clone(), || {
                FutureResult::WorkflowManagerGone
            });

            // Upsert all cached workflows so the manager catches up with our state
            for cached_workflow in self.cached_workflows_for_stream_name.values() {
                for workflow in &cached_workflow.definitions {
                    let _ = channel.send(WorkflowManagerRequest {
                        request_id: format!("reactor_{}_cache_catchup", self.name),
                        operation: WorkflowManagerRequestOperation::UpsertWorkflow {
                            definition: workflow.clone(),
                        },
                    });
                }
            }

            self.workflow_manager = Some(channel);
        }
    }
}
/// Prunes closed response channels for the given stream. When the last channel for a stream
/// closes, the stream's cache entry is dropped and every workflow that was created for it is
/// asked to stop (when a workflow manager is available).
fn handle_response_channel_closed(&mut self, stream_name: Arc<String>) {
    if let Some(channels) = self.stream_response_channels.get_mut(&stream_name) {
        // Drop every channel whose receiver has gone away. `retain` does this in a single
        // O(n) pass instead of the O(n^2) index-based removal loop.
        channels.retain(|channel| !channel.is_closed());

        if channels.is_empty() {
            info!(
                stream_name = %stream_name,
                "All response channels for stream {} closed", stream_name
            );

            self.stream_response_channels.remove(&stream_name);

            // No one cares about this stream any more; stop its workflows.
            if let Some(channel) = &self.workflow_manager {
                if let Some(cache) = self.cached_workflows_for_stream_name.remove(&stream_name)
                {
                    for workflow in cache.definitions {
                        let _ = channel.send(WorkflowManagerRequest {
                            request_id: format!(
                                "reactor_{}_stream_{}_closed",
                                self.name, stream_name
                            ),
                            operation: WorkflowManagerRequestOperation::StopWorkflow {
                                name: workflow.name,
                            },
                        });
                    }
                }
            }
        } else {
            info!(
                stream_name = %stream_name,
                "Response channel for stream {} closed but {} still remain",
                stream_name, channels.len(),
            );
        }
    }
}
}
/// Spawns a task that fires `UpdateStreamNameRequested` for the stream after `wait_time`,
/// unless the actor's internal channel closes first (which cancels the timer).
fn notify_after_update_interval(
    stream_name: Arc<String>,
    wait_time: Duration,
    actor_channel: UnboundedSender<FutureResult>,
) {
    tokio::spawn(async move {
        tokio::select! {
            _ = tokio::time::sleep(wait_time) => {
                let _ = actor_channel.send(FutureResult::UpdateStreamNameRequested {stream_name});
            }

            // Actor shut down; no one is listening for the update any more.
            _ = actor_channel.closed() => { }
        }
    });
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils;
use crate::workflows::definitions::{WorkflowStepDefinition, WorkflowStepType};
use futures::future::BoxFuture;
use futures::FutureExt;
use tokio::time::timeout;
/// Test wiring around a running reactor: the request handle plus the channels that stand in
/// for the event hub and the workflow manager (held so they stay open).
struct TestContext {
    _event_hub: UnboundedReceiver<SubscriptionRequest>,
    _workflow_manager_events: UnboundedSender<WorkflowManagerEvent>,
    workflow_manager: UnboundedReceiver<WorkflowManagerRequest>,
    reactor: UnboundedSender<ReactorRequest>,
}

/// Executor stub: returns `workflows` for `expected_name` and an invalid result otherwise.
struct TestExecutor {
    expected_name: Arc<String>,
    workflows: Vec<WorkflowDefinition>,
}
impl TestContext {
    /// Starts a reactor with the given executor, answers its event-hub subscription, and
    /// registers a fake workflow manager so manager-bound requests can be observed.
    async fn new(name: Arc<String>, duration: Duration, executor: TestExecutor) -> Self {
        let (sender, mut sub_receiver) = unbounded_channel();
        let reactor = start_reactor(name, Box::new(executor), sender, duration);

        // The reactor subscribes to workflow manager events on startup; capture that channel.
        let response = test_utils::expect_mpsc_response(&mut sub_receiver).await;
        let response_channel = match response {
            SubscriptionRequest::WorkflowManagerEvents { channel } => channel,
            event => panic!("Unexpected event: {:?}", event),
        };

        // Announce a workflow manager so the reactor forwards upsert/stop requests to it.
        let (wm_sender, wm_receiver) = unbounded_channel();
        response_channel
            .send(WorkflowManagerEvent::WorkflowManagerRegistered { channel: wm_sender })
            .expect("Channel closed");

        TestContext {
            reactor,
            _event_hub: sub_receiver,
            _workflow_manager_events: response_channel,
            workflow_manager: wm_receiver,
        }
    }
}
impl ReactorExecutor for TestExecutor {
    /// Returns the configured workflows when the requested stream matches the expected name,
    /// and an invalid result for any other stream.
    fn get_workflow(
        &self,
        stream_name: Arc<String>,
    ) -> BoxFuture<'static, ReactorExecutionResult> {
        if self.expected_name == stream_name {
            let workflows = self.workflows.clone();
            async { ReactorExecutionResult::valid(workflows) }.boxed()
        } else {
            async { ReactorExecutionResult::invalid() }.boxed()
        }
    }
}
#[tokio::test]
async fn can_get_routable_workflows_from_executor() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    let context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(0),
        executor,
    )
    .await;

    // Ask the reactor for workflows for the stream the executor recognizes.
    let (update_sender, mut update_receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: update_sender,
        })
        .expect("Channel closed");

    let update = test_utils::expect_mpsc_response(&mut update_receiver).await;
    assert!(update.is_valid, "Expected is valid to be true");

    // Only "first" and "third" are flagged routed_by_reactor in the test data.
    assert_eq!(
        update.routable_workflow_names.len(),
        2,
        "Expected 2 routable workflows"
    );

    assert!(
        update
            .routable_workflow_names
            .contains(&Arc::new("first".to_string())),
        "Did not find 'first' workflow in routable results"
    );

    assert!(
        update
            .routable_workflow_names
            .contains(&Arc::new("third".to_string())),
        "Did not find 'third' workflow in routable results"
    );
}
#[tokio::test]
async fn not_valid_if_stream_name_invalid() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    let context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(0),
        executor,
    )
    .await;

    // Request a stream the executor does not recognize; it should be rejected.
    let (update_sender, mut update_receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("invalid".to_string()),
            response_channel: update_sender,
        })
        .expect("Channel closed");

    let update = test_utils::expect_mpsc_response(&mut update_receiver).await;
    assert!(!update.is_valid, "Expected is valid to be false");
    assert_eq!(
        update.routable_workflow_names.len(),
        0,
        "Expected no routable workflow names"
    );
}
#[tokio::test]
async fn all_workflows_upserted_to_workflow_manager() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    let mut context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(0),
        executor,
    )
    .await;

    let (sender, _receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: sender,
        })
        .expect("Channel closed");

    // Expect exactly one upsert per test workflow (arrival order is not guaranteed);
    // each flag tracks whether "first"/"second"/"third" has been seen yet.
    let mut workflows_found = [false, false, false];
    loop {
        let request = test_utils::expect_mpsc_response(&mut context.workflow_manager).await;
        match request.operation {
            WorkflowManagerRequestOperation::UpsertWorkflow { definition } => {
                if definition.name.as_str() == "first" {
                    if workflows_found[0] {
                        panic!("Received duplicate upsert request for workflow 'first'");
                    }

                    assert_eq!(definition.steps.len(), 1, "Expected 1 workflows");
                    workflows_found[0] = true;
                } else if definition.name.as_str() == "second" {
                    if workflows_found[1] {
                        panic!("Received duplicate upsert request for workflow 'second'");
                    }

                    assert_eq!(definition.steps.len(), 2, "Expected 2 workflow steps");
                    workflows_found[1] = true;
                } else if definition.name.as_str() == "third" {
                    if workflows_found[2] {
                        panic!("Received duplicate upsert request for workflow 'third'");
                    }

                    assert_eq!(definition.steps.len(), 3, "Expected 3 workflow steps");
                    workflows_found[2] = true;
                } else {
                    panic!("Unexpected workflow: {}", definition.name);
                }
            }
            operation => panic!("Expected upsert request, instead got {:?}", operation),
        }

        if workflows_found[0] && workflows_found[1] && workflows_found[2] {
            break;
        }
    }

    // Nothing further should be sent once all three workflows were upserted.
    test_utils::expect_mpsc_timeout(&mut context.workflow_manager).await;
}
#[tokio::test]
async fn workflows_not_updated_when_duration_is_zero() {
    // NOTE(review): despite the test name, the reactor is started with a 10 second update
    // interval; the assertions only prove no update arrives within the first second. Confirm
    // whether Duration::from_secs(10) was intended to be zero here.
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    let context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_secs(10),
        executor,
    )
    .await;

    let (sender, mut receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: sender,
        })
        .expect("Channel closed");

    // The initial response arrives, then no further updates should be pushed.
    let _ = test_utils::expect_mpsc_response(&mut receiver).await;
    test_utils::expect_mpsc_timeout(&mut receiver).await;

    tokio::time::sleep(Duration::from_secs(1)).await;
    test_utils::expect_mpsc_timeout(&mut receiver).await;
}
#[tokio::test]
async fn routable_workflows_updated_when_duration_set() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    // A 500ms update interval means the reactor should re-run the executor and push a
    // fresh update to clients after that delay.
    let context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(500),
        executor,
    )
    .await;

    let (sender, mut receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: sender,
        })
        .expect("Channel closed");

    // Consume the initial response; nothing further should arrive before the interval.
    let _ = test_utils::expect_mpsc_response(&mut receiver).await;
    test_utils::expect_mpsc_timeout(&mut receiver).await;

    tokio::time::sleep(Duration::from_millis(500)).await;

    // After the interval elapses a second update should be delivered with the same
    // routable workflows ("first" and "third" are flagged routed_by_reactor).
    let update = test_utils::expect_mpsc_response(&mut receiver).await;
    assert!(update.is_valid, "Expected is valid to be true");
    assert_eq!(
        update.routable_workflow_names.len(),
        2,
        "Expected 2 routable workflows"
    );

    assert!(
        update
            .routable_workflow_names
            .contains(&Arc::new("first".to_string())),
        "Did not find 'first' workflow in routable results"
    );

    assert!(
        update
            .routable_workflow_names
            .contains(&Arc::new("third".to_string())),
        "Did not find 'third' workflow in routable results"
    );
}
#[tokio::test]
async fn all_workflows_upserted_to_workflow_manager_again_after_duration() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    let mut context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(500),
        executor,
    )
    .await;

    let (sender, _receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: sender,
        })
        .expect("Channel closed");

    // Drain the initial round of upsert requests from the first executor lookup.
    let wait_time = Duration::from_millis(10);
    while timeout(wait_time, context.workflow_manager.recv())
        .await
        .is_ok()
    {
        // Keep looping until we time out, thus the workflow manager channel becomes empty
    }

    // After the update interval the reactor re-runs the executor and should upsert each
    // workflow exactly once more.
    tokio::time::sleep(Duration::from_millis(500)).await;

    let mut workflows_found = [false, false, false];
    loop {
        let request = test_utils::expect_mpsc_response(&mut context.workflow_manager).await;
        match request.operation {
            WorkflowManagerRequestOperation::UpsertWorkflow { definition } => {
                if definition.name.as_str() == "first" {
                    if workflows_found[0] {
                        panic!("Received duplicate upsert request for workflow 'first'");
                    }

                    assert_eq!(definition.steps.len(), 1, "Expected 1 workflows");
                    workflows_found[0] = true;
                } else if definition.name.as_str() == "second" {
                    if workflows_found[1] {
                        panic!("Received duplicate upsert request for workflow 'second'");
                    }

                    assert_eq!(definition.steps.len(), 2, "Expected 2 workflow steps");
                    workflows_found[1] = true;
                } else if definition.name.as_str() == "third" {
                    if workflows_found[2] {
                        panic!("Received duplicate upsert request for workflow 'third'");
                    }

                    assert_eq!(definition.steps.len(), 3, "Expected 3 workflow steps");
                    workflows_found[2] = true;
                } else {
                    panic!("Unexpected workflow: {}", definition.name);
                }
            }
            operation => panic!("Expected upsert request, instead got {:?}", operation),
        }

        if workflows_found[0] && workflows_found[1] && workflows_found[2] {
            break;
        }
    }

    // No further requests should follow once all three have been re-upserted.
    test_utils::expect_mpsc_timeout(&mut context.workflow_manager).await;
}
#[tokio::test]
async fn workflow_manager_not_given_new_workflows_when_duration_is_zero() {
    let executor = TestExecutor {
        expected_name: Arc::new("stream".to_string()),
        workflows: get_test_workflows(),
    };

    // A zero interval means the reactor never schedules a refresh after the first lookup.
    let mut context = TestContext::new(
        Arc::new("reactor".to_string()),
        Duration::from_millis(0),
        executor,
    )
    .await;

    let (sender, _receiver) = unbounded_channel();
    context
        .reactor
        .send(ReactorRequest::CreateWorkflowNameForStream {
            stream_name: Arc::new("stream".to_string()),
            response_channel: sender,
        })
        .expect("Channel closed");

    // Drain the initial burst of upsert requests triggered by the first lookup.
    let wait_time = Duration::from_millis(10);
    while timeout(wait_time, context.workflow_manager.recv())
        .await
        .is_ok()
    {
        // Keep looping until we time out, thus the workflow manager channel becomes empty
    }

    // With no refresh scheduled, the workflow manager should receive nothing further.
    tokio::time::sleep(Duration::from_millis(500)).await;
    test_utils::expect_mpsc_timeout(&mut context.workflow_manager).await;
}
/// Builds the three workflow definitions shared by every test: "first" (routed, 1 step),
/// "second" (not routed, 2 steps), and "third" (routed, 3 steps).
fn get_test_workflows() -> Vec<WorkflowDefinition> {
    // Local builder so each definition reads as (name, routed flag, step types).
    fn workflow(name: &str, routed_by_reactor: bool, step_types: &[&str]) -> WorkflowDefinition {
        WorkflowDefinition {
            name: Arc::new(name.to_string()),
            routed_by_reactor,
            steps: step_types
                .iter()
                .map(|step_type| WorkflowStepDefinition {
                    step_type: WorkflowStepType(step_type.to_string()),
                    parameters: HashMap::new(),
                })
                .collect(),
        }
    }

    vec![
        workflow("first", true, &["a"]),
        workflow("second", false, &["b", "c"]),
        workflow("third", true, &["d", "e", "f"]),
    ]
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/reactors/executors/mod.rs | mmids-core/src/reactors/executors/mod.rs | pub mod simple_http_executor;
use crate::workflows::definitions::WorkflowDefinition;
use futures::future::BoxFuture;
use std::collections::HashMap;
use std::sync::Arc;
use thiserror::Error;
/// Contains the result from a reactor execution request about a stream
pub struct ReactorExecutionResult {
    /// Was the stream the reactor queried about valid
    pub stream_is_valid: bool,

    /// If the stream was valid, what workflows were defined. It's valid for a stream to be
    /// valid without any workflows.
    pub workflows_returned: Vec<WorkflowDefinition>,
}
/// Performs a request for workflow information on behalf of a reactor
pub trait ReactorExecutor {
    /// Requests the definition of a workflow based on a stream name
    fn get_workflow(&self, stream_name: Arc<String>) -> BoxFuture<'static, ReactorExecutionResult>;
}

/// Allows generating a reactor executor using parameters from a reactor definition
pub trait ReactorExecutorGenerator {
    /// Builds an executor from the reactor definition's key/value parameters, returning an
    /// error when required parameters are missing or invalid.
    fn generate(
        &self,
        parameters: &HashMap<String, Option<String>>,
    ) -> Result<Box<dyn ReactorExecutor + Send>, Box<dyn std::error::Error + Sync + Send>>;
}
/// Registry mapping executor names to the generators that can build them.
#[derive(Default)]
pub struct ReactorExecutorFactory {
    generators: HashMap<String, Box<dyn ReactorExecutorGenerator + Send>>,
}

/// Errors that can occur when registering an executor generator
#[derive(Error, Debug)]
pub enum RegistrationError {
    #[error("A reactor executor generator is already registered with the name '{0}'")]
    DuplicateName(String),
}

/// Errors that can occur when looking up an executor generator
#[derive(Error, Debug)]
pub enum GenerationError {
    #[error("No generators have been registered for the executor name '{0}'")]
    NoRegisteredGenerator(String),
}
impl ReactorExecutionResult {
pub fn invalid() -> Self {
ReactorExecutionResult {
stream_is_valid: false,
workflows_returned: Vec::new(),
}
}
pub fn valid(workflows: Vec<WorkflowDefinition>) -> Self {
ReactorExecutionResult {
stream_is_valid: true,
workflows_returned: workflows,
}
}
}
impl ReactorExecutorFactory {
pub fn new() -> Self {
Default::default()
}
pub fn register(
&mut self,
name: String,
generator: Box<dyn ReactorExecutorGenerator + Send>,
) -> Result<(), RegistrationError> {
if self.generators.contains_key(&name) {
return Err(RegistrationError::DuplicateName(name));
}
self.generators.insert(name, generator);
Ok(())
}
pub fn get_generator(
&self,
name: &str,
) -> Result<&dyn ReactorExecutorGenerator, GenerationError> {
match self.generators.get(name) {
Some(generator) => Ok(generator.as_ref()),
None => Err(GenerationError::NoRegisteredGenerator(name.to_string())),
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-core/src/reactors/executors/simple_http_executor.rs | mmids-core/src/reactors/executors/simple_http_executor.rs | use crate::config::MmidsConfig;
use crate::reactors::executors::{
ReactorExecutionResult, ReactorExecutor, ReactorExecutorGenerator,
};
use async_recursion::async_recursion;
use futures::future::BoxFuture;
use futures::FutureExt;
use hyper::http::HeaderValue;
use hyper::{Body, Client, Method, Request, StatusCode};
use serde::Serialize;
use std::collections::HashMap;
use std::error::Error;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tracing::{error, info, instrument};
// An HTTP lookup is attempted at most this many times (initial attempt included)
const MAX_RETRIES: u64 = 3;

// Base delay in seconds between attempts; retry N waits N * RETRY_DELAY seconds
const RETRY_DELAY: u64 = 5;
/// Attempts to query for a workflow definition by performing a simple HTTP POST request to the
/// configured URL. The request will contain a body with a json object containing the stream name to look
/// up the workflow for. It's expecting a response of either 404 (denoting that no workflow exists
/// for the stream name) or a 200. When a 200 is returned we are expecting definitions for one or
/// more workflows in the standard mmids configuration format.
///
/// Zero workflows are allowed in a 200 status code. This represents that the stream name is valid
/// (and should be allowed) but it does not have any specific workflows tied to it.
pub struct SimpleHttpExecutor {
    // Target URL for the POST request, taken from the reactor's `url` parameter
    url: Arc<String>,
}
impl ReactorExecutor for SimpleHttpExecutor {
    /// Kicks off the HTTP lookup for the given stream name against the configured URL.
    fn get_workflow(&self, stream_name: Arc<String>) -> BoxFuture<'static, ReactorExecutionResult> {
        execute_simple_http_executor(self.url.clone(), stream_name).boxed()
    }
}
/// Generator for `SimpleHttpExecutor`s; requires a `url` parameter in the reactor definition.
pub struct SimpleHttpExecutorGenerator {}

/// Errors that can occur when generating a simple HTTP executor
#[derive(Error, Debug)]
pub enum SimpleHttpExecutorError {
    #[error("The required parameter 'url' was not provided")]
    UrlParameterNotProvided,
}

/// JSON body sent to the external service for each lookup
#[derive(Serialize)]
struct RequestContent {
    stream_name: String,
}
impl ReactorExecutorGenerator for SimpleHttpExecutorGenerator {
    /// Builds a `SimpleHttpExecutor` from reactor parameters. The `url` parameter must be
    /// present and have a value; surrounding whitespace is trimmed.
    fn generate(
        &self,
        parameters: &HashMap<String, Option<String>>,
    ) -> Result<Box<dyn ReactorExecutor + Send>, Box<dyn Error + Sync + Send>> {
        if let Some(Some(url)) = parameters.get("url") {
            let url = Arc::new(url.trim().to_string());
            Ok(Box::new(SimpleHttpExecutor { url }))
        } else {
            Err(Box::new(SimpleHttpExecutorError::UrlParameterNotProvided))
        }
    }
}
/// Runs the HTTP lookup (with retries) and converts the resulting mmids config into an
/// execution result. Any failure after all retries is reported as an invalid stream.
#[instrument]
async fn execute_simple_http_executor(
    url: Arc<String>,
    stream_name: Arc<String>,
) -> ReactorExecutionResult {
    info!("Querying {} for workflow for stream '{}'", url, stream_name);

    match execute_with_retry(&url, &stream_name, 0).await {
        Ok(mut config) => {
            let workflows = config
                .workflows
                .drain()
                .map(|(_, workflow)| workflow)
                .collect();

            ReactorExecutionResult::valid(workflows)
        }

        Err(()) => ReactorExecutionResult::invalid(),
    }
}
fn build_request(url: &Arc<String>, stream_name: &str) -> Result<Request<Body>, ()> {
let content = match serde_json::to_string_pretty(&RequestContent {
stream_name: stream_name.to_owned(),
}) {
Ok(json) => json,
Err(error) => {
error!("Failed to serialize stream name to json: {:?}", error);
return Err(());
}
};
let request = Request::builder()
.method(Method::POST)
.uri(url.to_string())
.header(
hyper::http::header::CONTENT_TYPE,
HeaderValue::from_static("application/json"),
)
.body(Body::from(content));
match request {
Ok(request) => Ok(request),
Err(error) => {
error!("Failed to build request: {}", error);
Err(())
}
}
}
/// Performs the HTTP lookup, recursing with a linearly growing delay on transient failures.
/// Gives up after `MAX_RETRIES` attempts. `Err(())` means either a permanent failure or a
/// definitive "not found" answer from the server.
#[async_recursion]
async fn execute_with_retry(
    url: &Arc<String>,
    stream_name: &Arc<String>,
    times_retried: u64,
) -> Result<MmidsConfig, ()> {
    if times_retried >= MAX_RETRIES {
        info!("Too many retries, giving up");
        return Err(());
    }

    // First attempt waits zero seconds; retry N waits N * RETRY_DELAY seconds.
    let delay = times_retried * RETRY_DELAY;
    tokio::time::sleep(Duration::from_secs(delay)).await;

    if times_retried > 0 {
        info!("Attempting retry #{}", times_retried);
    }

    let request = match build_request(url, stream_name) {
        Ok(request) => request,
        Err(_) => return Err(()), // retry won't help building the request
    };

    if let Ok(config) = execute_http_call(request).await {
        if let Some(config) = config {
            Ok(config)
        } else {
            Err(()) // Since we got a valid not found result, don't bother retrying
        }
    } else {
        execute_with_retry(url, stream_name, times_retried + 1).await
    }
}
/// Performs the HTTP call and parses the response body as a mmids config.
///
/// Returns `Ok(None)` for a 404 (a definitive "stream not known" answer),
/// `Ok(Some(config))` for a parseable 200, and `Err(())` — after logging —
/// for every other failure.
async fn execute_http_call(request: Request<Body>) -> Result<Option<MmidsConfig>, ()> {
    let client = Client::new();
    let response = client.request(request).await.map_err(|error| {
        error!("Error performing request: {}", error);
    })?;

    match response.status() {
        StatusCode::OK => (),
        StatusCode::NOT_FOUND => {
            info!("Not found returned for request");
            return Ok(None);
        }
        status => {
            error!("Unexpected status code returned: {}", status);
            return Err(());
        }
    }

    let bytes = hyper::body::to_bytes(response.into_body())
        .await
        .map_err(|error| {
            error!("Failed to convert response to bytes: {}", error);
        })?;

    let content = String::from_utf8(bytes.to_vec()).map_err(|error| {
        error!("Failed to convert response to a UTF8 string: {}", error);
    })?;

    crate::config::parse(content.as_str())
        .map(Some)
        .map_err(|parse_error| {
            error!(
                "The response was not a valid mmids config format: {:?}",
                parse_error
            );
        })
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/lib.rs | mmids-ffmpeg/src/lib.rs | //! Components to integrate the FFMPEG into mmids workflows
pub mod endpoint;
pub mod workflow_steps;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/endpoint.rs | mmids-ffmpeg/src/endpoint.rs | //! Endpoint used to manage a local ffmpeg executable. Workflow steps can request FFMPEG be run
//! with specific parameters, and the endpoint will run it. If the ffmpeg process stops before
//! being requested to stop, then the endpoint will ensure it gets re-run.
use mmids_core::actor_utils::{notify_on_future_completion, notify_on_unbounded_recv};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::process::{Child, Command, Stdio};
use std::time::Duration;
use thiserror::Error;
use tokio::fs::{File, OpenOptions};
use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::time::sleep;
use tracing::{error, info, instrument};
use uuid::Uuid;
/// Requests of ffmpeg operations
#[derive(Debug)]
pub enum FfmpegEndpointRequest {
/// Request that ffmpeg should be started with the specified parameters
StartFfmpeg {
/// A unique identifier to use for this ffmpeg operation. Any further requests that should
/// affect this ffmpeg operation should use this same identifier
id: Uuid,
/// The channel that the endpoint will send notifications on to notify the requester of
/// changes in the ffmpeg operation.
notification_channel: UnboundedSender<FfmpegEndpointNotification>,
/// What parameters ffmpeg should be run with
params: FfmpegParams,
},
/// Requests that the specified ffmpeg operation should be stopped
StopFfmpeg {
/// The identifier of the existing ffmpeg operation
id: Uuid,
},
}
/// Notifications of what's happening with an ffmpeg operation
#[derive(Debug)]
pub enum FfmpegEndpointNotification {
    /// The ffmpeg process was successfully spawned
    FfmpegStarted,
    /// The ffmpeg process has stopped, either because it was requested to stop
    /// or because the process exited on its own
    FfmpegStopped,
    /// The ffmpeg process could not be started; `cause` describes why
    FfmpegFailedToStart { cause: FfmpegFailureCause },
}
/// Reasons that ffmpeg may fail to start
#[derive(Debug)]
pub enum FfmpegFailureCause {
/// The log file for ffmpeg's standard output could not be created
LogFileCouldNotBeCreated(String, std::io::Error),
/// ffmpeg was requested to be started with an identifier that matches an ffmpeg operation
/// that's already being run.
DuplicateId(Uuid),
/// The ffmpeg process failed to start due to an issue with the executable itself
FfmpegFailedToStart,
}
/// Error that occurs when starting the ffmpeg endpoint
#[derive(Error, Debug)]
pub enum FfmpegEndpointStartError {
#[error("The ffmpeg executable '{0}' was not found")]
FfmpegExecutableNotFound(String),
#[error("Failed to create log directory")]
LogDirectoryCreationFailure,
#[error("The log directory '{0}' is an invalid path")]
LogDirectoryInvalidPath(String),
}
/// H264 presets
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum H264Preset {
UltraFast,
SuperFast,
VeryFast,
Faster,
Fast,
Medium,
Slow,
Slower,
VerySlow,
}
/// Video transcode instructions
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum VideoTranscodeParams {
Copy,
H264 { preset: H264Preset },
}
/// Audio transcode instructions
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AudioTranscodeParams {
Copy,
Aac,
}
/// Where should ffmpeg send the media
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum TargetParams {
/// Send the media stream to an RTMP server
Rtmp { url: String },
/// Save the media stream as an HLS playlist
Hls {
/// The directory the playlist should be saved to.
path: String,
/// How long (in seconds) should each segment be
segment_length: u16,
/// The maximum number of segments that should be in the playlist. If none is specified
/// than ffmpeg's default will be used
max_entries: Option<u16>,
},
}
/// The dimensions video should be scaled to
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct VideoScale {
pub width: u16,
pub height: u16,
}
/// Parameters to pass to the ffmpeg process
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FfmpegParams {
pub read_in_real_time: bool,
pub input: String,
pub video_transcode: VideoTranscodeParams,
pub scale: Option<VideoScale>,
pub audio_transcode: AudioTranscodeParams,
pub bitrate_in_kbps: Option<u16>,
pub target: TargetParams,
}
/// Starts a new ffmpeg endpoint, and returns the channel in which the newly created endpoint
/// can be communicated with.
pub fn start_ffmpeg_endpoint(
    ffmpeg_exe_path: String,
    log_root: String,
) -> Result<UnboundedSender<FfmpegEndpointRequest>, FfmpegEndpointStartError> {
    let (request_sender, request_receiver) = unbounded_channel();
    let (internal_sender, internal_receiver) = unbounded_channel();

    // Actor construction validates the executable path and log directory up
    // front, so those failures surface to the caller instead of inside the
    // spawned task.
    let actor = Actor::new(ffmpeg_exe_path, log_root, request_receiver, internal_sender)?;
    tokio::spawn(actor.run(internal_receiver));

    Ok(request_sender)
}
/// Internal messages driving the endpoint actor's event loop.
enum FutureResult {
    // Every request sender has been dropped; the endpoint should shut down.
    AllConsumersGone,
    // The notification channel for the given ffmpeg operation was closed.
    NotificationChannelGone(Uuid),
    // A new endpoint request arrived.
    RequestReceived(FfmpegEndpointRequest),
    // Periodic timer fired; poll the given process for exit.
    CheckProcess(Uuid),
}

/// A running ffmpeg child process plus the channel used to notify its requester.
struct FfmpegProcess {
    handle: Child,
    notification_channel: UnboundedSender<FfmpegEndpointNotification>,
}

/// Actor owning every ffmpeg child process managed by this endpoint.
struct Actor {
    // Sender for feeding internal messages (process checks, etc.) back to `run`.
    internal_sender: UnboundedSender<FutureResult>,
    ffmpeg_exe_path: String,
    // Directory where per-process log files are written.
    log_path: PathBuf,
    processes: HashMap<Uuid, FfmpegProcess>,
}
impl Actor {
    /// Validates the ffmpeg executable and the log directory, wires the request
    /// receiver into the internal message stream, and builds the actor state.
    ///
    /// Fails when `ffmpeg_exe_path` is not an existing file, when `log_root`
    /// points at a file, or when the log directory cannot be created.
    fn new(
        ffmpeg_exe_path: String,
        log_root: String,
        request_receiver: UnboundedReceiver<FfmpegEndpointRequest>,
        actor_sender: UnboundedSender<FutureResult>,
    ) -> Result<Self, FfmpegEndpointStartError> {
        let path = Path::new(ffmpeg_exe_path.as_str());
        if !path.is_file() {
            return Err(FfmpegEndpointStartError::FfmpegExecutableNotFound(
                ffmpeg_exe_path,
            ));
        }
        let mut path = PathBuf::from(log_root.as_str());
        if path.is_file() {
            // We expected the path to be a new or existing directory, not a file
            return Err(FfmpegEndpointStartError::LogDirectoryInvalidPath(log_root));
        }
        // All per-process ffmpeg logs live under `<log_root>/ffmpeg_stdout`.
        path.push("ffmpeg_stdout");
        if !path.exists() {
            if let Err(error) = std::fs::create_dir_all(&path) {
                error!(
                    "Could not create log directory '{}': {:?}",
                    path.display().to_string(),
                    error
                );
                return Err(FfmpegEndpointStartError::LogDirectoryCreationFailure);
            }
        }
        // Forward incoming endpoint requests into the actor's single message
        // stream; when all request senders drop, `AllConsumersGone` is raised.
        notify_on_unbounded_recv(
            request_receiver,
            actor_sender.clone(),
            FutureResult::RequestReceived,
            || FutureResult::AllConsumersGone,
        );
        Ok(Actor {
            internal_sender: actor_sender,
            ffmpeg_exe_path,
            log_path: path,
            processes: HashMap::new(),
        })
    }

    /// Main event loop: processes internal messages until every request sender
    /// is gone, then kills any still-running ffmpeg processes on the way out.
    async fn run(mut self, mut receiver: UnboundedReceiver<FutureResult>) {
        info!("Ffmpeg endpoint started");
        info!("Ffmpeg path: {}", self.ffmpeg_exe_path);
        while let Some(result) = receiver.recv().await {
            match result {
                FutureResult::AllConsumersGone => {
                    info!("All consumers gone");
                    break;
                }
                FutureResult::NotificationChannelGone(id) => {
                    self.handle_notification_channel_gone(id);
                }
                FutureResult::CheckProcess(id) => {
                    self.check_status(id);
                }
                FutureResult::RequestReceived(request) => {
                    self.handle_request(request).await;
                }
            }
        }
        info!("Ffmpeg endpoint closing");
        // Shut down every remaining child process before the actor exits.
        for (id, process) in self.processes.drain() {
            stop_process(id, process);
        }
    }

    /// Polls the given ffmpeg process for exit. If it is still running,
    /// schedules the next check; if it has exited (or its status could not be
    /// read), removes it and notifies the requester that ffmpeg stopped.
    #[instrument(skip(self, id), fields(ffmpeg_id = ?id))]
    fn check_status(&mut self, id: Uuid) {
        let mut has_exited = false;
        if let Some(process) = self.processes.get_mut(&id) {
            has_exited = match process.handle.try_wait() {
                Ok(None) => false, // still running
                Ok(Some(status)) => {
                    info!("Ffmpeg process {} exited with status {}", id, status);
                    true
                }
                Err(e) => {
                    info!(
                        "Error attempting to get status for ffmpeg process {}: {}",
                        id, e
                    );
                    // Status could not be read; kill defensively and treat as exited.
                    let _ = process.handle.kill();
                    true
                }
            };
            if !has_exited {
                notify_on_next_check(id, self.internal_sender.clone());
            }
        }
        if has_exited {
            // `has_exited` can only be true when the entry existed above, and
            // nothing between the lookup and here removes it, so unwrap is safe.
            let process = self.processes.remove(&id).unwrap();
            let _ = process
                .notification_channel
                .send(FfmpegEndpointNotification::FfmpegStopped);
        }
    }

    /// Stops the ffmpeg process whose requester stopped listening for
    /// notifications — nobody is left to care about the stream.
    fn handle_notification_channel_gone(&mut self, id: Uuid) {
        info!(id = ?id, "Consumer for ffmpeg process {} is gone", id);
        if let Some(process) = self.processes.remove(&id) {
            stop_process(id, process);
        }
    }

    /// Handles a start or stop request from an endpoint consumer.
    ///
    /// A start request opens (or appends to) a per-id log file, spawns ffmpeg,
    /// schedules the first status check, and begins watching the requester's
    /// notification channel for closure. Failures are reported back on the
    /// notification channel rather than returned.
    async fn handle_request(&mut self, request: FfmpegEndpointRequest) {
        match request {
            FfmpegEndpointRequest::StopFfmpeg { id } => {
                if let Some(process) = self.processes.remove(&id) {
                    stop_process(id, process);
                }
            }
            FfmpegEndpointRequest::StartFfmpeg {
                id,
                params,
                notification_channel,
            } => {
                // Reject ids already in use; each operation must be unique.
                if self.processes.contains_key(&id) {
                    let _ = notification_channel.send(
                        FfmpegEndpointNotification::FfmpegFailedToStart {
                            cause: FfmpegFailureCause::DuplicateId(id),
                        },
                    );
                    return;
                }
                let log_file_name = format!("{}.log", id);
                let log_path = self.log_path.as_path().join(log_file_name.as_str());
                let log_file_result = OpenOptions::new()
                    .append(true)
                    .create(true)
                    .open(log_path)
                    .await;
                let mut log_file = match log_file_result {
                    Ok(x) => x,
                    Err(e) => {
                        error!("Failed to create ffmpeg log file '{}'", log_file_name);
                        let _ = notification_channel.send(
                            FfmpegEndpointNotification::FfmpegFailedToStart {
                                cause: FfmpegFailureCause::LogFileCouldNotBeCreated(
                                    log_file_name.to_string(),
                                    e,
                                ),
                            },
                        );
                        return;
                    }
                };
                // Add a separator so we have a clear boundary when appending to an existing log file.
                // We will append if we re-use the same ffmpeg id multiple times. This is usually done
                // to keep the logs from a restarting ffmpeg instance together.
                let _ = log_file
                    .write(b"\n\n------------------New Execution----------------\n\n")
                    .await;
                let handle = match self.start_ffmpeg(&id, &params, log_file) {
                    Ok(x) => x,
                    Err(e) => {
                        error!("Failed to start ffmpeg: {}", e);
                        let _ = notification_channel.send(
                            FfmpegEndpointNotification::FfmpegFailedToStart {
                                cause: FfmpegFailureCause::FfmpegFailedToStart,
                            },
                        );
                        return;
                    }
                };
                // Begin the periodic liveness polling for this process.
                notify_on_next_check(id, self.internal_sender.clone());
                let _ = notification_channel.send(FfmpegEndpointNotification::FfmpegStarted);
                self.processes.insert(
                    id,
                    FfmpegProcess {
                        handle,
                        notification_channel: notification_channel.clone(),
                    },
                );
                // When the requester drops its notification receiver, stop ffmpeg.
                notify_on_future_completion(
                    async move { notification_channel.closed().await },
                    self.internal_sender.clone(),
                    move |_| FutureResult::NotificationChannelGone(id),
                );
            }
        }
    }

    /// Translates `params` into an ffmpeg argument list, spawns the child
    /// process, and streams its stderr output into `log_file`.
    fn start_ffmpeg(
        &self,
        id: &Uuid,
        params: &FfmpegParams,
        mut log_file: File,
    ) -> Result<Child, std::io::Error> {
        let mut args = Vec::new();
        if params.read_in_real_time {
            args.push("-re".to_string());
        }
        args.push("-i".to_string());
        args.push(params.input.to_string());
        args.push("-vcodec".to_string());
        match &params.video_transcode {
            VideoTranscodeParams::Copy => args.push("copy".to_string()),
            VideoTranscodeParams::H264 { preset } => {
                args.push("libx264".to_string());
                args.push("-preset".to_string());
                match preset {
                    H264Preset::UltraFast => args.push("ultrafast".to_string()),
                    H264Preset::SuperFast => args.push("superfast".to_string()),
                    H264Preset::VeryFast => args.push("veryfast".to_string()),
                    H264Preset::Faster => args.push("faster".to_string()),
                    H264Preset::Fast => args.push("fast".to_string()),
                    H264Preset::Medium => args.push("medium".to_string()),
                    H264Preset::Slow => args.push("slow".to_string()),
                    H264Preset::Slower => args.push("slower".to_string()),
                    H264Preset::VerySlow => args.push("veryslow".to_string()),
                }
            }
        }
        if let Some(bitrate) = &params.bitrate_in_kbps {
            // Pin min/target/max to the same rate for a constant bitrate.
            let rate = format!("{}K", bitrate);
            args.push("-b:v".to_string());
            args.push(rate.clone());
            args.push("-minrate".to_string());
            args.push(rate.clone());
            args.push("-maxrate".to_string());
            args.push(rate);
        }
        if let Some(scale) = &params.scale {
            args.push("-vf".to_string());
            args.push(format!("scale={}:{}", scale.width, scale.height));
        }
        args.push("-acodec".to_string());
        match &params.audio_transcode {
            AudioTranscodeParams::Copy => args.push("copy".to_string()),
            AudioTranscodeParams::Aac => args.push("aac".to_string()),
        }
        args.push("-f".to_string());
        match &params.target {
            TargetParams::Rtmp { url } => {
                args.push("flv".to_string());
                args.push(url.to_string());
            }
            TargetParams::Hls {
                path,
                max_entries,
                segment_length,
            } => {
                args.push("hls".to_string());
                args.push("-hls_time".to_string());
                args.push(segment_length.to_string());
                if let Some(entries) = max_entries {
                    args.push("-hls_list_size".to_string());
                    args.push(entries.to_string());
                }
                args.push(path.clone());
            }
        }
        args.push("-y".to_string()); // always overwrite
        args.push("-nostats".to_string());
        info!(
            ffmpeg_id = ?id,
            "Starting ffmpeg for id {} with the following arguments: {:?}",
            id, args
        );
        let mut command = Command::new(&self.ffmpeg_exe_path)
            .args(args)
            .stderr(Stdio::piped()) // ffmpeg seems to write output to stderr
            .spawn()?;
        if let Some(stderr) = command.stderr.take() {
            // NOTE(review): despite the name, `stdout` here wraps the child's
            // stderr stream (see the `.stderr(Stdio::piped())` above).
            if let Ok(mut stdout) = tokio::process::ChildStderr::from_std(stderr) {
                tokio::spawn(async move {
                    let _ = tokio::io::copy(&mut stdout, &mut log_file).await;
                });
            }
        }
        Ok(command)
    }
}
/// Kills the given ffmpeg child process and tells its requester it stopped.
fn stop_process(id: Uuid, mut process: FfmpegProcess) {
    info!(id = ?id, "Killing ffmpeg process {}", id);

    // Kill can fail if the process already exited; either way we report stopped.
    let _ = process.handle.kill();

    let notification = FfmpegEndpointNotification::FfmpegStopped;
    let _ = process.notification_channel.send(notification);
}
/// Schedules a `CheckProcess` message for the given ffmpeg id five seconds from now.
fn notify_on_next_check(id: Uuid, actor_sender: UnboundedSender<FutureResult>) {
    notify_on_future_completion(sleep(Duration::from_secs(5)), actor_sender, move |_| {
        FutureResult::CheckProcess(id)
    });
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/ffmpeg_handler.rs | mmids-ffmpeg/src/workflow_steps/ffmpeg_handler.rs | use crate::endpoint::{FfmpegEndpointNotification, FfmpegEndpointRequest, FfmpegParams};
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::StreamId;
use mmids_rtmp::workflow_steps::external_stream_handler::{
ExternalStreamHandler, ExternalStreamHandlerGenerator, ResolvedFutureStatus,
StreamHandlerFutureResult, StreamHandlerFutureWrapper,
};
use std::sync::Arc;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use tracing::{error, info, instrument, warn};
use uuid::Uuid;
/// Stream handler that manages one ffmpeg operation for a single stream.
pub struct FfmpegHandler {
    // Channel to the ffmpeg endpoint that spawns/stops processes.
    ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
    // Current lifecycle state of the ffmpeg operation.
    status: FfmpegHandlerStatus,
    // Produces the ffmpeg parameters for this stream.
    param_generator: Arc<Box<dyn FfmpegParameterGenerator + Sync + Send>>,
    stream_id: StreamId,
    // Identifier under which the endpoint tracks this operation.
    ffmpeg_id: Uuid,
}

/// Factory producing an [`FfmpegHandler`] per stream, sharing one endpoint
/// channel and one parameter generator across all handlers.
pub struct FfmpegHandlerGenerator {
    ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
    param_generator: Arc<Box<dyn FfmpegParameterGenerator + Sync + Send>>,
}

/// Produces the ffmpeg parameters to use for a given stream.
pub trait FfmpegParameterGenerator {
    fn form_parameters(&self, stream_id: &StreamId, stream_name: &str) -> FfmpegParams;
}

/// Lifecycle state of the handler's ffmpeg operation.
#[derive(Debug)]
enum FfmpegHandlerStatus {
    // No ffmpeg process running or requested.
    Inactive,
    // Start requested; waiting for the endpoint's confirmation.
    Pending,
    // Endpoint confirmed ffmpeg is running.
    Active,
}

/// Futures resolved back into this handler.
enum FutureResult {
    FfmpegChannelGone,
    NotificationReceived(FfmpegEndpointNotification),
}

impl StreamHandlerFutureResult for FutureResult {}
impl FfmpegHandlerGenerator {
    /// Creates a generator that hands each stream's handler the given ffmpeg
    /// endpoint channel and (shared) parameter generator.
    pub fn new(
        ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
        param_generator: Box<dyn FfmpegParameterGenerator + Sync + Send>,
    ) -> Self {
        let param_generator = Arc::new(param_generator);
        Self {
            param_generator,
            ffmpeg_endpoint,
        }
    }
}
impl ExternalStreamHandlerGenerator for FfmpegHandlerGenerator {
    /// Builds a handler for the given stream with a fresh ffmpeg operation id,
    /// starting in the inactive state.
    fn generate(&self, stream_id: StreamId) -> Box<dyn ExternalStreamHandler + Sync + Send> {
        Box::new(FfmpegHandler {
            ffmpeg_endpoint: self.ffmpeg_endpoint.clone(),
            param_generator: self.param_generator.clone(),
            stream_id,
            status: FfmpegHandlerStatus::Inactive,
            ffmpeg_id: Uuid::new_v4(),
        })
    }
}
impl FfmpegHandler {
    /// Updates the handler's state machine based on an endpoint notification:
    /// Pending -> Active on start, and back to Inactive on stop or start
    /// failure. A start notification arriving in any state other than Pending
    /// is logged as an error and ignored.
    #[instrument(skip(self, notification), fields(stream_id = ?self.stream_id, ffmpeg_id = ?self.ffmpeg_id))]
    fn handle_ffmpeg_notification(&mut self, notification: FfmpegEndpointNotification) {
        match notification {
            FfmpegEndpointNotification::FfmpegStarted => match &self.status {
                FfmpegHandlerStatus::Pending => {
                    info!(
                        "Received notification that ffmpeg became active for stream id {:?} and ffmpeg id {}",
                        self.stream_id, self.ffmpeg_id
                    );
                    self.status = FfmpegHandlerStatus::Active;
                }
                status => {
                    error!(
                        "Received notification that ffmpeg became active for stream id {:?}, \
                        but the handler's status was {:?} instead of pending",
                        self.stream_id, status
                    );
                }
            },
            FfmpegEndpointNotification::FfmpegStopped => {
                info!(
                    "Received ffmpeg stopped notification for stream {:?}",
                    self.stream_id
                );
                self.status = FfmpegHandlerStatus::Inactive;
            }
            FfmpegEndpointNotification::FfmpegFailedToStart { cause } => {
                warn!(
                    "Ffmpeg failed to start for stream {:?}: {:?}",
                    self.stream_id, cause
                );
                self.status = FfmpegHandlerStatus::Inactive;
            }
        }
    }
}
impl ExternalStreamHandler for FfmpegHandler {
    /// Requests ffmpeg be started for the stream if no operation is running or
    /// pending. Registers the endpoint's notification stream with the step's
    /// futures channel so responses flow back through `handle_resolved_future`.
    fn prepare_stream(&mut self, stream_name: &str, futures_channel: &WorkflowStepFuturesChannel) {
        if let FfmpegHandlerStatus::Inactive = &self.status {
            let parameters = self
                .param_generator
                .form_parameters(&self.stream_id, stream_name);
            let (sender, receiver) = unbounded_channel();
            let _ = self
                .ffmpeg_endpoint
                .send(FfmpegEndpointRequest::StartFfmpeg {
                    id: self.ffmpeg_id,
                    params: parameters,
                    notification_channel: sender,
                });
            let recv_stream_id = self.stream_id.clone();
            let closed_stream_id = self.stream_id.clone();
            futures_channel.send_on_generic_unbounded_recv(
                receiver,
                move |notification| StreamHandlerFutureWrapper {
                    stream_id: recv_stream_id.clone(),
                    future: Box::new(FutureResult::NotificationReceived(notification)),
                },
                move || StreamHandlerFutureWrapper {
                    stream_id: closed_stream_id,
                    future: Box::new(FutureResult::FfmpegChannelGone),
                },
            );
            self.status = FfmpegHandlerStatus::Pending;
        }
    }

    /// Requests ffmpeg be stopped when an operation is pending or active;
    /// no-op when inactive.
    fn stop_stream(&mut self) {
        // The Pending and Active arms were previously duplicated verbatim;
        // an or-pattern expresses the same behavior once.
        match &self.status {
            FfmpegHandlerStatus::Pending | FfmpegHandlerStatus::Active => {
                let _ = self
                    .ffmpeg_endpoint
                    .send(FfmpegEndpointRequest::StopFfmpeg { id: self.ffmpeg_id });
            }
            FfmpegHandlerStatus::Inactive => (),
        }
    }

    /// Routes a resolved future back into the handler's state machine. A lost
    /// ffmpeg channel means the stream should be stopped; notifications update
    /// the status. Futures of a foreign type are treated as handled.
    fn handle_resolved_future(
        &mut self,
        future: Box<dyn StreamHandlerFutureResult>,
    ) -> ResolvedFutureStatus {
        let future = match future.downcast::<FutureResult>() {
            Ok(x) => *x,
            Err(_) => return ResolvedFutureStatus::Success,
        };
        match future {
            FutureResult::FfmpegChannelGone => ResolvedFutureStatus::StreamShouldBeStopped,
            FutureResult::NotificationReceived(notification) => {
                self.handle_ffmpeg_notification(notification);
                ResolvedFutureStatus::Success
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::endpoint::{AudioTranscodeParams, TargetParams, VideoTranscodeParams};
use mmids_core::workflows::definitions::WorkflowStepId;
use mmids_core::workflows::steps::futures_channel::FuturesChannelResult;
use tokio::sync::mpsc::UnboundedReceiver;
struct TestParamGenerator;
impl FfmpegParameterGenerator for TestParamGenerator {
fn form_parameters(&self, stream_id: &StreamId, stream_name: &str) -> FfmpegParams {
FfmpegParams {
audio_transcode: AudioTranscodeParams::Copy,
video_transcode: VideoTranscodeParams::Copy,
bitrate_in_kbps: None,
scale: None,
read_in_real_time: true,
input: stream_name.to_string(),
target: TargetParams::Rtmp {
url: stream_id.0.to_string(),
},
}
}
}
struct TestContext {
ffmpeg: UnboundedReceiver<FfmpegEndpointRequest>,
handler: Box<dyn ExternalStreamHandler>,
step_futures_channel: WorkflowStepFuturesChannel,
_step_futures_receiver: UnboundedReceiver<FuturesChannelResult>,
}
impl TestContext {
fn new() -> Self {
let (request_sender, request_receiver) = unbounded_channel();
let generator = FfmpegHandlerGenerator {
ffmpeg_endpoint: request_sender,
param_generator: Arc::new(Box::new(TestParamGenerator)),
};
let (futures_sender, futures_receiver) = unbounded_channel();
let futures_channel =
WorkflowStepFuturesChannel::new(WorkflowStepId(234), futures_sender);
let handler = generator.generate(StreamId(Arc::new("test".to_string())));
TestContext {
handler,
ffmpeg: request_receiver,
step_futures_channel: futures_channel,
_step_futures_receiver: futures_receiver,
}
}
}
#[tokio::test]
async fn prepare_stream_sends_start_ffmpeg_request() {
let mut context = TestContext::new();
context
.handler
.prepare_stream("name", &context.step_futures_channel);
match context.ffmpeg.try_recv() {
Ok(FfmpegEndpointRequest::StartFfmpeg {
id: _,
params,
notification_channel: _,
}) => {
assert_eq!(¶ms.input, "name", "Unexpected parameter name");
}
other => panic!("Expected Ok(StartFfmpeg), instead got {:?}", other),
}
}
#[tokio::test]
async fn stop_ffmpeg_sent_when_stop_stream_called() {
let mut context = TestContext::new();
context
.handler
.prepare_stream("name", &context.step_futures_channel);
let _ = context.ffmpeg.try_recv();
context.handler.stop_stream();
match context.ffmpeg.try_recv() {
Ok(FfmpegEndpointRequest::StopFfmpeg { id: _ }) => (),
other => panic!("Expected Ok(StopFfmpeg) instead got {:?}", other),
}
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/mod.rs | mmids-ffmpeg/src/workflow_steps/mod.rs | //! Workflow steps to integrate FFMPEG into mmids workflows
pub mod ffmpeg_handler;
pub mod ffmpeg_hls;
pub mod ffmpeg_pull;
pub mod ffmpeg_rtmp_push;
pub mod ffmpeg_transcode;
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/ffmpeg_transcode/tests.rs | mmids-ffmpeg/src/workflow_steps/ffmpeg_transcode/tests.rs | use crate::endpoint::{
AudioTranscodeParams, FfmpegEndpointNotification, FfmpegEndpointRequest, FfmpegParams,
H264Preset, TargetParams, VideoTranscodeParams,
};
use crate::workflow_steps::ffmpeg_transcode::{
FfmpegTranscoderStepGenerator, AUDIO_CODEC_NAME, BITRATE_NAME, H264_PRESET_NAME, SIZE_NAME,
VIDEO_CODEC_NAME,
};
use anyhow::Result;
use bytes::{Bytes, BytesMut};
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::net::ConnectionId;
use mmids_core::workflows::definitions::{WorkflowStepDefinition, WorkflowStepType};
use mmids_core::workflows::metadata::common_metadata::{
get_is_keyframe_metadata_key, get_pts_offset_metadata_key,
};
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataKey, MetadataKeyMap, MetadataValue,
};
use mmids_core::workflows::steps::test_utils::StepTestContext;
use mmids_core::workflows::steps::StepStatus;
use mmids_core::workflows::{MediaNotification, MediaNotificationContent, MediaType};
use mmids_core::{test_utils, StreamId};
use mmids_rtmp::rtmp_server::{
RtmpEndpointMediaData, RtmpEndpointMediaMessage, RtmpEndpointPublisherMessage,
RtmpEndpointRequest, RtmpEndpointWatcherNotification, StreamKeyRegistration,
};
use rml_rtmp::sessions::StreamMetadata;
use rml_rtmp::time::RtmpTimestamp;
use std::collections::HashMap;
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use uuid::Uuid;
struct TestContext {
step_context: StepTestContext,
rtmp_endpoint: UnboundedReceiver<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedReceiver<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
struct DefinitionBuilder {
vcodec: Option<String>,
acodec: Option<String>,
h264_preset: Option<String>,
size: Option<String>,
bitrate: Option<u16>,
}
impl DefinitionBuilder {
fn new() -> Self {
DefinitionBuilder {
vcodec: None,
acodec: None,
h264_preset: None,
size: None,
bitrate: None,
}
}
fn vcodec(mut self, vcodec: &str) -> Self {
self.vcodec = Some(vcodec.to_string());
self
}
fn acodec(mut self, acodec: &str) -> Self {
self.acodec = Some(acodec.to_string());
self
}
fn h264_preset(mut self, preset: &str) -> Self {
self.h264_preset = Some(preset.to_string());
self
}
fn size(mut self, size: &str) -> Self {
self.size = Some(size.to_string());
self
}
fn bitrate(mut self, bitrate: u16) -> Self {
self.bitrate = Some(bitrate);
self
}
fn build(self) -> WorkflowStepDefinition {
let mut definition = WorkflowStepDefinition {
step_type: WorkflowStepType("ffmpeg_transocde".to_string()),
parameters: HashMap::new(),
};
if let Some(vcodec) = self.vcodec {
definition
.parameters
.insert(VIDEO_CODEC_NAME.to_string(), Some(vcodec));
} else {
definition
.parameters
.insert(VIDEO_CODEC_NAME.to_string(), Some("copy".to_string()));
}
if let Some(acodec) = self.acodec {
definition
.parameters
.insert(AUDIO_CODEC_NAME.to_string(), Some(acodec));
} else {
definition
.parameters
.insert(AUDIO_CODEC_NAME.to_string(), Some("copy".to_string()));
}
if let Some(preset) = self.h264_preset {
definition
.parameters
.insert(H264_PRESET_NAME.to_string(), Some(preset));
}
if let Some(size) = self.size {
definition
.parameters
.insert(SIZE_NAME.to_string(), Some(size));
}
if let Some(bitrate) = self.bitrate {
definition
.parameters
.insert(BITRATE_NAME.to_string(), Some(bitrate.to_string()));
}
definition
}
}
impl TestContext {
fn new(definition: WorkflowStepDefinition) -> Result<Self> {
let (rtmp_sender, rtmp_receiver) = unbounded_channel();
let (ffmpeg_sender, ffmpeg_receiver) = unbounded_channel();
let mut metadata_map = MetadataKeyMap::new();
let is_keyframe_metadata_key = get_is_keyframe_metadata_key(&mut metadata_map);
let pts_offset_metadata_key = get_pts_offset_metadata_key(&mut metadata_map);
let generator = FfmpegTranscoderStepGenerator {
ffmpeg_endpoint: ffmpeg_sender,
rtmp_server_endpoint: rtmp_sender,
is_keyframe_metadata_key,
pts_offset_metadata_key,
};
let step_context = StepTestContext::new(Box::new(generator), definition)?;
Ok(TestContext {
step_context,
rtmp_endpoint: rtmp_receiver,
ffmpeg_endpoint: ffmpeg_receiver,
is_keyframe_metadata_key,
pts_offset_metadata_key,
})
}
async fn accept_watch_registration(
&mut self,
) -> (
UnboundedSender<RtmpEndpointWatcherNotification>,
UnboundedReceiver<RtmpEndpointMediaMessage>,
) {
let request = test_utils::expect_mpsc_response(&mut self.rtmp_endpoint).await;
let channels = match request {
RtmpEndpointRequest::ListenForWatchers {
media_channel,
notification_channel,
..
} => {
notification_channel
.send(RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful)
.expect("Failed to send registration response");
(notification_channel, media_channel)
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
self.step_context.execute_pending_futures().await;
channels
}
async fn accept_publish_registration(
&mut self,
) -> UnboundedSender<RtmpEndpointPublisherMessage> {
let request = test_utils::expect_mpsc_response(&mut self.rtmp_endpoint).await;
let channel = match request {
RtmpEndpointRequest::ListenForPublishers {
message_channel, ..
} => {
message_channel
.send(RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful)
.expect("Failed to send registration response");
message_channel
}
request => panic!("Unexpected rtmp request seen: {:?}", request),
};
self.step_context.execute_pending_futures().await;
channel
}
async fn process_ffmpeg_event(
&mut self,
) -> (
UnboundedSender<FfmpegEndpointNotification>,
FfmpegParams,
Uuid,
) {
let request = test_utils::expect_mpsc_response(&mut self.ffmpeg_endpoint).await;
match request {
FfmpegEndpointRequest::StartFfmpeg {
notification_channel,
params,
id,
} => (notification_channel, params, id),
request => panic!("Unexpected request: {:?}", request),
}
}
}
#[tokio::test]
async fn step_starts_in_active_state() {
let definition = DefinitionBuilder::new().build();
let context = TestContext::new(definition).unwrap();
let status = context.step_context.status;
assert_eq!(status, StepStatus::Active, "Unexpected step status");
}
#[tokio::test]
async fn step_fails_to_build_when_invalid_vcodec_specified() {
let definition = DefinitionBuilder::new().vcodec("abcdef").build();
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[tokio::test]
async fn step_fails_to_build_when_no_vcodec_specified() {
let mut definition = DefinitionBuilder::new().build();
definition.parameters.remove(VIDEO_CODEC_NAME);
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[test]
fn step_fails_to_build_when_invalid_acodec_specified() {
let definition = DefinitionBuilder::new().acodec("abcdef").build();
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[test]
fn step_fails_to_build_when_no_acodec_specified() {
let mut definition = DefinitionBuilder::new().build();
definition.parameters.remove(AUDIO_CODEC_NAME);
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[test]
fn step_fails_to_build_when_h264_specified_and_no_preset_specified() {
let mut definition = DefinitionBuilder::new().vcodec("abcdef").build();
definition.parameters.remove(H264_PRESET_NAME);
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[test]
fn step_fails_to_build_when_h264_specified_and_invalid_preset() {
let definition = DefinitionBuilder::new()
.vcodec("h264")
.h264_preset("abc")
.build();
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[test]
fn step_fails_to_build_when_invalid_size_specified() {
let definition = DefinitionBuilder::new().size("abc").build();
match TestContext::new(definition) {
Err(_) => (),
Ok(_) => panic!("Expected failure"),
}
}
#[tokio::test]
async fn rtmp_watch_registration_raised_on_new_stream() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context.step_context.execute_with_media(MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
});
let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
match request {
RtmpEndpointRequest::ListenForWatchers {
rtmp_stream_key, ..
} => {
assert_eq!(
rtmp_stream_key,
StreamKeyRegistration::Exact(Arc::new("abc".to_string())),
"Unexpected stream key"
);
}
request => panic!("Unexpected request received: {:?}", request),
}
}
/// After the watch registration is accepted, the step should register for
/// publishers (to receive the transcoded stream back), keyed on the stream id.
#[tokio::test]
async fn rtmp_publish_registration_raised_after_watch_accepted() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;

    let request = test_utils::expect_mpsc_response(&mut context.rtmp_endpoint).await;
    let stream_key = match request {
        RtmpEndpointRequest::ListenForPublishers {
            rtmp_stream_key, ..
        } => rtmp_stream_key,
        request => panic!("Unexpected request received: {:?}", request),
    };

    assert_eq!(
        stream_key,
        StreamKeyRegistration::Exact(Arc::new(String::from("abc"))),
        "Unexpected stream key"
    );
}
/// Once both RTMP registrations are accepted, the step should ask the ffmpeg
/// endpoint to start a transcode process.
#[tokio::test]
async fn ffmpeg_request_raised_after_publish_accepted() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;

    let request = test_utils::expect_mpsc_response(&mut context.ffmpeg_endpoint).await;
    assert!(
        matches!(request, FfmpegEndpointRequest::StartFfmpeg { .. }),
        "Unexpected request: {:?}",
        request
    );
}
/// The configured h264 codec and preset should reach ffmpeg unchanged.
#[tokio::test]
async fn h264_with_preset_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new()
        .vcodec("h264")
        .h264_preset("ultrafast")
        .build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    assert!(
        matches!(
            params.video_transcode,
            VideoTranscodeParams::H264 {
                preset: H264Preset::UltraFast
            }
        ),
        "Unexpected video params: {:?}",
        params.video_transcode
    );
}
/// A "copy" video codec should reach ffmpeg as copy transcode params.
#[tokio::test]
async fn video_copy_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new().vcodec("copy").build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    assert!(
        matches!(params.video_transcode, VideoTranscodeParams::Copy),
        "Unexpected video params: {:?}",
        params.video_transcode
    );
}
/// An "aac" audio codec parameter should reach ffmpeg as AAC transcode params.
#[tokio::test]
async fn aac_acodec_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new().acodec("aac").build();
    let mut context = TestContext::new(definition).unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    match params.audio_transcode {
        AudioTranscodeParams::Aac => (),
        // Fixed: the failure message previously said "video params" even
        // though this test inspects the audio transcode parameters.
        params => panic!("Unexpected audio params: {:?}", params),
    }
}
/// A "copy" audio codec parameter should reach ffmpeg as copy transcode params.
#[tokio::test]
async fn copy_acodec_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new().acodec("copy").build();
    let mut context = TestContext::new(definition).unwrap();
    context.step_context.execute_with_media(MediaNotification {
        stream_id: StreamId(Arc::new("abc".to_string())),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new("def".to_string()),
        },
    });

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    match params.audio_transcode {
        AudioTranscodeParams::Copy => (),
        // Fixed: the failure message previously said "video params" even
        // though this test inspects the audio transcode parameters.
        params => panic!("Unexpected audio params: {:?}", params),
    }
}
/// A configured output size should reach ffmpeg as scale parameters.
#[tokio::test]
async fn size_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new()
        .vcodec("h264")
        .h264_preset("ultrafast")
        .size("1920x1080")
        .build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    let scale = params.scale.expect("Expected scale parameters");
    assert_eq!(scale.width, 1920, "Unexpected width");
    assert_eq!(scale.height, 1080, "Unexpected height");
}
/// A configured bitrate should reach ffmpeg in kbps.
#[tokio::test]
async fn bitrate_passed_to_ffmpeg() {
    let definition = DefinitionBuilder::new()
        .vcodec("h264")
        .h264_preset("ultrafast")
        .bitrate(1233)
        .build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    let bitrate = params.bitrate_in_kbps.expect("Expected bitrate value");
    assert_eq!(bitrate, 1233, "Unexpected bitrate");
}
/// Regardless of configuration, the ffmpeg request should always have
/// real-time reading enabled.
#[tokio::test]
async fn ffmpeg_always_told_to_read_in_real_time() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    assert!(
        params.read_in_real_time,
        "Expected read in real time to be true"
    );
}
/// The ffmpeg target should be a local RTMP url ending in the stream id.
#[tokio::test]
async fn ffmpeg_instructed_to_read_from_rtmp() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (_channel, params, _id) = context.process_ffmpeg_event().await;

    let url = match params.target {
        TargetParams::Rtmp { url } => url,
        target => panic!("Unexpected target: {:?}", target),
    };

    assert!(
        url.starts_with("rtmp://localhost/"),
        "Unexpected start of url: {}",
        url
    );
    assert!(url.ends_with("/abc"), "Unexpected end of url: {}", url);
}
/// If ffmpeg stops on its own, the step should restart it with the exact same
/// parameters and the same ffmpeg id.
#[tokio::test]
async fn if_ffmpeg_process_stops_unexpectedly_it_starts_again_with_same_id_and_params() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let _watch_channels = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let (ffmpeg_channel, params, id) = context.process_ffmpeg_event().await;

    // Simulate the ffmpeg process dying unexpectedly.
    ffmpeg_channel
        .send(FfmpegEndpointNotification::FfmpegStopped)
        .expect("Failed to send ffmpeg stopped command");

    context.step_context.execute_pending_futures().await;

    // A fresh start request should appear with identical settings.
    let (_channel, new_params, new_id) = context.process_ffmpeg_event().await;
    assert_eq!(new_params, params, "Parameters were not equal");
    assert_eq!(new_id, id, "Ids were not equal");
}
/// New-stream notifications must be forwarded to the next step right away.
#[tokio::test]
async fn stream_started_notification_passed_through_immediately() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("abc")),
        },
    };
    context.step_context.assert_media_passed_through(notification);
}
/// Stream disconnection notifications must be forwarded to the next step
/// right away.
#[tokio::test]
async fn disconnection_notification_passed_through_immediately() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::StreamDisconnected,
    };
    context.step_context.assert_media_passed_through(notification);
}
/// Metadata input is routed toward ffmpeg, not forwarded to the next step.
#[tokio::test]
async fn metadata_notification_passed_as_input_does_not_get_passed_as_output() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("test"))),
        content: MediaNotificationContent::Metadata {
            data: HashMap::new(),
        },
    };
    context
        .step_context
        .assert_media_not_passed_through(notification);
}
/// Video payload input is routed toward ffmpeg, not forwarded to the next step.
#[tokio::test]
async fn video_notification_passed_as_input_does_not_get_passed_as_output() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("test"))),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![1, 2]),
            timestamp: Duration::from_millis(0),
            is_required_for_decoding: true,
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    };
    context
        .step_context
        .assert_media_not_passed_through(notification);
}
/// Audio payload input is routed toward ffmpeg, not forwarded to the next step.
#[tokio::test]
async fn audio_notification_passed_as_input_does_not_get_passed_as_output() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let notification = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("test"))),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Audio,
            payload_type: AUDIO_CODEC_AAC_RAW.clone(),
            data: Bytes::from(vec![1, 2]),
            timestamp: Duration::from_millis(5),
            is_required_for_decoding: true,
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    };
    context
        .step_context
        .assert_media_not_passed_through(notification);
}
/// Video payloads for an active stream should be relayed over the watcher
/// media channel, converted to RTMP endpoint media data and keyed on the
/// stream id.
#[tokio::test]
async fn video_packet_sent_to_watcher_media_channel() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let (_notification, mut media_channel) = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let _ffmpeg_results = context.process_ffmpeg_event().await;

    let media = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            timestamp: Duration::from_millis(0),
            is_required_for_decoding: false,
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
            data: Bytes::from_static(&[1, 2]),
        },
    };
    context.step_context.execute_with_media(media.clone());

    let response = test_utils::expect_mpsc_response(&mut media_channel).await;
    assert_eq!(response.stream_key.as_str(), "abc", "Unexpected stream key");

    // The relayed data must equal the canonical conversion of the input.
    let expected_endpoint_media_data = RtmpEndpointMediaData::from_media_notification_content(
        media.content,
        context.is_keyframe_metadata_key,
        context.pts_offset_metadata_key,
    )
    .unwrap();

    assert_eq!(
        response.data, expected_endpoint_media_data,
        "Unexpected media sent"
    );
}
/// Audio payloads for an active stream should be relayed over the watcher
/// media channel, converted to RTMP endpoint media data and keyed on the
/// stream id.
#[tokio::test]
async fn audio_packet_sent_to_watcher_media_channel() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let (_notification, mut media_channel) = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let _ffmpeg_results = context.process_ffmpeg_event().await;

    let media = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Audio,
            payload_type: AUDIO_CODEC_AAC_RAW.clone(),
            data: Bytes::from(vec![1, 2]),
            timestamp: Duration::from_millis(5),
            is_required_for_decoding: true,
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    };
    context.step_context.execute_with_media(media.clone());

    let response = test_utils::expect_mpsc_response(&mut media_channel).await;
    assert_eq!(response.stream_key.as_str(), "abc", "Unexpected stream key");

    // The relayed data must equal the canonical conversion of the input.
    let expected_endpoint_media_data = RtmpEndpointMediaData::from_media_notification_content(
        media.content,
        context.is_keyframe_metadata_key,
        context.pts_offset_metadata_key,
    )
    .unwrap();

    assert_eq!(
        response.data, expected_endpoint_media_data,
        "Unexpected media sent"
    );
}
/// Metadata for an active stream should be relayed over the watcher media
/// channel, converted to RTMP endpoint media data and keyed on the stream id.
#[tokio::test]
async fn metadata_packet_sent_to_watcher_media_channel() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let (_notification, mut media_channel) = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let _ffmpeg_results = context.process_ffmpeg_event().await;

    let media = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::Metadata {
            data: HashMap::new(),
        },
    };
    context.step_context.execute_with_media(media.clone());
    context.step_context.execute_pending_futures().await;

    let response = test_utils::expect_mpsc_response(&mut media_channel).await;
    assert_eq!(response.stream_key.as_str(), "abc", "Unexpected stream key");

    // The relayed data must equal the canonical conversion of the input.
    let expected_endpoint_media_data = RtmpEndpointMediaData::from_media_notification_content(
        media.content,
        context.is_keyframe_metadata_key,
        context.pts_offset_metadata_key,
    )
    .unwrap();

    assert_eq!(
        response.data, expected_endpoint_media_data,
        "Unexpected media sent"
    );
}
/// Media for a stream id the step is not tracking must not be relayed over
/// the watcher media channel.
#[tokio::test]
async fn video_packet_with_other_stream_id_not_sent_to_watcher_media_channel() {
    let definition = DefinitionBuilder::new().build();
    let mut context = TestContext::new(definition).unwrap();

    let new_stream = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("abc"))),
        content: MediaNotificationContent::NewIncomingStream {
            stream_name: Arc::new(String::from("def")),
        },
    };
    context.step_context.execute_with_media(new_stream);

    let (_notification, mut media_channel) = context.accept_watch_registration().await;
    let _publish_channel = context.accept_publish_registration().await;
    let _ffmpeg_results = context.process_ffmpeg_event().await;

    // Note the stream id "test", which differs from the active stream "abc".
    let media = MediaNotification {
        stream_id: StreamId(Arc::new(String::from("test"))),
        content: MediaNotificationContent::MediaPayload {
            media_type: MediaType::Video,
            payload_type: VIDEO_CODEC_H264_AVC.clone(),
            data: Bytes::from(vec![1, 2]),
            timestamp: Duration::new(0, 0),
            is_required_for_decoding: true,
            metadata: MediaPayloadMetadataCollection::new(iter::empty(), &mut BytesMut::new()),
        },
    };
    context.step_context.execute_with_media(media.clone());

    test_utils::expect_mpsc_timeout(&mut media_channel).await;
}
#[tokio::test]
async fn video_packet_from_publisher_passed_as_media_output() {
let definition = DefinitionBuilder::new().build();
let mut context = TestContext::new(definition).unwrap();
context.step_context.execute_with_media(MediaNotification {
stream_id: StreamId(Arc::new("abc".to_string())),
content: MediaNotificationContent::NewIncomingStream {
stream_name: Arc::new("def".to_string()),
},
});
let _watch_channels = context.accept_watch_registration().await;
let publish_channel = context.accept_publish_registration().await;
let _ffmpeg_results = context.process_ffmpeg_event().await;
publish_channel
.send(RtmpEndpointPublisherMessage::NewVideoData {
publisher: ConnectionId(Arc::new("connection".to_string())),
data: Bytes::from(vec![1, 2, 3]),
timestamp: RtmpTimestamp::new(5),
is_keyframe: true,
is_sequence_header: true,
composition_time_offset: 123,
})
.expect("Failed to send video message");
context.step_context.execute_pending_futures().await;
assert_eq!(
context.step_context.media_outputs.len(),
1,
"Unexpected number of media outputs"
);
let media = &context.step_context.media_outputs[0];
assert_eq!(
media.stream_id.0.as_str(),
"abc",
"Expected media to have original stream id"
);
match &media.content {
MediaNotificationContent::MediaPayload {
media_type,
payload_type,
data,
timestamp,
is_required_for_decoding,
metadata,
} => {
let is_keyframe = metadata
.iter()
.filter(|m| m.key() == context.is_keyframe_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::Bool(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
let pts_offset = metadata
.iter()
.filter(|m| m.key() == context.pts_offset_metadata_key)
.filter_map(|m| match m.value() {
MetadataValue::I32(val) => Some(val),
_ => None,
})
.next()
.unwrap_or_default();
assert_eq!(*media_type, MediaType::Video);
assert_eq!(
*payload_type, *VIDEO_CODEC_H264_AVC,
"Unexpected payload type"
);
assert_eq!(data, &vec![1, 2, 3], "Unexpected bytes");
assert_eq!(timestamp, &Duration::from_millis(5), "Unexpected dts");
assert!(
is_required_for_decoding,
"Expected is_required_for_decoding to be true"
);
//! A workflow step that utilizes the ffmpeg executable to transcode media streams. When a
//! new stream comes into the step, it will coordinate with the RTMP server endpoint to provision
//! a special app/stream key combination to push a video stream out and another app/stream key
//! combination to receive the transcoded video stream back.
//!
//! It will then request the ffmpeg endpoint to pull video from the output rtmp location, how
//! ffmpeg should transcode the video, and to send the resulting video back. The transcoded media
//! is then passed to onto the next step.
//!
//! Media notifications that this step receives are passed to the RTMP endpoint but are not
//! passed along to the next step. When the step receives transcoded media it will then pass those
//! to the next step.
#[cfg(test)]
mod tests;
use crate::endpoint::{
AudioTranscodeParams, FfmpegEndpointNotification, FfmpegEndpointRequest, FfmpegParams,
H264Preset, TargetParams, VideoScale, VideoTranscodeParams,
};
use bytes::BytesMut;
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKey, MetadataValue,
};
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent, MediaType};
use mmids_core::StreamId;
use mmids_rtmp::rtmp_server::{
IpRestriction, RegistrationType, RtmpEndpointMediaData, RtmpEndpointMediaMessage,
RtmpEndpointPublisherMessage, RtmpEndpointRequest, RtmpEndpointWatcherNotification,
StreamKeyRegistration,
};
use mmids_rtmp::utils::stream_metadata_to_hash_map;
use std::collections::{HashMap, VecDeque};
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use tracing::{error, info, warn};
use uuid::Uuid;
// Names of the parameters this step reads from its workflow step definition.
const VIDEO_CODEC_NAME: &str = "vcodec"; // required; "copy" or "h264"
const AUDIO_CODEC_NAME: &str = "acodec"; // required; "copy" or "aac"
const H264_PRESET_NAME: &str = "h264_preset"; // h264 preset name, e.g. "veryfast"
const SIZE_NAME: &str = "size"; // output size as "<width>x<height>"
const BITRATE_NAME: &str = "kbps"; // target bitrate in kilobits per second
/// Generates new ffmpeg transcoding step instances based on specified step definitions.
pub struct FfmpegTranscoderStepGenerator {
    // Channel for sending registration requests to the RTMP server endpoint
    rtmp_server_endpoint: UnboundedSender<RtmpEndpointRequest>,
    // Channel for sending start/stop requests to the ffmpeg endpoint
    ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
    // Metadata key used to mark video payloads as keyframes
    is_keyframe_metadata_key: MetadataKey,
    // Metadata key used to carry a video payload's pts offset
    pts_offset_metadata_key: MetadataKey,
}
/// The transcode workflow step itself. For each active stream it pushes the
/// original media to an RTMP "source" app, has ffmpeg pull/transcode it, and
/// receives the transcoded stream back via an RTMP "result" app.
struct FfmpegTranscoder {
    // Definition this step was built from; its id namespaces the RTMP apps
    definition: WorkflowStepDefinition,
    ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
    rtmp_server_endpoint: UnboundedSender<RtmpEndpointRequest>,
    // Video transcode settings parsed from the `vcodec`/`h264_preset` parameters
    video_codec_params: VideoTranscodeParams,
    // Audio transcode settings parsed from the `acodec` parameter
    audio_codec_params: AudioTranscodeParams,
    // Optional output scaling parsed from the `size` parameter
    video_scale_params: Option<VideoScale>,
    // Optional target bitrate (kbps) parsed from the `kbps` parameter
    bitrate: Option<u16>,
    // Per-stream transcode state, keyed by workflow stream id
    active_streams: HashMap<StreamId, ActiveStream>,
    // Current status; becomes `Error` when either endpoint disappears
    status: StepStatus,
    // Buffer handed to `MediaPayloadMetadataCollection::new` when building
    // payload metadata — TODO confirm usage in the notification handlers
    metadata_buffer: BytesMut,
    is_keyframe_metadata_key: MetadataKey,
    pts_offset_metadata_key: MetadataKey,
}
/// State of the per-stream watcher registration with the RTMP endpoint — the
/// registration through which this step sends the original media out so that
/// ffmpeg can pull it.
#[derive(Debug)]
enum WatchRegistrationStatus {
    // No registration has been requested yet
    Inactive,
    // Registration requested; the media channel is held until it is confirmed
    Pending {
        media_channel: UnboundedSender<RtmpEndpointMediaMessage>,
    },
    // Registration confirmed; media may be sent over the channel
    Active {
        media_channel: UnboundedSender<RtmpEndpointMediaMessage>,
    },
}
/// State of the per-stream publisher registration with the RTMP endpoint —
/// the registration through which the transcoded stream comes back in.
#[derive(Debug)]
enum PublishRegistrationStatus {
    Inactive,
    Pending,
    Active,
}
/// Lifecycle state of the ffmpeg process for a single stream.
#[derive(Debug)]
enum FfmpegStatus {
    Inactive,
    Pending,
    Active,
}
/// Book-keeping for one in-flight stream being transcoded by this step.
struct ActiveStream {
    // Stream id as seen by the workflow
    id: StreamId,
    // Name the stream was published under
    stream_name: Arc<String>,
    // Media received before the watch registration became active; flushed to
    // the RTMP endpoint once it is (e.g. sequence headers)
    pending_media: VecDeque<MediaNotificationContent>,
    // Outbound (original media -> RTMP) registration status
    rtmp_output_status: WatchRegistrationStatus,
    // Inbound (transcoded media <- RTMP) registration status
    rtmp_input_status: PublishRegistrationStatus,
    // Status of the ffmpeg process for this stream
    ffmpeg_status: FfmpegStatus,
    // Identifier used when talking to the ffmpeg endpoint about this stream
    ffmpeg_id: Uuid,
}
/// Results of the futures this step waits on; each variant is fed back into
/// `handle_resolved_future` when the corresponding event fires.
enum FutureResult {
    // The RTMP endpoint's channel closed — unrecoverable for this step
    RtmpEndpointGone,
    // The ffmpeg endpoint's channel closed — unrecoverable for this step
    FfmpegEndpointGone,
    // A watcher-registration notification arrived for the given stream
    RtmpWatchNotificationReceived(StreamId, RtmpEndpointWatcherNotification),
    // The watch notification channel for the given stream closed
    RtmpWatchChannelGone(StreamId),
    // A publisher message (transcoded media coming back) for the given stream
    RtmpPublishNotificationReceived(StreamId, RtmpEndpointPublisherMessage),
    // The publish notification channel for the given stream closed
    RtmpPublishChannelGone(StreamId),
    // A notification from the ffmpeg endpoint for the given stream
    FfmpegNotificationReceived(StreamId, FfmpegEndpointNotification),
    // The ffmpeg notification channel for the given stream closed
    FfmpegChannelGone(StreamId),
}

impl StepFutureResult for FutureResult {}
#[derive(Error, Debug)]
enum StepStartupError {
#[error("Invalid video codec specified ({0}). {} is a required field and valid values are: 'copy' and 'h264'", VIDEO_CODEC_NAME)]
InvalidVideoCodec(String),
#[error("Invalid audio codec specified ({0}). {} is a required field and valid values are: 'copy' and 'aac'", AUDIO_CODEC_NAME)]
InvalidAudioCodec(String),
#[error("Invalid h264 preset specified ({0}). {} is the name of any h264 profile (e.g. veryfast, medium, etc...)", H264_PRESET_NAME)]
InvalidH264Preset(String),
#[error(
"Invalid video size specified ({0}). {} must be in the format of '<width>x<height>'",
SIZE_NAME
)]
InvalidVideoSize(String),
#[error("Invalid bitrate specified ({0}). {} must be a number", BITRATE_NAME)]
InvalidBitrate(String),
}
impl FfmpegTranscoderStepGenerator {
pub fn new(
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
) -> Self {
FfmpegTranscoderStepGenerator {
rtmp_server_endpoint: rtmp_endpoint,
ffmpeg_endpoint,
is_keyframe_metadata_key,
pts_offset_metadata_key,
}
}
}
impl StepGenerator for FfmpegTranscoderStepGenerator {
    /// Builds an `FfmpegTranscoder` step from the definition's parameters.
    ///
    /// Required: `vcodec` ("copy"/"h264") and `acodec` ("copy"/"aac").
    /// Optional: `h264_preset` (defaults to "veryfast" when `vcodec` is
    /// "h264"), `size` ("<width>x<height>") and `kbps`. Returns a
    /// `StepStartupError` when a parameter is missing or invalid.
    fn generate(
        &self,
        definition: WorkflowStepDefinition,
        futures_channel: WorkflowStepFuturesChannel,
    ) -> StepCreationResult {
        // Required video codec; "h264" additionally reads the optional preset.
        let vcodec = match definition.parameters.get(VIDEO_CODEC_NAME) {
            Some(Some(value)) => match value.to_lowercase().trim() {
                "copy" => VideoTranscodeParams::Copy,
                "h264" => match definition.parameters.get(H264_PRESET_NAME) {
                    Some(Some(value)) => match value.to_lowercase().trim() {
                        "ultrafast" => VideoTranscodeParams::H264 {
                            preset: H264Preset::UltraFast,
                        },
                        "superfast" => VideoTranscodeParams::H264 {
                            preset: H264Preset::SuperFast,
                        },
                        "veryfast" => VideoTranscodeParams::H264 {
                            preset: H264Preset::VeryFast,
                        },
                        "faster" => VideoTranscodeParams::H264 {
                            preset: H264Preset::Faster,
                        },
                        "fast" => VideoTranscodeParams::H264 {
                            preset: H264Preset::Fast,
                        },
                        "medium" => VideoTranscodeParams::H264 {
                            preset: H264Preset::Medium,
                        },
                        "slow" => VideoTranscodeParams::H264 {
                            preset: H264Preset::Slow,
                        },
                        "slower" => VideoTranscodeParams::H264 {
                            preset: H264Preset::Slower,
                        },
                        "veryslow" => VideoTranscodeParams::H264 {
                            preset: H264Preset::VerySlow,
                        },
                        x => {
                            return Err(Box::new(StepStartupError::InvalidH264Preset(
                                x.to_string(),
                            )))
                        }
                    },
                    // No preset parameter given: fall back to "veryfast".
                    // NOTE(review): one unit test expects a missing preset to
                    // be an error — confirm which behavior is intended.
                    _ => VideoTranscodeParams::H264 {
                        preset: H264Preset::VeryFast,
                    },
                },
                x => return Err(Box::new(StepStartupError::InvalidVideoCodec(x.to_string()))),
            },
            _ => {
                return Err(Box::new(StepStartupError::InvalidVideoCodec(
                    "".to_string(),
                )))
            }
        };

        // Required audio codec.
        let acodec = match definition.parameters.get(AUDIO_CODEC_NAME) {
            Some(Some(value)) => match value.to_lowercase().trim() {
                "copy" => AudioTranscodeParams::Copy,
                "aac" => AudioTranscodeParams::Aac,
                x => return Err(Box::new(StepStartupError::InvalidAudioCodec(x.to_string()))),
            },
            _ => {
                return Err(Box::new(StepStartupError::InvalidAudioCodec(
                    "".to_string(),
                )))
            }
        };

        // Optional output size in "<width>x<height>" form.
        let size = match definition.parameters.get(SIZE_NAME) {
            Some(Some(value)) => {
                let mut dimensions = Vec::new();
                for part in value.split('x') {
                    match part.parse::<u16>() {
                        Ok(num) => dimensions.push(num),
                        Err(_) => {
                            return Err(Box::new(StepStartupError::InvalidVideoSize(value.clone())))
                        }
                    }
                }

                // Exactly two dimensions (width and height) are required.
                if dimensions.len() != 2 {
                    return Err(Box::new(StepStartupError::InvalidVideoSize(value.clone())));
                }

                Some(VideoScale {
                    width: dimensions[0],
                    height: dimensions[1],
                })
            }
            _ => None,
        };

        // Optional bitrate in kbps.
        let bitrate = match definition.parameters.get(BITRATE_NAME) {
            Some(Some(value)) => {
                if let Ok(num) = value.parse() {
                    Some(num)
                } else {
                    return Err(Box::new(StepStartupError::InvalidBitrate(value.clone())));
                }
            }
            _ => None,
        };

        let step = FfmpegTranscoder {
            definition,
            active_streams: HashMap::new(),
            audio_codec_params: acodec,
            rtmp_server_endpoint: self.rtmp_server_endpoint.clone(),
            ffmpeg_endpoint: self.ffmpeg_endpoint.clone(),
            video_scale_params: size,
            video_codec_params: vcodec,
            bitrate,
            status: StepStatus::Active,
            metadata_buffer: BytesMut::new(),
            is_keyframe_metadata_key: self.is_keyframe_metadata_key,
            pts_offset_metadata_key: self.pts_offset_metadata_key,
        };

        // Surface endpoint disappearance as future results so the step can
        // transition to an error state instead of silently hanging.
        let ffmpeg_endpoint = self.ffmpeg_endpoint.clone();
        futures_channel.send_on_generic_future_completion(async move {
            ffmpeg_endpoint.closed().await;
            FutureResult::FfmpegEndpointGone
        });

        let rtmp_endpoint = self.rtmp_server_endpoint.clone();
        futures_channel.send_on_generic_future_completion(async move {
            rtmp_endpoint.closed().await;
            FutureResult::RtmpEndpointGone
        });

        let status = step.status.clone();

        Ok((Box::new(step), status))
    }
}
impl FfmpegTranscoder {
fn get_source_rtmp_app(&self) -> String {
format!("ffmpeg-transcoder-original-{}", self.definition.get_id())
}
fn get_result_rtmp_app(&self) -> String {
format!("ffmpeg-transcoder-result-{}", self.definition.get_id())
}
    /// Dispatches a resolved future (endpoint loss, per-stream channel loss,
    /// or per-stream notifications) to the appropriate handler. Future results
    /// of unknown types are silently ignored.
    fn handle_resolved_future(
        &mut self,
        notification: Box<dyn StepFutureResult>,
        outputs: &mut StepOutputs,
        futures_channel: &WorkflowStepFuturesChannel,
    ) {
        let notification = match notification.downcast::<FutureResult>() {
            Ok(x) => *x,
            // Not one of our future results; nothing to do.
            Err(_) => return,
        };

        match notification {
            // Losing either endpoint is unrecoverable: flag the step as
            // errored and tear down every active stream.
            FutureResult::FfmpegEndpointGone => {
                error!("Ffmpeg endpoint is gone!");
                self.status = StepStatus::Error {
                    message: "Ffmpeg endpoint is gone".to_string(),
                };

                // Collect ids first to avoid borrowing the map while mutating it.
                let ids: Vec<StreamId> = self.active_streams.keys().cloned().collect();
                for id in ids {
                    self.stop_stream(&id);
                }
            }

            FutureResult::RtmpEndpointGone => {
                error!("RTMP endpoint is gone!");
                self.status = StepStatus::Error {
                    message: "Rtmp endpoint is gone".to_string(),
                };

                let ids: Vec<StreamId> = self.active_streams.keys().cloned().collect();
                for id in ids {
                    self.stop_stream(&id);
                }
            }

            // A per-stream channel closing only tears down that one stream;
            // the error is logged only if the stream was still active.
            FutureResult::RtmpWatchChannelGone(stream_id) => {
                if self.stop_stream(&stream_id) {
                    error!(stream_id = ?stream_id, "Rtmp watch channel disappeared for stream id {:?}", stream_id);
                }
            }

            FutureResult::RtmpPublishChannelGone(stream_id) => {
                if self.stop_stream(&stream_id) {
                    error!(
                        stream_id = ?stream_id,
                        "Rtmp publish channel dissappeared for stream id {:?}", stream_id
                    );
                }
            }

            FutureResult::FfmpegChannelGone(stream_id) => {
                if self.stop_stream(&stream_id) {
                    error!(
                        stream_id = ?stream_id,
                        "Ffmpeg channel disappeared for stream id {:?}", stream_id
                    );
                }
            }

            FutureResult::RtmpWatchNotificationReceived(stream_id, notification) => {
                if !self.active_streams.contains_key(&stream_id) {
                    // late notification after stopping a stream
                    return;
                }

                self.handle_rtmp_watch_notification(stream_id, notification, futures_channel);
            }

            FutureResult::RtmpPublishNotificationReceived(stream_id, notification) => {
                if !self.active_streams.contains_key(&stream_id) {
                    // late notification after stopping a stream
                    return;
                }

                self.handle_rtmp_publish_notification(
                    stream_id,
                    notification,
                    outputs,
                    futures_channel,
                );
            }

            FutureResult::FfmpegNotificationReceived(stream_id, notification) => {
                if !self.active_streams.contains_key(&stream_id) {
                    // late notification after stopping a stream
                    return;
                }

                self.handle_ffmpeg_notification(stream_id, notification, futures_channel);
            }
        }
    }
    /// Handles a media notification from the previous workflow step.
    ///
    /// New-stream and disconnection notifications are forwarded downstream;
    /// all other media is routed to the RTMP endpoint (for ffmpeg to consume)
    /// and NOT forwarded — the transcoded media coming back from ffmpeg is
    /// what gets passed to the next step instead.
    fn handle_media(
        &mut self,
        media: MediaNotification,
        outputs: &mut StepOutputs,
        futures_channel: &WorkflowStepFuturesChannel,
    ) {
        match &media.content {
            MediaNotificationContent::NewIncomingStream { stream_name } => {
                // Guard against repeated notifications for a stream id we
                // are already tracking.
                if let Some(stream) = self.active_streams.get(&media.stream_id) {
                    if stream.stream_name != *stream_name {
                        warn!(
                            stream_id = ?media.stream_id,
                            new_stream_name = %stream_name,
                            active_stream_name = %stream.stream_name,
                            "Unexpected new incoming stream notification received on \
                        stream id {:?} and stream name '{}', but we already have this stream id active \
                        for stream name '{}'. Ignoring this notification",
                            media.stream_id, stream_name, stream.stream_name);
                    } else {
                        // Since the stream id / name combination is already set, this is a duplicate
                        // notification. This is probably a bug somewhere but it's not harmful
                        // to ignore
                    }

                    return;
                }

                // Start tracking the stream and kick off RTMP/ffmpeg setup.
                let stream = ActiveStream {
                    id: media.stream_id.clone(),
                    stream_name: stream_name.clone(),
                    pending_media: VecDeque::new(),
                    rtmp_output_status: WatchRegistrationStatus::Inactive,
                    rtmp_input_status: PublishRegistrationStatus::Inactive,
                    ffmpeg_status: FfmpegStatus::Inactive,
                    ffmpeg_id: Uuid::new_v4(),
                };

                self.active_streams.insert(media.stream_id.clone(), stream);
                self.prepare_stream(media.stream_id.clone(), futures_channel);

                // New-stream notifications pass straight through.
                outputs.media.push(media.clone());
            }

            MediaNotificationContent::StreamDisconnected => {
                if self.stop_stream(&media.stream_id) {
                    info!(
                        stream_id = ?media.stream_id,
                        "Stopping stream id {:?} due to stream disconnection notification", media.stream_id
                    );
                }

                // Disconnection notifications pass straight through.
                outputs.media.push(media.clone());
            }

            _ => {
                // All other media: forward to the RTMP endpoint if the watch
                // registration is active; otherwise queue it until it is.
                if let Some(stream) = self.active_streams.get_mut(&media.stream_id) {
                    if let WatchRegistrationStatus::Active { media_channel } =
                        &stream.rtmp_output_status
                    {
                        if let Ok(media_data) =
                            RtmpEndpointMediaData::from_media_notification_content(
                                media.content,
                                self.is_keyframe_metadata_key,
                                self.pts_offset_metadata_key,
                            )
                        {
                            // Send failures are ignored; channel loss is
                            // handled via the RtmpWatchChannelGone future.
                            let _ = media_channel.send(RtmpEndpointMediaMessage {
                                stream_key: stream.id.0.clone(),
                                data: media_data,
                            });
                        }
                    } else {
                        stream.pending_media.push_back(media.content.clone());
                    }
                }
            }
        }
    }
fn prepare_stream(
&mut self,
stream_id: StreamId,
futures_channel: &WorkflowStepFuturesChannel,
) {
let source_rtmp_app = Arc::new(self.get_source_rtmp_app());
let result_rtmp_app = Arc::new(self.get_result_rtmp_app());
if let Some(stream) = self.active_streams.get_mut(&stream_id) {
let (output_is_active, output_media_channel) = match &stream.rtmp_output_status {
WatchRegistrationStatus::Inactive => {
let (media_sender, media_receiver) = unbounded_channel();
let (watch_sender, watch_receiver) = unbounded_channel();
let _ =
self.rtmp_server_endpoint
.send(RtmpEndpointRequest::ListenForWatchers {
notification_channel: watch_sender,
rtmp_app: source_rtmp_app.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0.clone()),
port: 1935,
media_channel: media_receiver,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
let recv_stream_id = stream.id.clone();
let closed_stream_id = stream.id.clone();
futures_channel.send_on_generic_unbounded_recv(
watch_receiver,
move |message| {
FutureResult::RtmpWatchNotificationReceived(
recv_stream_id.clone(),
message,
)
},
move || FutureResult::RtmpWatchChannelGone(closed_stream_id),
);
stream.rtmp_output_status = WatchRegistrationStatus::Pending {
media_channel: media_sender,
};
(false, None)
}
WatchRegistrationStatus::Pending { media_channel: _ } => (false, None),
WatchRegistrationStatus::Active { media_channel } => (true, Some(media_channel)),
};
if output_is_active {
// If the output is active, we need to send any pending media out. Most likely this
// will contain sequence headers, and thus we need to get them up to the rtmp endpoint
// so clients don't miss them
if let Some(media_channel) = output_media_channel {
for media in stream.pending_media.drain(..) {
if let Ok(media_data) =
RtmpEndpointMediaData::from_media_notification_content(
media,
self.is_keyframe_metadata_key,
self.pts_offset_metadata_key,
)
{
let _ = media_channel.send(RtmpEndpointMediaMessage {
stream_key: stream.id.0.clone(),
data: media_data,
});
}
}
}
}
let input_is_active = match &stream.rtmp_input_status {
PublishRegistrationStatus::Inactive => {
let (sender, receiver) = unbounded_channel();
let _ =
self.rtmp_server_endpoint
.send(RtmpEndpointRequest::ListenForPublishers {
port: 1935,
rtmp_app: result_rtmp_app.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0.clone()),
stream_id: Some(stream.id.clone()),
message_channel: sender,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
let recv_stream_id = stream.id.clone();
let closed_stream_id = stream.id.clone();
futures_channel.send_on_generic_unbounded_recv(
receiver,
move |message| {
FutureResult::RtmpPublishNotificationReceived(
recv_stream_id.clone(),
message,
)
},
move || FutureResult::RtmpPublishChannelGone(closed_stream_id),
);
stream.rtmp_input_status = PublishRegistrationStatus::Pending;
false
}
PublishRegistrationStatus::Pending => false,
PublishRegistrationStatus::Active => true,
};
if let FfmpegStatus::Inactive = &stream.ffmpeg_status {
// Not worth starting ffmpeg until both input and outputs registrations are complete
if input_is_active && output_is_active {
let parameters = FfmpegParams {
read_in_real_time: true,
bitrate_in_kbps: self.bitrate,
input: format!("rtmp://localhost/{}/{}", source_rtmp_app, stream.id.0),
video_transcode: self.video_codec_params.clone(),
audio_transcode: self.audio_codec_params.clone(),
scale: self.video_scale_params.clone(),
target: TargetParams::Rtmp {
url: format!("rtmp://localhost/{}/{}", result_rtmp_app, stream.id.0),
},
};
let (sender, receiver) = unbounded_channel();
let _ = self
.ffmpeg_endpoint
.send(FfmpegEndpointRequest::StartFfmpeg {
id: stream.ffmpeg_id,
params: parameters,
notification_channel: sender,
});
let recv_stream_id = stream.id.clone();
let closed_stream_id = stream.id.clone();
futures_channel.send_on_generic_unbounded_recv(
receiver,
move |message| {
FutureResult::FfmpegNotificationReceived(
recv_stream_id.clone(),
message,
)
},
|| FutureResult::FfmpegChannelGone(closed_stream_id),
);
stream.ffmpeg_status = FfmpegStatus::Pending;
}
}
}
}
fn stop_stream(&mut self, stream_id: &StreamId) -> bool {
if let Some(stream) = self.active_streams.remove(stream_id) {
match &stream.ffmpeg_status {
FfmpegStatus::Pending => {
let _ = self
.ffmpeg_endpoint
.send(FfmpegEndpointRequest::StopFfmpeg {
id: stream.ffmpeg_id,
});
}
FfmpegStatus::Active => {
let _ = self
.ffmpeg_endpoint
.send(FfmpegEndpointRequest::StopFfmpeg {
id: stream.ffmpeg_id,
});
}
FfmpegStatus::Inactive => (),
}
let _ = self
.rtmp_server_endpoint
.send(RtmpEndpointRequest::RemoveRegistration {
registration_type: RegistrationType::Watcher,
port: 1935,
rtmp_app: Arc::new(self.get_source_rtmp_app()),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0.clone()),
});
let _ = self
.rtmp_server_endpoint
.send(RtmpEndpointRequest::RemoveRegistration {
registration_type: RegistrationType::Publisher,
port: 1935,
rtmp_app: Arc::new(self.get_result_rtmp_app()),
rtmp_stream_key: StreamKeyRegistration::Exact(stream.id.0),
});
return true;
}
false
}
fn handle_rtmp_watch_notification(
&mut self,
stream_id: StreamId,
notification: RtmpEndpointWatcherNotification,
futures_channel: &WorkflowStepFuturesChannel,
) {
if let Some(stream) = self.active_streams.get_mut(&stream_id) {
match notification {
RtmpEndpointWatcherNotification::WatcherRegistrationSuccessful => {
let new_status = match &stream.rtmp_output_status {
WatchRegistrationStatus::Pending { media_channel } => {
info!(
stream_id = ?stream.id,
"Watch registration successful for stream id {:?}", stream.id
);
Some(WatchRegistrationStatus::Active {
media_channel: media_channel.clone(),
})
}
status => {
error!(
stream_id = ?stream.id,
"Received watch registration successful notification for stream id \
{:?}, but this stream's watch status is {:?}", stream.id, status
);
None
}
};
if let Some(new_status) = new_status {
stream.rtmp_output_status = new_status;
}
}
RtmpEndpointWatcherNotification::WatcherRegistrationFailed => {
warn!(
stream_id = ?stream.id,
"Received watch registration failed for stream id {:?}", stream.id
);
stream.rtmp_output_status = WatchRegistrationStatus::Inactive;
}
RtmpEndpointWatcherNotification::StreamKeyBecameActive {
stream_key: _,
reactor_update_channel: _,
} => (),
RtmpEndpointWatcherNotification::StreamKeyBecameInactive { stream_key: _ } => (),
RtmpEndpointWatcherNotification::WatcherRequiringApproval { .. } => {
error!("Watcher requires approval but all watchers should be auto-approved");
self.status = StepStatus::Error {
message:
"Watcher requires approval but all watchers should be auto-approved"
.to_string(),
};
}
}
}
self.prepare_stream(stream_id, futures_channel);
}
fn handle_rtmp_publish_notification(
&mut self,
stream_id: StreamId,
notification: RtmpEndpointPublisherMessage,
outputs: &mut StepOutputs,
futures_channel: &WorkflowStepFuturesChannel,
) {
let mut prepare_stream = false;
if let Some(stream) = self.active_streams.get_mut(&stream_id) {
match notification {
RtmpEndpointPublisherMessage::PublisherRegistrationFailed => {
warn!(
stream_id = ?stream_id,
"Rtmp publish registration failed for stream {:?}", stream_id
);
stream.rtmp_input_status = PublishRegistrationStatus::Inactive;
prepare_stream = true;
}
RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => {
info!(
stream_id = ?stream_id,
"Rtmp publish registration successful for stream {:?}", stream_id
);
stream.rtmp_input_status = PublishRegistrationStatus::Active;
prepare_stream = true;
}
RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id: _,
stream_key: _,
connection_id: _,
reactor_update_channel: _,
} => (),
RtmpEndpointPublisherMessage::PublishingStopped { connection_id: _ } => (),
RtmpEndpointPublisherMessage::StreamMetadataChanged {
publisher: _,
metadata,
} => {
let metadata = stream_metadata_to_hash_map(metadata);
outputs.media.push(MediaNotification {
stream_id: stream_id.clone(),
content: MediaNotificationContent::Metadata { data: metadata },
});
}
RtmpEndpointPublisherMessage::NewVideoData {
publisher: _,
data,
is_sequence_header,
is_keyframe,
timestamp,
composition_time_offset,
} => {
let is_keyframe_metadata = MetadataEntry::new(
self.is_keyframe_metadata_key,
MetadataValue::Bool(is_keyframe),
&mut self.metadata_buffer,
)
.unwrap(); // Only fails from type mismatch
let pts_offset_metadata = MetadataEntry::new(
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | true |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/ffmpeg_rtmp_push/mod.rs | mmids-ffmpeg/src/workflow_steps/ffmpeg_rtmp_push/mod.rs | //! This step utilizes the ffmpeg endpoint to send media to an external system, such as another
//! RTMP server.
//!
//! Any incoming media packets are passed to the rtmp endpoint for sending to ffmpeg, and then
//! passed along as is for the next workflow step.
use crate::endpoint::{
AudioTranscodeParams, FfmpegEndpointRequest, FfmpegParams, TargetParams, VideoTranscodeParams,
};
use crate::workflow_steps::ffmpeg_handler::{FfmpegHandlerGenerator, FfmpegParameterGenerator};
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::MetadataKey;
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::StreamId;
use mmids_rtmp::rtmp_server::RtmpEndpointRequest;
use mmids_rtmp::workflow_steps::external_stream_reader::ExternalStreamReader;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
const TARGET: &str = "target";
/// Generates new instances of the ffmpeg rtmp push workflow step based on specified step definitions.
pub struct FfmpegRtmpPushStepGenerator {
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
struct FfmpegRtmpPushStep {
stream_reader: ExternalStreamReader,
}
enum FutureResult {
FfmpegEndpointGone,
}
impl StepFutureResult for FutureResult {}
#[derive(Error, Debug)]
enum StepStartupError {
#[error("No rtmp target specified. A 'target' parameter is required")]
NoTargetProvided,
}
struct ParamGenerator {
rtmp_app: String,
target: String,
}
impl FfmpegRtmpPushStepGenerator {
pub fn new(
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
) -> Self {
FfmpegRtmpPushStepGenerator {
rtmp_endpoint,
ffmpeg_endpoint,
is_keyframe_metadata_key,
pts_offset_metadata_key,
}
}
}
impl StepGenerator for FfmpegRtmpPushStepGenerator {
fn generate(
&self,
definition: WorkflowStepDefinition,
futures_channel: WorkflowStepFuturesChannel,
) -> StepCreationResult {
let target = match definition.parameters.get(TARGET) {
Some(Some(value)) => value,
_ => return Err(Box::new(StepStartupError::NoTargetProvided)),
};
let param_generator = ParamGenerator {
rtmp_app: get_rtmp_app(definition.get_id().to_string()),
target: target.to_string(),
};
let handler_generator =
FfmpegHandlerGenerator::new(self.ffmpeg_endpoint.clone(), Box::new(param_generator));
let reader = ExternalStreamReader::new(
Arc::new(format!("ffmpeg-rtmp-push-{}", definition.get_id())),
self.rtmp_endpoint.clone(),
Box::new(handler_generator),
self.is_keyframe_metadata_key,
self.pts_offset_metadata_key,
&futures_channel,
);
let step = FfmpegRtmpPushStep {
stream_reader: reader,
};
let ffmpeg_endpoint = self.ffmpeg_endpoint.clone();
futures_channel.send_on_generic_future_completion(async move {
ffmpeg_endpoint.closed().await;
FutureResult::FfmpegEndpointGone
});
Ok((Box::new(step), StepStatus::Active))
}
}
impl WorkflowStep for FfmpegRtmpPushStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
if let StepStatus::Error { message } = &self.stream_reader.status {
error!("External stream reader is in error status, so putting the step in in error status as well.");
return StepStatus::Error {
message: message.to_string(),
};
}
for future_result in inputs.notifications.drain(..) {
match future_result.downcast::<FutureResult>() {
Err(future_result) => {
// Not a future we can handle, it may be a future for the external stream reader
self.stream_reader
.handle_resolved_future(future_result, &futures_channel)
}
Ok(future_result) => match *future_result {
FutureResult::FfmpegEndpointGone => {
error!("Ffmpeg endpoint has disappeared. Closing all streams");
self.stream_reader.stop_all_streams();
}
},
};
}
for media in inputs.media.drain(..) {
self.stream_reader
.handle_media(media, outputs, &futures_channel);
}
self.stream_reader.status.clone()
}
}
impl FfmpegParameterGenerator for ParamGenerator {
fn form_parameters(&self, stream_id: &StreamId, _stream_name: &str) -> FfmpegParams {
FfmpegParams {
read_in_real_time: true,
input: format!("rtmp://localhost/{}/{}", self.rtmp_app, stream_id.0),
video_transcode: VideoTranscodeParams::Copy,
audio_transcode: AudioTranscodeParams::Copy,
scale: None,
bitrate_in_kbps: None,
target: TargetParams::Rtmp {
url: self.target.clone(),
},
}
}
}
fn get_rtmp_app(id: String) -> String {
format!("ffmpeg-rtmp-push-{}", id)
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/ffmpeg_pull/mod.rs | mmids-ffmpeg/src/workflow_steps/ffmpeg_pull/mod.rs | //! This workflow step utilizes ffmpeg to read video from an external source. The external source
//! can be a remote RTMP server or a file on the file system. If ffmpeg closes (such as when the
//! video file has been fully streamed) then the ffmpeg will restart until the workflow is
//! removed.
//!
//! Media packets that come in from previous steps are ignored.
use crate::endpoint::{
AudioTranscodeParams, FfmpegEndpointNotification, FfmpegEndpointRequest, FfmpegParams,
TargetParams, VideoTranscodeParams,
};
use bytes::BytesMut;
use mmids_core::codecs::{AUDIO_CODEC_AAC_RAW, VIDEO_CODEC_H264_AVC};
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::{
MediaPayloadMetadataCollection, MetadataEntry, MetadataKey, MetadataValue,
};
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::workflows::{MediaNotification, MediaNotificationContent, MediaType};
use mmids_core::StreamId;
use mmids_rtmp::rtmp_server::{
IpRestriction, RegistrationType, RtmpEndpointPublisherMessage, RtmpEndpointRequest,
StreamKeyRegistration,
};
use std::iter;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use tracing::{error, info};
use uuid::Uuid;
pub const LOCATION: &str = "location";
pub const STREAM_NAME: &str = "stream_name";
/// Generates new instances of the ffmpeg pull workflow step based on specified step definitions.
pub struct FfmpegPullStepGenerator {
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
struct FfmpegPullStep {
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
status: StepStatus,
rtmp_app: Arc<String>,
pull_location: String,
stream_name: Arc<String>,
ffmpeg_id: Option<Uuid>,
active_stream_id: Option<StreamId>,
metadata_buffer: BytesMut,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
enum FutureResult {
RtmpEndpointGone,
FfmpegEndpointGone,
RtmpEndpointResponseReceived(RtmpEndpointPublisherMessage),
FfmpegNotificationReceived(FfmpegEndpointNotification),
}
impl StepFutureResult for FutureResult {}
#[derive(Error, Debug)]
enum StepStartupError {
#[error("No {} parameter specified", LOCATION)]
NoLocationSpecified,
#[error("No {} parameter specified", STREAM_NAME)]
NoStreamNameSpecified,
}
impl FfmpegPullStepGenerator {
pub fn new(
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
) -> Self {
FfmpegPullStepGenerator {
rtmp_endpoint,
ffmpeg_endpoint,
is_keyframe_metadata_key,
pts_offset_metadata_key,
}
}
}
impl StepGenerator for FfmpegPullStepGenerator {
fn generate(
&self,
definition: WorkflowStepDefinition,
futures_channel: WorkflowStepFuturesChannel,
) -> StepCreationResult {
let location = match definition.parameters.get(LOCATION) {
Some(Some(value)) => value.clone(),
_ => return Err(Box::new(StepStartupError::NoLocationSpecified)),
};
let stream_name = match definition.parameters.get(STREAM_NAME) {
Some(Some(value)) => Arc::new(value.clone()),
_ => return Err(Box::new(StepStartupError::NoStreamNameSpecified)),
};
let step = FfmpegPullStep {
status: StepStatus::Created,
rtmp_app: Arc::new(format!("ffmpeg-pull-{}", definition.get_id())),
ffmpeg_endpoint: self.ffmpeg_endpoint.clone(),
rtmp_endpoint: self.rtmp_endpoint.clone(),
pull_location: location,
stream_name: stream_name.clone(),
ffmpeg_id: None,
active_stream_id: None,
metadata_buffer: BytesMut::new(),
is_keyframe_metadata_key: self.is_keyframe_metadata_key,
pts_offset_metadata_key: self.pts_offset_metadata_key,
};
let (sender, receiver) = unbounded_channel();
let _ = self
.rtmp_endpoint
.send(RtmpEndpointRequest::ListenForPublishers {
port: 1935,
rtmp_app: step.rtmp_app.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(stream_name),
stream_id: None,
message_channel: sender,
ip_restrictions: IpRestriction::None,
use_tls: false,
requires_registrant_approval: false,
});
let ffmpeg_endpoint = self.ffmpeg_endpoint.clone();
futures_channel.send_on_generic_future_completion(async move {
ffmpeg_endpoint.closed().await;
FutureResult::FfmpegEndpointGone
});
futures_channel.send_on_generic_unbounded_recv(
receiver,
FutureResult::RtmpEndpointResponseReceived,
|| FutureResult::RtmpEndpointGone,
);
let status = step.status.clone();
Ok((Box::new(step), status))
}
}
impl FfmpegPullStep {
fn handle_resolved_future(
&mut self,
result: FutureResult,
outputs: &mut StepOutputs,
futures_channel: &WorkflowStepFuturesChannel,
) {
match result {
FutureResult::FfmpegEndpointGone => {
error!("Ffmpeg endpoint is gone");
self.status = StepStatus::Error {
message: "Ffmpeg endpoint is gone".to_string(),
};
self.stop_ffmpeg();
}
FutureResult::RtmpEndpointGone => {
error!("Rtmp endpoint gone");
self.status = StepStatus::Error {
message: "Rtmp endpoint gone".to_string(),
};
self.stop_ffmpeg();
}
FutureResult::RtmpEndpointResponseReceived(response) => {
self.handle_rtmp_notification(outputs, response, futures_channel);
}
FutureResult::FfmpegNotificationReceived(notification) => {
self.handle_ffmpeg_notification(notification);
}
}
}
fn handle_ffmpeg_notification(&mut self, message: FfmpegEndpointNotification) {
match message {
FfmpegEndpointNotification::FfmpegFailedToStart { cause } => {
error!("Ffmpeg failed to start: {:?}", cause);
self.status = StepStatus::Error {
message: format!("Ffmpeg failed to start: {:?}", cause),
};
}
FfmpegEndpointNotification::FfmpegStarted => {
info!("Ffmpeg started");
}
FfmpegEndpointNotification::FfmpegStopped => {
info!("Ffmpeg stopped");
}
}
}
fn handle_rtmp_notification(
&mut self,
outputs: &mut StepOutputs,
message: RtmpEndpointPublisherMessage,
futures_channel: &WorkflowStepFuturesChannel,
) {
match message {
RtmpEndpointPublisherMessage::PublisherRegistrationFailed => {
error!("Publisher registration failed");
self.status = StepStatus::Error {
message: "Publisher registration failed".to_string(),
};
}
RtmpEndpointPublisherMessage::PublisherRegistrationSuccessful => {
info!("Publisher registration successful");
self.status = StepStatus::Active;
self.start_ffmpeg(futures_channel);
}
RtmpEndpointPublisherMessage::NewPublisherConnected {
stream_id,
stream_key,
connection_id,
reactor_update_channel: _,
} => {
info!(
stream_id = ?stream_id,
connection_id = ?connection_id,
stream_key = %stream_key,
"New RTMP publisher seen: {:?}, {:?}, {:?}", stream_id, connection_id, stream_key
);
if stream_key != self.stream_name {
error!(
stream_name = %self.stream_name,
stream_key = %stream_key,
"Expected publisher to have a stream name of {} but instead it was {}", self.stream_name, stream_key
);
self.status = StepStatus::Error {
message: format!(
"Expected publisher to have a stream name of {} but instead it was {}",
self.stream_name, stream_key
),
};
self.stop_ffmpeg();
}
self.active_stream_id = Some(stream_id.clone());
outputs.media.push(MediaNotification {
stream_id,
content: MediaNotificationContent::NewIncomingStream {
stream_name: self.stream_name.clone(),
},
});
}
RtmpEndpointPublisherMessage::PublishingStopped { connection_id: _ } => {
info!("RTMP publisher has stopped");
if let Some(stream_id) = &self.active_stream_id {
outputs.media.push(MediaNotification {
stream_id: stream_id.clone(),
content: MediaNotificationContent::StreamDisconnected,
});
}
}
RtmpEndpointPublisherMessage::StreamMetadataChanged {
publisher: _,
metadata,
} => {
if let Some(stream_id) = &self.active_stream_id {
outputs.media.push(MediaNotification {
stream_id: stream_id.clone(),
content: MediaNotificationContent::Metadata {
data: mmids_rtmp::utils::stream_metadata_to_hash_map(metadata),
},
});
} else {
error!("Received stream metadata without an active stream id");
self.stop_ffmpeg();
self.status = StepStatus::Error {
message: "Received stream metadata without an active stream id".to_string(),
};
}
}
RtmpEndpointPublisherMessage::NewVideoData {
publisher: _,
data,
is_keyframe,
is_sequence_header,
timestamp,
composition_time_offset,
} => {
if let Some(stream_id) = &self.active_stream_id {
let is_keyframe_metadata = MetadataEntry::new(
self.is_keyframe_metadata_key,
MetadataValue::Bool(is_keyframe),
&mut self.metadata_buffer,
)
.unwrap(); // Should only happen if type mismatch occurs
let pts_offset_metadata = MetadataEntry::new(
self.pts_offset_metadata_key,
MetadataValue::I32(composition_time_offset),
&mut self.metadata_buffer,
)
.unwrap(); // Should only happen if type mismatch occurs
let metadata = MediaPayloadMetadataCollection::new(
[is_keyframe_metadata, pts_offset_metadata].into_iter(),
&mut self.metadata_buffer,
);
outputs.media.push(MediaNotification {
stream_id: stream_id.clone(),
content: MediaNotificationContent::MediaPayload {
media_type: MediaType::Video,
payload_type: VIDEO_CODEC_H264_AVC.clone(),
is_required_for_decoding: is_sequence_header,
timestamp: Duration::from_millis(timestamp.value.into()),
metadata,
data,
},
});
} else {
error!("Received video data without an active stream id");
self.stop_ffmpeg();
self.status = StepStatus::Error {
message: "Received video data without an active stream id".to_string(),
};
}
}
RtmpEndpointPublisherMessage::NewAudioData {
publisher: _,
data,
is_sequence_header,
timestamp,
} => {
if let Some(stream_id) = &self.active_stream_id {
outputs.media.push(MediaNotification {
stream_id: stream_id.clone(),
content: MediaNotificationContent::MediaPayload {
timestamp: Duration::from_millis(timestamp.value as u64),
is_required_for_decoding: is_sequence_header,
data,
media_type: MediaType::Audio,
payload_type: AUDIO_CODEC_AAC_RAW.clone(),
metadata: MediaPayloadMetadataCollection::new(
iter::empty(),
&mut self.metadata_buffer,
),
},
});
} else {
error!("Received audio data without an active stream id");
self.stop_ffmpeg();
self.status = StepStatus::Error {
message: "Received audio data without an active stream id".to_string(),
};
}
}
RtmpEndpointPublisherMessage::PublisherRequiringApproval { .. } => {
error!("Publisher approval requested but publishers should be auto-approved");
self.status = StepStatus::Error {
message: "Publisher approval requested but publishers should be auto-approved"
.to_string(),
};
}
}
}
fn start_ffmpeg(&mut self, futures_channel: &WorkflowStepFuturesChannel) {
if self.ffmpeg_id.is_none() {
info!("Starting ffmpeg");
let id = Uuid::new_v4();
let (sender, receiver) = unbounded_channel();
let _ = self
.ffmpeg_endpoint
.send(FfmpegEndpointRequest::StartFfmpeg {
id,
notification_channel: sender,
params: FfmpegParams {
read_in_real_time: true,
input: self.pull_location.clone(),
video_transcode: VideoTranscodeParams::Copy,
audio_transcode: AudioTranscodeParams::Copy,
scale: None,
bitrate_in_kbps: None,
target: TargetParams::Rtmp {
url: format!("rtmp://localhost/{}/{}", self.rtmp_app, self.stream_name),
},
},
});
futures_channel.send_on_generic_unbounded_recv(
receiver,
FutureResult::FfmpegNotificationReceived,
|| FutureResult::FfmpegEndpointGone,
);
}
}
fn stop_ffmpeg(&mut self) {
if let Some(id) = &self.ffmpeg_id {
let _ = self
.ffmpeg_endpoint
.send(FfmpegEndpointRequest::StopFfmpeg { id: *id });
}
self.ffmpeg_id = None;
}
}
impl WorkflowStep for FfmpegPullStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
for result in inputs.notifications.drain(..) {
if let Ok(result) = result.downcast::<FutureResult>() {
self.handle_resolved_future(*result, outputs, &futures_channel);
if matches!(&self.status, &StepStatus::Error { .. }) {
break;
}
}
}
self.status.clone()
}
}
impl Drop for FfmpegPullStep {
fn drop(&mut self) {
self.stop_ffmpeg();
let _ = self
.rtmp_endpoint
.send(RtmpEndpointRequest::RemoveRegistration {
registration_type: RegistrationType::Publisher,
port: 1935,
rtmp_app: self.rtmp_app.clone(),
rtmp_stream_key: StreamKeyRegistration::Exact(self.stream_name.clone()),
});
}
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
KallDrexx/mmids | https://github.com/KallDrexx/mmids/blob/3d732616da3bc8976dbcd5f859758ed3f06dc38f/mmids-ffmpeg/src/workflow_steps/ffmpeg_hls/mod.rs | mmids-ffmpeg/src/workflow_steps/ffmpeg_hls/mod.rs | //! This step utilizes ffmpeg to create an HLS playlist.
//!
//! Media packets that are received from previous steps are passed to the RTMP endpoint for ffmpeg
//! consumption, and then passed on to the next step as-is.
use crate::endpoint::{
AudioTranscodeParams, FfmpegEndpointRequest, FfmpegParams, TargetParams, VideoTranscodeParams,
};
use crate::workflow_steps::ffmpeg_handler::{FfmpegHandlerGenerator, FfmpegParameterGenerator};
use mmids_core::workflows::definitions::WorkflowStepDefinition;
use mmids_core::workflows::metadata::MetadataKey;
use mmids_core::workflows::steps::factory::StepGenerator;
use mmids_core::workflows::steps::futures_channel::WorkflowStepFuturesChannel;
use mmids_core::workflows::steps::{
StepCreationResult, StepFutureResult, StepInputs, StepOutputs, StepStatus, WorkflowStep,
};
use mmids_core::StreamId;
use mmids_rtmp::rtmp_server::RtmpEndpointRequest;
use mmids_rtmp::workflow_steps::external_stream_reader::ExternalStreamReader;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::mpsc::UnboundedSender;
use tracing::error;
const PATH: &str = "path";
const SEGMENT_DURATION: &str = "duration";
const SEGMENT_COUNT: &str = "count";
const STREAM_NAME: &str = "stream_name";
/// Generates new instances of the ffmpeg HLS workflow step based on specified step definitions.
pub struct FfmpegHlsStepGenerator {
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
}
struct FfmpegHlsStep {
status: StepStatus,
stream_reader: ExternalStreamReader,
path: String,
}
enum FutureResult {
FfmpegEndpointGone,
HlsPathCreated(tokio::io::Result<()>),
}
impl StepFutureResult for FutureResult {}
#[derive(Error, Debug)]
enum StepStartupError {
#[error("No path specified. A 'path' is required")]
NoPathProvided,
#[error("Invalid duration of '{0}'. {} should be a number.", SEGMENT_DURATION)]
InvalidSegmentLength(String),
#[error(
"Invalid segment count of '{0}'. {} should be a positive number",
SEGMENT_COUNT
)]
InvalidSegmentCount(String),
}
struct ParamGenerator {
rtmp_app: Arc<String>,
path: String,
segment_duration: u16,
segment_count: u16,
stream_name: Option<String>,
}
impl FfmpegHlsStepGenerator {
pub fn new(
rtmp_endpoint: UnboundedSender<RtmpEndpointRequest>,
ffmpeg_endpoint: UnboundedSender<FfmpegEndpointRequest>,
is_keyframe_metadata_key: MetadataKey,
pts_offset_metadata_key: MetadataKey,
) -> Self {
FfmpegHlsStepGenerator {
rtmp_endpoint,
ffmpeg_endpoint,
is_keyframe_metadata_key,
pts_offset_metadata_key,
}
}
}
impl StepGenerator for FfmpegHlsStepGenerator {
fn generate(
&self,
definition: WorkflowStepDefinition,
futures_channel: WorkflowStepFuturesChannel,
) -> StepCreationResult {
let path = match definition.parameters.get(PATH) {
Some(Some(value)) => value,
_ => return Err(Box::new(StepStartupError::NoPathProvided)),
};
let duration = match definition.parameters.get(SEGMENT_DURATION) {
Some(Some(value)) => match value.parse() {
Ok(num) => num,
Err(_) => {
return Err(Box::new(StepStartupError::InvalidSegmentLength(
value.clone(),
)));
}
},
_ => 2,
};
let count = match definition.parameters.get(SEGMENT_COUNT) {
Some(Some(value)) => match value.parse::<u16>() {
Ok(num) => num,
Err(_) => {
return Err(Box::new(StepStartupError::InvalidSegmentCount(
value.clone(),
)));
}
},
_ => 0,
};
let stream_name = definition.parameters.get(STREAM_NAME).cloned().flatten();
let rtmp_app = Arc::new(get_rtmp_app(definition.get_id().to_string()));
let param_generator = ParamGenerator {
rtmp_app: rtmp_app.clone(),
path: path.clone(),
segment_duration: duration,
segment_count: count,
stream_name,
};
let handler_generator =
FfmpegHandlerGenerator::new(self.ffmpeg_endpoint.clone(), Box::new(param_generator));
let reader = ExternalStreamReader::new(
rtmp_app,
self.rtmp_endpoint.clone(),
Box::new(handler_generator),
self.is_keyframe_metadata_key,
self.pts_offset_metadata_key,
&futures_channel,
);
let path = path.clone();
let step = FfmpegHlsStep {
status: StepStatus::Created,
stream_reader: reader,
path: path.clone(),
};
let ffmpeg_endpoint = self.ffmpeg_endpoint.clone();
futures_channel.send_on_generic_future_completion(async move {
ffmpeg_endpoint.closed().await;
FutureResult::FfmpegEndpointGone
});
futures_channel.send_on_generic_future_completion(async move {
let result = tokio::fs::create_dir_all(&path).await;
FutureResult::HlsPathCreated(result)
});
let status = step.status.clone();
Ok((Box::new(step), status))
}
}
impl WorkflowStep for FfmpegHlsStep {
fn execute(
&mut self,
inputs: &mut StepInputs,
outputs: &mut StepOutputs,
futures_channel: WorkflowStepFuturesChannel,
) -> StepStatus {
if let StepStatus::Error { message } = &self.stream_reader.status {
error!("external stream reader is in error status, so putting the step in in error status as well.");
return StepStatus::Error {
message: message.to_string(),
};
}
for future_result in inputs.notifications.drain(..) {
match future_result.downcast::<FutureResult>() {
Err(future_result) => {
// Not a future we can handle
self.stream_reader
.handle_resolved_future(future_result, &futures_channel)
}
Ok(future_result) => match *future_result {
FutureResult::FfmpegEndpointGone => {
error!("Ffmpeg endpoint has disappeared. Closing all streams");
self.stream_reader.stop_all_streams();
return StepStatus::Error {
message: "Ffmpeg endpoint gone".to_string(),
};
}
FutureResult::HlsPathCreated(result) => match result {
Ok(()) => {
self.status = StepStatus::Active;
}
Err(error) => {
error!("Could not create HLS path: '{}': {:?}", self.path, error);
return StepStatus::Error {
message: format!(
"Could not create HLS path: '{}': {:?}",
self.path, error
),
};
}
},
},
};
}
for media in inputs.media.drain(..) {
self.stream_reader
.handle_media(media, outputs, &futures_channel);
}
self.status.clone()
}
}
impl Drop for FfmpegHlsStep {
fn drop(&mut self) {
self.stream_reader.stop_all_streams();
}
}
impl FfmpegParameterGenerator for ParamGenerator {
fn form_parameters(&self, stream_id: &StreamId, stream_name: &str) -> FfmpegParams {
FfmpegParams {
read_in_real_time: true,
input: format!("rtmp://localhost/{}/{}", self.rtmp_app, stream_id.0),
video_transcode: VideoTranscodeParams::Copy,
audio_transcode: AudioTranscodeParams::Copy,
scale: None,
bitrate_in_kbps: None,
target: TargetParams::Hls {
path: format!(
"{}/{}.m3u8",
self.path,
self.stream_name.as_deref().unwrap_or(stream_name)
),
max_entries: Some(self.segment_count),
segment_length: self.segment_duration,
},
}
}
}
fn get_rtmp_app(id: String) -> String {
format!("ffmpeg-hls-{}", id)
}
| rust | MIT | 3d732616da3bc8976dbcd5f859758ed3f06dc38f | 2026-01-04T20:17:23.887411Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/lib.rs | src/lib.rs | //! Fast cryptographic accumulator and vector commitment library, originally written by Cambrian
//! Technologies [\[GitHub\]](https://github.com/cambrian/accumulator).
//!
//! **Disclaimer**: This library is intended to be production-quality code, but it has not been
//! independently-audited for correctness or tested to a critical degree. As such, please treat this
//! library as **research-grade** for the time being.
//!
//! # Important Note
//!
//! To ensure correspondence between accumulator methods and logical set operations in your
//! application, you must ensure that **no element is accumulated twice**. In particular, deleting
//! a doubly-accumulated element will remove only one "copy" of it from the accumulator, meaning
//! that its membership can still be verified. Hence, an accumulator without this invariant can be
//! viewed as a multiset.
//!
//! # What is an accumulator?
//!
//! An accumulator is a cryptographic primitive which functions essentially as a secure
//! decentralized set. It allows parties to maintain consensus on a set of values via a
//! _succinct binding commitment_ as well as to issue _efficiently verifiable (non)membership
//! proofs_ for elements of interest, all without requiring any party to store the entire set.
//!
//! Similarly to a Merkle tree, the accumulator stores its state commitment in constant space. A
//! notable difference, however, is that its inclusion and exclusion proofs also take up constant
//! space, and can be verified in constant time. For a far more detailed discussion of accumulators
//! as implemented here, see _Batching Techniques for Accumulators with Applications to IOPs and
//! Stateless Blockchains_ (Boneh, Bünz, and Fisch 2018)
//! [\[Link\]](https://eprint.iacr.org/2018/1188.pdf).
//!
//! Throughout our code, we refer to this paper as `BBF`. We also refer to another paper, _Universal
//! Accumulators with Efficient Nonmembership Proofs_ (Li, Li, Xue 2007)
//! [\[Link\]](https://link.springer.com/content/pdf/10.1007/978-3-540-72738-5_17.pdf), abbreviated
//! henceforth as `LLX`.
//!
//! # What is a vector commitment?
//!
//! A vector commitment (VC) is a closely-related primitive, distinguished from an accumulator in
//! that it provides a _position-binding_ commitment to state. That is, a VC allows parties to
//! prove or disprove that a certain element exists at a certain position.
//!
//! (Think VC : Vector :: Accumulator : Set.)
//!
//! Our vector commitment implementation is a work-in-progress (WIP), and should be treated with
//! even more skepticism than our accumulators.
//!
//! # Usage
//! ```
//! // A very basic example.
//! use accumulator::Accumulator;
//! use accumulator::group::Rsa2048;
//!
//! let acc = Accumulator::<Rsa2048, &'static str>::empty();
//!
//! // Accumulate "dog" and "cat". The `add_with_proof` method returns the new accumulator state
//! // and a proof that you accumulated "dog" and "cat".
//! let (acc, proof) = acc.add_with_proof(&["dog", "cat"]);
//!
//! // A network participant who sees (acc, proof, and ["dog", "cat"]) can verify that the update
//! // was formed correctly ...
//! assert!(acc.verify_membership_batch(&["dog", "cat"], &proof));
//!
//! // ... and trying to verify something that has not been accumulated will fail.
//! assert!(!acc.verify_membership(&"cow", &proof));
//! ```
//!
//! Typical users of this library will access public-facing routines on `accumulator` and
//! `vector_commitment`. However, we also export internal modules for useful traits, types (such as
//! the `Rsa2048` group), and specialized procedures. **Use internal components at your own risk**.
//!
//! You can find a more interesting application of our library
//! [here](https://github.com/cambrian/accumulator-demo), where we create a proof-of-concept for
//! stateless Bitcoin nodes!
//!
//! # Groups
//!
//! Accumulator and vector commitment operations take place over algebraic groups with certain
//! cryptographic properties. We provide implementations for two suitable groups:
//! (1) an RSA group with the [RSA-2048 modulus](https://en.wikipedia.org/wiki/RSA_numbers#RSA-2048)
//! and (2) an ideal class group with a fixed discriminant generated by OpenSSL.
//!
//! The RSA group is fast but relies on the security of the RSA-2048 modulus and needs trusted
//! setup if using a different modulus. The class group is slower but eliminates the need for a
//! trusted setup. For more on class groups, please visit this
//! [thorough explainer](https://www.michaelstraka.com/posts/classgroups/) by contributor Michael
//! Straka.
//!
//! # Performance
//!
//! Most accumulator or vector commitment functions will bottleneck in hashing to large primes. To
//! alleviate this, we created a zero-allocation `U256` type that uses the low-level `mpn_`
//! functions in [GMP](https://gmplib.org). Our `hash_to_prime` uses this type internally.
//!
//! Class groups are currently not performant for any meaningful use case. A pull request is in the
//! works to drastically improve their performance using techniques learned from the
//! [Chia VDF competition](https://github.com/Chia-Network/vdf-competition).
#![allow(clippy::unknown_clippy_lints)]
#![allow(clippy::many_single_char_names)]
#![allow(clippy::empty_enum)]
#![warn(missing_docs)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate arrayref;
mod accumulator;
pub use crate::accumulator::*;
mod vector_commitment;
pub use vector_commitment::*;
pub mod group;
pub mod hash;
pub mod proof;
#[allow(missing_docs)]
pub mod uint;
pub mod util;
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/vector_commitment.rs | src/vector_commitment.rs | //! Vector commitment library, built on a generic group interface. **Very much a WIP.**
use super::accumulator::{Accumulator, MembershipProof, NonmembershipProof, Witness};
use crate::group::UnknownOrderGroup;
use rug::Integer;
use std::collections::HashSet;
#[derive(Debug)]
/// The different types of vector commitment errors.
pub enum VCError {
/// When there are conflicting indices in the vector commitment.
ConflictingIndices,
/// When an opening fails.
InvalidOpen,
/// Unexpected state during an update.
UnexpectedState,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// A vector commitment, wrapping an underlying accumulator. The accumulator contains indices of an
/// abstract vector where the corresponding bit is True.
pub struct VectorCommitment<G: UnknownOrderGroup>(Accumulator<G, Integer>);
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// A vector commitment proof.
pub struct VectorProof<G: UnknownOrderGroup> {
membership_proof: MembershipProof<G, Integer>,
nonmembership_proof: NonmembershipProof<G, Integer>,
}
fn group_elems_by_bit(bits: &[(bool, Integer)]) -> Result<(Vec<Integer>, Vec<Integer>), VCError> {
let mut elems_with_one = vec![];
let mut elems_with_zero = vec![];
let mut seen_indices = HashSet::new();
for (bit, i) in bits {
if !seen_indices.insert(i) {
return Err(VCError::ConflictingIndices);
}
if *bit {
elems_with_one.push(i.clone());
} else {
elems_with_zero.push(i.clone());
}
}
Ok((elems_with_zero, elems_with_one))
}
impl<G: UnknownOrderGroup> VectorCommitment<G> {
/// Initializes a new vector commitment (VC).
pub fn empty() -> Self {
Self(Accumulator::<G, Integer>::empty())
}
/// Updates a VC with a list of values and indices.
///
/// # Arguments
///
/// * `vc_acc_set` - All indices that are set (True).
/// * `bits` - Tuples (truth value, bit index) to set.
///
/// Uses a move instead of a `&self` reference to prevent accidental use of the old VC state.
pub fn update(
vc: Self,
vc_acc_set: &[Integer],
bits: &[(bool, Integer)],
) -> Result<(Self, VectorProof<G>), VCError> {
let (elems_with_zero, elems_with_one) = group_elems_by_bit(&bits)?;
let (new_acc, membership_proof) = vc.0.add_with_proof(&elems_with_one);
let nonmembership_proof = new_acc
.prove_nonmembership(vc_acc_set, &elems_with_zero)
.map_err(|_| VCError::UnexpectedState)?;
Ok((
Self(new_acc),
VectorProof {
membership_proof,
nonmembership_proof,
},
))
}
/// Opens/generates a commitment to indices in the VC.
///
/// # Arguments
/// * `vc_acc_set` - All indices that are set (True).
/// * `zero_bits` - Indices you want to prove are unset (False).
/// * `one_bit_witnesses` - Indices you want to prove are set (True) and their witnesses.
pub fn open(
vc: &Self,
vc_acc_set: &[Integer],
zero_bits: &[Integer],
one_bit_witnesses: &[(Integer, Witness<G, Integer>)],
) -> Result<VectorProof<G>, VCError> {
let membership_proof = vc
.0
.prove_membership(one_bit_witnesses)
.map_err(|_| VCError::InvalidOpen)?;
let nonmembership_proof = vc
.0
.prove_nonmembership(vc_acc_set, zero_bits)
.map_err(|_| VCError::InvalidOpen)?;
Ok(VectorProof {
membership_proof,
nonmembership_proof,
})
}
/// Verifies a commitment to indices in the VC.
///
/// # Arguments
///
/// * `bits` - Tuples (truth value, bit index) to verify.
/// * `VectorProof` - A `VectorProof` to verify against.
pub fn verify(
vc: &Self,
bits: &[(bool, Integer)],
VectorProof {
membership_proof,
nonmembership_proof,
}: &VectorProof<G>,
) -> bool {
let group_result = group_elems_by_bit(&bits);
if group_result.is_err() {
return false;
}
let (elems_with_zero, elems_with_one) = group_result.unwrap();
let verified_membership = vc
.0
.verify_membership_batch(&elems_with_one, membership_proof);
let verified_nonmembership = vc
.0
.verify_nonmembership(&elems_with_zero, nonmembership_proof);
verified_membership && verified_nonmembership
}
}
// TODO: Write tests.
#[cfg(test)]
mod tests {}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/uint.rs | src/uint.rs | //! Zero-allocation U256 and U512 types built on GMP. We created this module specifically for our
//! use case of implementing primality checking over 256-bit integers, but it may be worth
//! polishing a bit for more general use.
//!
//! Obviously there are a lot of `unsafe` blocks to work with GMP. Take care when using this module
//! because there may be bugs we did not catch.
//!
//! TODO: Benchmark our U256 vs. 256-bit `rug::Integer` vs. Parity U256.
#![allow(clippy::cast_sign_loss)]
use gmp_mpfr_sys::gmp;
use gmp_mpfr_sys::gmp::mpz_t;
use rug::integer::Order;
use rug::Integer;
use std::cmp::{min, Ord, Ordering, PartialOrd};
use std::convert::From;
use std::mem::transmute;
use std::ops;
use std::ptr;
macro_rules! u_types {
($($t:ident,$size:expr),+) => {
$(
#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy)]
pub struct $t {
// Field `size` also denotes the sign of the number, while `limbs` reflect only the
// magnitude.
// We keep size >= 0 except in very rare circumstances.
size: i64,
limbs: [u64; $size],
}
impl $t {
fn data(&self) -> *mut u64 {
&self.limbs as *const u64 as *mut u64
}
fn normalize_size(&mut self) {
self.size = 0;
for i in (0..$size).rev() {
if self.limbs[i] != 0 {
self.size = (i + 1) as i64;
break;
}
}
}
// The cast from `i64` to `i32` is fine since |`size`| is <= 4 for `U256`, and <= 8 for
// `U512`.
#[allow(clippy::cast_possible_truncation)]
fn as_mpz(&self) -> mpz_t {
mpz_t {
size: self.size as i32,
d: std::ptr::NonNull::new((self.data())).unwrap(),
alloc: $size,
}
}
pub fn zero() -> Self {
Self { size: 0, limbs: [0; $size] }
}
pub fn is_zero(&self) -> bool {
self.size == 0
}
pub fn one() -> Self {
let mut limbs = [0; $size];
limbs[0] = 1;
Self { size: 1, limbs }
}
pub fn is_odd(&self) -> bool {
self.limbs[0] & 1 == 1
}
#[allow(clippy::if_not_else)]
/// Panics if `m == 0`.
pub fn mod_inv(self, m: &Self) -> Option<Self> {
let mut out = Self::zero();
let outmpz = out.as_mpz();
let s = self.as_mpz();
let m = m.as_mpz();
let exists = unsafe { gmp::mpz_invert(mut_ptr(&outmpz), mut_ptr(&s), mut_ptr(&m)) };
if exists != 0 {
out.size = i64::from(outmpz.size);
Some(out)
}
else {
None
}
}
/// Panics if `m == 0`.
pub fn pow_mod(self, e: Self, m: &Self) -> Self {
let mut out = Self::zero();
let outmpz = out.as_mpz();
let s = self.as_mpz();
let e = e.as_mpz();
let m = m.as_mpz();
unsafe { gmp::mpz_powm(mut_ptr(&outmpz), mut_ptr(&s), mut_ptr(&e), mut_ptr(&m)) };
out.size = i64::from(outmpz.size);
out
}
pub fn is_perfect_square(&self) -> bool {
let issqr = unsafe { gmp::mpn_perfect_square_p(self.data(), self.size) };
issqr != 0
}
pub fn jacobi(a: i32, b: &Self) -> i32 {
let mut a_data = 0;
let a = i32_to_mpz(a, &mut a_data);
let b = b.as_mpz();
unsafe { gmp::mpz_jacobi(&a as *const mpz_t, &b as *const mpz_t) }
}
pub fn is_congruent(self, i: i32, m: &Self) -> bool {
let mut data = 0;
let x = i32_to_mpz(i, &mut data);
let s = self.as_mpz();
let m = m.as_mpz();
let res = unsafe { gmp::mpz_congruent_p(mut_ptr(&s), mut_ptr(&x), mut_ptr(&m)) };
res != 0
}
pub fn is_divisible_u(&self, u: u64) -> bool {
let s = self.as_mpz();
let divisible = unsafe {gmp::mpz_divisible_ui_p(mut_ptr(&s), u)};
divisible != 0
}
/// Panics if `buf` is not large enough.
pub fn write_binary(&self, buf: &mut [u8]) -> usize {
unsafe { gmp::mpn_get_str(mut_ptr(&buf[0]), 2, self.data(), self.size) }
}
pub fn from_be_bytes(bytes: &[u8]) -> Self {
let x = Self::zero();
unsafe { gmp::mpn_set_str(x.data(), &bytes[0] as *const u8, bytes.len(), 256) };
x
}
}
impl PartialEq<u64> for $t {
fn eq(&self, u: &u64) -> bool {
(*u == 0 && self.size == 0) || (self.size == 1 && self.limbs[0] == *u)
}
}
impl From<[u64; $size]> for $t {
fn from(limbs: [u64; $size]) -> Self {
let mut x = Self { size: 0, limbs };
x.normalize_size();
x
}
}
impl From<u64> for $t {
fn from(x: u64) -> Self {
let mut limbs = [0; $size];
limbs[0] = x;
Self::from(limbs)
}
}
/// Lower-endian `bytes`.
impl From<[u8; $size * 8]> for $t {
fn from(bytes: [u8; $size * 8]) -> Self {
let chunks = unsafe { transmute::<[u8; $size * 8], [[u8; 8]; $size]>(bytes) };
let mut limbs = [0; $size];
for i in 0..$size {
limbs[i] = u64::from_le_bytes(chunks[i]);
}
Self::from(limbs)
}
}
/// Lower-endian `bytes`.
impl From<&[u8; $size * 8]> for $t {
fn from(bytes: &[u8; $size * 8]) -> Self {
let chunks = unsafe { transmute::<[u8; $size * 8], [[u8; 8]; $size]>(*bytes) };
let mut limbs = [0; $size];
for i in 0..$size {
limbs[i] = u64::from_le_bytes(chunks[i]);
}
Self::from(limbs)
}
}
impl PartialOrd for $t {
fn partial_cmp(&self, x: &Self) -> Option<Ordering> {
let x = unsafe { gmp::mpn_cmp(self.data(), x.data(), $size) };
Some({
if x < 0 {
Ordering::Less
} else if x == 0 {
Ordering::Equal
} else {
Ordering::Greater
}
})
}
}
impl Ord for $t {
fn cmp(&self, x: &Self) -> Ordering {
let x = unsafe { gmp::mpn_cmp(self.data(), x.data(), $size) };
if x < 0 {
Ordering::Less
} else if x == 0 {
Ordering::Equal
} else {
Ordering::Greater
}
}
}
impl ops::ShlAssign<u32> for $t {
fn shl_assign(&mut self, mut x: u32) {
while x != 0 {
let sz = min(gmp::LIMB_BITS as u32, x);
x -= sz;
unsafe { gmp::mpn_lshift(self.data(), self.data(), $size, sz) };
}
self.normalize_size();
}
}
impl ops::Shl<u32> for $t {
type Output = Self;
fn shl(self, x: u32) -> Self {
let mut y = self;
y <<= x;
y
}
}
impl ops::ShrAssign<u32> for $t {
fn shr_assign(&mut self, mut x: u32) {
while x != 0 {
let sz = min(gmp::LIMB_BITS as u32, x);
x -= sz;
unsafe { gmp::mpn_rshift(self.data(), self.data(), $size, sz) };
}
self.normalize_size();
}
}
impl ops::Shr<u32> for $t {
type Output = Self;
fn shr(self, x: u32) -> Self {
let mut y = self;
y >>= x;
y
}
}
impl ops::AddAssign for $t {
/// Panics if result overflows.
fn add_assign(&mut self, x: Self) {
let carry = unsafe { gmp::mpn_add_n(self.data(), self.data(), x.data(), $size) };
assert!(carry == 0);
self.normalize_size();
}
}
impl ops::Add for $t {
/// Panics if result overflows.
type Output = Self;
fn add(self, x: Self) -> Self {
let mut y = self;
y += x;
y
}
}
impl ops::Add<u64> for $t {
type Output = Self;
/// Panics if result overflows.
fn add(self, x: u64) -> Self {
self + Self::from(x)
}
}
impl ops::SubAssign for $t {
/// Panics if result is negative.
fn sub_assign(&mut self, x: Self) {
let borrow = unsafe { gmp::mpn_sub_n(self.data(), self.data(), x.data(), $size) };
assert!(borrow == 0);
self.normalize_size();
}
}
impl ops::Sub for $t {
type Output = Self;
/// Panics if result is negative.
fn sub(self, x: Self) -> Self {
let mut y = self;
y -= x;
y
}
}
impl ops::Sub<u64> for $t {
type Output = Self;
/// Panics if result is negative.
fn sub(self, x: u64) -> Self {
self - Self::from(x)
}
}
impl ops::Sub<u64> for &$t {
type Output = $t;
/// Panics if result is negative.
fn sub(self, x: u64) -> $t {
*self - $t::from(x)
}
}
impl ops::Rem<&Self> for $t {
type Output = Self;
fn rem(self, x: &Self) -> Self {
if x.size > self.size {
return self;
}
let (y, mut rem) = (Self::zero(), Self::zero());
unsafe {
gmp::mpn_tdiv_qr(
y.data(),
rem.data(),
0,
self.data(),
self.size,
x.data(),
x.size,
)
};
rem.normalize_size();
rem
}
}
impl ops::Rem for $t {
type Output = Self;
fn rem(self, x: Self) -> Self {
#![allow(clippy::op_ref)]
self % &x
}
}
impl ops::RemAssign<&Self> for $t {
fn rem_assign(&mut self, x: &Self) {
if x.size > self.size {
return;
}
let y = Self::zero();
unsafe {
gmp::mpn_tdiv_qr(
y.data(),
self.data(),
0,
self.data(),
self.size,
x.data(),
x.size,
)
};
self.normalize_size();
}
}
impl ops::RemAssign for $t {
fn rem_assign(&mut self, x: Self) {
#![allow(clippy::op_ref)]
*self %= &x;
}
}
impl ops::Div<&Self> for $t {
type Output = Self;
fn div(self, x: &Self) -> Self {
if x.size > self.size {
return self;
}
let (mut y, rem) = (Self::zero(), Self::zero());
unsafe {
gmp::mpn_tdiv_qr(
y.data(),
rem.data(),
0,
self.data(),
self.size,
x.data(),
x.size,
)
};
y.normalize_size();
y
}
}
impl ops::Div for $t {
type Output = Self;
fn div(self, x: Self) -> Self {
#![allow(clippy::op_ref)]
self / &x
}
}
impl From<$t> for Integer {
fn from(x: $t) -> Self {
Self::from_digits(&x.limbs, Order::Lsf)
}
}
)+
}
}
u_types!(U256, 4, U512, 8);
impl U512 {
/// Returns the lower half of this `U512` as a `U256`.
/// TODO: Make checked?
pub fn low_u256(self) -> U256 {
let mut x = unsafe { transmute::<Self, (U256, [u64; 4])>(self) }.0;
x.normalize_size();
x
}
}
impl From<&U256> for U512 {
fn from(x: &U256) -> Self {
let mut limbs = [0; 8];
limbs[..4].copy_from_slice(&x.limbs);
Self {
size: x.size,
limbs,
}
}
}
impl From<U256> for U512 {
fn from(x: U256) -> Self {
Self::from(&x)
}
}
// This gets its own implementation for performance.
impl ops::Rem<&U256> for U512 {
type Output = U256;
fn rem(self, x: &U256) -> U256 {
if x.size > self.size {
return self.low_u256();
}
let (y, mut rem) = (Self::zero(), U256::zero());
unsafe {
gmp::mpn_tdiv_qr(
y.data(),
rem.data(),
0,
self.data(),
self.size,
x.data(),
x.size,
)
};
rem.normalize_size();
rem
}
}
impl ops::Rem<U256> for U512 {
type Output = U256;
fn rem(self, x: U256) -> U256 {
#![allow(clippy::op_ref)]
self % &x
}
}
impl U256 {
/// Returns (result of removing all `f`s, number of `f`s removed)
pub fn remove_factor(self, f: Self) -> (Self, u64) {
// For some reason this needs extra scratch space.
let mut out = U512::zero();
let outmpz = out.as_mpz();
let s = self.as_mpz();
let f = f.as_mpz();
let c = unsafe { gmp::mpz_remove(mut_ptr(&outmpz), mut_ptr(&s), mut_ptr(&f)) };
out.size = i64::from(outmpz.size);
(out.low_u256(), c)
}
}
/// It turns out to be faster to provide multiplication as `U256 * U256 -> U512`, because it lets us
/// use `mpn_mul_n` instead of `mpn_mul`.
impl ops::Mul<&Self> for U256 {
type Output = U512;
fn mul(self, x: &Self) -> U512 {
let mut y = U512::zero();
unsafe { gmp::mpn_mul_n(y.data(), self.data(), x.data(), 4) };
y.normalize_size();
y
}
}
impl ops::Mul for U256 {
type Output = U512;
fn mul(self, x: Self) -> U512 {
#![allow(clippy::op_ref)]
self * &x
}
}
#[allow(unused_mut)]
fn mut_ptr<T>(mut t: &T) -> *mut T {
t as *const T as *mut T
}
pub fn u256<T>(t: T) -> U256
where
U256: From<T>,
{
U256::from(t)
}
pub fn u512<T>(t: T) -> U512
where
U512: From<T>,
{
U512::from(t)
}
fn i32_to_mpz(i: i32, data: &mut u64) -> mpz_t {
*data = i.abs() as u64;
mpz_t {
size: i.signum(),
d: std::ptr::NonNull::new(data).unwrap() ,
alloc: 1,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_add() {
assert!(u256(1) + u256(0) == u256(1));
assert!(u256(1) + u256(2) == u256(3));
assert!(u256([0, 1, 0, 0]) + u256([0, 1, 0, 0]) == u256([0, 2, 0, 0]));
assert!(u256([0, 1, 0, 0]) + u256([0, 1, 1, 1]) == u256([0, 2, 1, 1]));
}
#[should_panic(expected = "assertion failed: carry == 0")]
#[test]
fn test_add_overflow() {
let _ = u256([0, 0, 0, u64::max_value()]) + u256([0, 0, 0, u64::max_value()]);
}
#[test]
fn test_sub() {
assert!(u256(1) - u256(0) == u256(1));
assert!(u256(1) - u256(1) == u256(0));
assert!(u256([0, 1, 0, 0]) - u256([0, 1, 0, 0]) == u256([0, 0, 0, 0]));
assert!(u256([0, 1, 0, 1]) - u256([0, 1, 0, 0]) == u256([0, 0, 0, 1]));
}
#[should_panic(expected = "assertion failed: borrow == 0")]
#[test]
fn test_sub_borrow() {
let _ = u256([0, 1, 0, 0]) - u256([0, 0, 1, 0]);
}
#[test]
fn test_mul() {
assert!(u256(0) * u256(3) == u512(0));
assert!(u256(2) * u256(3) == u512(6));
assert!(u256([0, 1, 0, 0]) * u256([0, 1, 0, 0]) == u512([0, 0, 1, 0, 0, 0, 0, 0]));
assert!(u256([0, 2, 0, 0]) * u256([0, 1, 0, 1]) == u512([0, 0, 2, 0, 2, 0, 0, 0]));
}
#[test]
fn test_div() {
assert!(u256(0) / u256(3) == u256(0));
assert!(u256(5) / u256(3) == u256(1));
assert!(u256(6) / u256(3) == u256(2));
assert!(u256([0, 0, 1, 0]) / u256([0, 1, 0, 0]) == u256([0, 1, 0, 0]));
}
#[test]
fn test_rem() {
assert!(u256(0) % u256(3) == u256(0));
assert!(u256(5) % u256(3) == u256(2));
assert!(u256(6) % u256(3) == u256(0));
assert!(u256([1, 0, 1, 0]) % u256([0, 1, 0, 0]) == u256(1));
}
#[test]
fn test_rem512() {
assert!(u512(0) % u256(3) == u256(0));
assert!(u512(5) % u256(3) == u256(2));
assert!(u512(6) % u256(3) == u256(0));
assert!(u512([1, 0, 1, 0, 0, 0, 0, 0]) % u256([0, 1, 0, 0]) == u256(1));
}
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/util.rs | src/util.rs | //! Miscellaneous functions used throughout the library.
use crate::group::Group;
use crate::hash::hash_to_prime;
use rug::Integer;
use std::hash::Hash;
/// Pseudo-type-level programming.
/// This trait allows us to reflect "type-level" (i.e. static) information at runtime.
pub trait TypeRep: 'static {
/// The associated type of the simulated type-level static information.
type Rep: 'static;
/// Returns the static data for the type.
fn rep() -> &'static Self::Rep;
}
/// Convenience wrapper for creating `Rug` integers.
pub fn int<T>(val: T) -> Integer
where
Integer: From<T>,
{
Integer::from(val)
}
/// Hashes its arguments to primes and takes their product.
pub fn prime_hash_product<T: Hash>(ts: &[T]) -> Integer {
ts.iter().map(hash_to_prime).product()
}
/// Computes the `(xy)`th root of `g` given the `x`th and `y`th roots of `g` and `(x, y)` coprime.
// TODO: Consider moving this to the `accumulator` module?
#[allow(clippy::similar_names)]
pub fn shamir_trick<G: Group>(
xth_root: &G::Elem,
yth_root: &G::Elem,
x: &Integer,
y: &Integer,
) -> Option<G::Elem> {
if G::exp(xth_root, x) != G::exp(yth_root, y) {
return None;
}
let (gcd, a, b) = <(Integer, Integer, Integer)>::from(x.gcd_cofactors_ref(&y));
if gcd != int(1) {
return None;
}
Some(G::op(&G::exp(xth_root, &b), &G::exp(yth_root, &a)))
}
/// Solves a linear congruence of form `ax = b mod m` for the set of solutions `x`. Solution sets
/// are characterized by integers `mu` and `v` s.t. `x = mu + vn` and `n` is any integer.
pub fn solve_linear_congruence(
a: &Integer,
b: &Integer,
m: &Integer,
) -> Option<(Integer, Integer)> {
// g = gcd(a, m) => da + em = g
let (g, d, _) = <(Integer, Integer, Integer)>::from(a.gcd_cofactors_ref(m));
// q = floor_div(b, g)
// r = b % g
let (q, r) = <(Integer, Integer)>::from(b.div_rem_floor_ref(&g));
if r != 0 {
return None;
}
let mu = (q * d) % m;
let v = m / g;
Some((mu, v))
}
/// Folds over `xs` but in a divide-and-conquer fashion: Instead of `F(F(F(F(acc, a), b), c), d))`
/// this computes `F(acc, F(F(a, b), F(c, d)))`.
pub fn divide_and_conquer<F, T: Clone, E>(f: F, acc: T, xs: &[T]) -> Result<T, E>
where
F: Fn(&T, &T) -> Result<T, E>,
{
if xs.is_empty() {
return Ok(acc);
}
Ok(f(&acc, ÷_and_conquer_(&f, xs)?)?)
}
fn divide_and_conquer_<F, T: Clone, E>(f: &F, xs: &[T]) -> Result<T, E>
where
F: Fn(&T, &T) -> Result<T, E>,
{
if xs.len() == 1 {
return Ok(xs[0].clone());
}
let mid = xs.len() / 2;
let left = &xs[..mid];
let right = &xs[mid..];
Ok(f(
÷_and_conquer_(f, left)?,
÷_and_conquer_(f, right)?,
)?)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::group::{Group, Rsa2048, UnknownOrderGroup};
use crate::util::int;
#[derive(Debug)]
enum Never {}
/// Merge-based computation of `Integer` array products. Faster than the iterative
/// `iter.product()` for really large integers.
fn merge_product(xs: &[Integer]) -> Integer {
divide_and_conquer(
|a, b| -> Result<Integer, Never> { Ok(int(a * b)) },
int(1),
&xs,
)
.unwrap()
}
#[test]
fn test_linear_congruence_solver() {
assert_eq!(
(Integer::from(-2), Integer::from(4)),
solve_linear_congruence(&Integer::from(3), &Integer::from(2), &Integer::from(4)).unwrap()
);
assert_eq!(
(Integer::from(-2), Integer::from(4)),
solve_linear_congruence(&Integer::from(3), &Integer::from(2), &Integer::from(4)).unwrap()
);
assert_eq!(
(Integer::from(1), Integer::from(2)),
solve_linear_congruence(&Integer::from(5), &Integer::from(1), &Integer::from(2)).unwrap()
);
assert_eq!(
(Integer::from(-3), Integer::from(5)),
solve_linear_congruence(&Integer::from(2), &Integer::from(4), &Integer::from(5)).unwrap()
);
assert_eq!(
(Integer::from(2491), Integer::from(529)),
solve_linear_congruence(
&Integer::from(230),
&Integer::from(1081),
&Integer::from(12167)
)
.unwrap()
);
}
#[test]
fn test_linear_congruence_solver_no_solution() {
// Let `g = gcd(a, m)`. If `b` is not divisible by `g`, there are no solutions. If `b` is
// divisible by `g`, there are `g` solutions.
let result =
solve_linear_congruence(&Integer::from(33), &Integer::from(7), &Integer::from(143));
assert!(result.is_none());
let result =
solve_linear_congruence(&Integer::from(13), &Integer::from(14), &Integer::from(39));
assert!(result.is_none());
}
#[test]
fn test_shamir_trick() {
let (x, y, z) = (&int(13), &int(17), &int(19));
let xth_root = Rsa2048::exp(&Rsa2048::unknown_order_elem(), &int(y * z));
let yth_root = Rsa2048::exp(&Rsa2048::unknown_order_elem(), &int(x * z));
let xyth_root = Rsa2048::exp(&Rsa2048::unknown_order_elem(), z);
assert!(shamir_trick::<Rsa2048>(&xth_root, &yth_root, x, y) == Some(xyth_root));
}
#[test]
fn test_shamir_trick_failure() {
let (x, y, z) = (&int(7), &int(14), &int(19)); // Inputs not coprime.
let xth_root = Rsa2048::exp(&Rsa2048::unknown_order_elem(), &int(y * z));
let yth_root = Rsa2048::exp(&Rsa2048::unknown_order_elem(), &int(x * z));
assert!(shamir_trick::<Rsa2048>(&xth_root, &yth_root, x, y) == None);
}
#[test]
fn test_merge_product() {
let ints = vec![int(3), int(5), int(7), int(9), int(11)];
assert!(merge_product(&ints) == int(10395));
}
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/accumulator.rs | src/accumulator.rs | //! Accumulator library, built on a generic group interface.
use crate::group::UnknownOrderGroup;
use crate::hash::hash_to_prime;
use crate::proof::{Poe, Poke2};
use crate::util::{divide_and_conquer, int, prime_hash_product, shamir_trick};
use rug::Integer;
use std::hash::Hash;
use std::marker::PhantomData;
#[derive(Debug)]
/// The different types of accumulator errors.
pub enum AccError {
/// Bad witness.
BadWitness,
/// Error when updating a witness.
BadWitnessUpdate,
/// Division by zero.
DivisionByZero,
/// Inexact division where exact division was expected.
InexactDivision,
/// Inputs not coprime when they were expected to be coprime.
InputsNotCoprime,
}
// See https://doc.rust-lang.org/std/marker/struct.PhantomData.html#ownership-and-the-drop-check
// for recommendations regarding phantom types. Note that we disregard the suggestion to use a
// const reference in the phantom type parameter, which causes issues for the `Send` trait.
#[derive(Debug, Eq, Hash, PartialEq)]
/// A cryptographic accumulator. Wraps a single unknown-order group element and phantom data
/// representing the type `T` being hashed-to-prime and accumulated.
pub struct Accumulator<G: UnknownOrderGroup, T> {
phantom: PhantomData<T>,
value: G::Elem,
}
// Manual clone impl required because Rust's type inference is not good. See
// https://github.com/rust-lang/rust/issues/26925.
impl<G: UnknownOrderGroup, T: Hash> Clone for Accumulator<G, T> {
fn clone(&self) -> Self {
Self {
phantom: PhantomData,
value: self.value.clone(),
}
}
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// A witness to one or more values in an accumulator, represented as an accumulator.
pub struct Witness<G: UnknownOrderGroup, T: Hash>(pub Accumulator<G, T>);
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// A succinct proof of membership (some element is in some accumulator).
pub struct MembershipProof<G: UnknownOrderGroup, T: Hash> {
/// The witness for the element in question.
pub witness: Witness<G, T>,
proof: Poe<G>,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
/// A succinct proof of nonmembership (some element is not in some accumulator).
pub struct NonmembershipProof<G: UnknownOrderGroup, T> {
phantom: PhantomData<*const T>,
d: G::Elem,
v: G::Elem,
gv_inv: G::Elem,
poke2_proof: Poke2<G>,
poe_proof: Poe<G>,
}
impl<G: UnknownOrderGroup, T: Eq + Hash> Accumulator<G, T> {
  /// Returns a new, empty accumulator.
  ///
  /// The empty accumulator is the group's designated element of unknown order, with no
  /// exponentiations applied yet.
  pub fn empty() -> Self {
    Self {
      phantom: PhantomData,
      value: G::unknown_order_elem(),
    }
  }
  /// Internal add method that also returns the prime hash product of added elements, enabling an
  /// efficient `add_with_proof`.
  fn add_(&self, elems: &[T]) -> (Self, Integer) {
    // A batch add is a single exponentiation by the product of the elements' prime hashes.
    let x = prime_hash_product(elems);
    let acc_elem = G::exp(&self.value, &x);
    (
      Self {
        phantom: PhantomData,
        value: acc_elem,
      },
      x,
    )
  }
  // The conciseness of `accumulator.add()` and low probability of confusion with implementations of
  // the `Add` trait probably justify this...
  #[allow(clippy::should_implement_trait)]
  /// Adds `elems` to the accumulator. This cannot check whether the elements have already been
  /// added, so is up to clients to ensure uniqueness.
  ///
  /// Uses a move instead of a `&self` reference to prevent accidental use of the old accumulator.
  pub fn add(self, elems: &[T]) -> Self {
    self.add_(elems).0
  }
  /// A specialized version of `add` that also returns a batch membership proof for added elements.
  pub fn add_with_proof(self, elems: &[T]) -> (Self, MembershipProof<G, T>) {
    let (acc, x) = self.add_(elems);
    // PoE certifies old_acc^x = new_acc; the old accumulator itself is the witness.
    let proof = Poe::<G>::prove(&self.value, &x, &acc.value);
    (
      acc,
      MembershipProof {
        witness: Witness(self),
        proof,
      },
    )
  }
  /// Internal delete method that also returns the prime hash product of deleted elements, enabling
  /// an efficient `delete_with_proof`.
  ///
  /// Uses a divide-and-conquer approach to running the ShamirTrick, which keeps the average input
  /// smaller: For `[a, b, c, d]` do `S(S(a, b), S(c, d))` instead of `S(S(S(a, b), c), d)`.
  fn delete_(self, elem_witnesses: &[(T, Witness<G, T>)]) -> Result<(Self, Integer), AccError> {
    let prime_witnesses = elem_witnesses
      .iter()
      .map(|(elem, witness)| (hash_to_prime(elem), witness.0.value.clone()))
      .collect::<Vec<_>>();
    // A witness is valid iff raising it to the element's prime hash yields the accumulator.
    for (p, witness_elem) in &prime_witnesses {
      if G::exp(&witness_elem, &p) != self.value {
        return Err(AccError::BadWitness);
      }
    }
    // Pairwise-combine witnesses via the Shamir trick; the result is the accumulator with all
    // given elements removed, alongside the product of their prime hashes.
    // NOTE(review): the `unwrap` on `shamir_trick` assumes the combined exponents are coprime,
    // i.e. no two distinct elements hash to the same prime — confirm this invariant holds for
    // duplicate inputs.
    let (prime_product, acc_elem) = divide_and_conquer(
      |(p1, v1), (p2, v2)| Ok((int(p1 * p2), shamir_trick::<G>(&v1, &v2, p1, p2).unwrap())),
      (int(1), self.value),
      &prime_witnesses[..],
    )?;
    Ok((
      Self {
        phantom: PhantomData,
        value: acc_elem.clone(),
      },
      prime_product,
    ))
  }
  /// Removes the elements in `elem_witnesses` from the accumulator.
  ///
  /// # Arguments
  ///
  /// * `elem_witnesses` - Tuples consisting of (element to delete, element's witness).
  ///
  /// Uses a move instead of a `&self` reference to prevent accidental use of the old accumulator.
  pub fn delete(self, elem_witnesses: &[(T, Witness<G, T>)]) -> Result<Self, AccError> {
    Ok(self.delete_(elem_witnesses)?.0)
  }
  /// A specialized version of `delete` that also returns a batch membership proof for deleted
  /// elements.
  pub fn delete_with_proof(
    self,
    elem_witnesses: &[(T, Witness<G, T>)],
  ) -> Result<(Self, MembershipProof<G, T>), AccError> {
    let (acc, prime_product) = self.clone().delete_(elem_witnesses)?;
    // PoE certifies new_acc^(product of deleted primes) = old_acc, so the new accumulator is a
    // witness for the deleted elements w.r.t. the old accumulator.
    let proof = Poe::<G>::prove(&acc.value, &prime_product, &self.value);
    Ok((
      acc.clone(),
      MembershipProof {
        witness: Witness(acc),
        proof,
      },
    ))
  }
  /// Computes the batch membership proof for the elements in `elem_witnesses` w.r.t this
  /// accumulator.
  ///
  /// # Arguments
  ///
  /// * `elem_witnesses` - Tuples consisting of (element to prove, element's witness).
  pub fn prove_membership(
    &self,
    elem_witnesses: &[(T, Witness<G, T>)],
  ) -> Result<MembershipProof<G, T>, AccError> {
    // The batch witness is this accumulator with the elements removed; `delete` also validates
    // each supplied witness (returning `BadWitness` on failure).
    let witness_accum = self.clone().delete(elem_witnesses)?;
    let prod = elem_witnesses
      .iter()
      .map(|(t, _)| hash_to_prime(t))
      .product();
    let proof = Poe::<G>::prove(&witness_accum.value, &prod, &self.value);
    Ok(MembershipProof {
      witness: Witness(witness_accum),
      proof,
    })
  }
  /// Verifies a membership proof against the current accumulator and an element `t` whose
  /// inclusion is being proven.
  pub fn verify_membership(
    &self,
    t: &T,
    MembershipProof { witness, proof }: &MembershipProof<G, T>,
  ) -> bool {
    // Checks witness^hash_to_prime(t) = acc via the supplied proof of exponentiation.
    let exp = hash_to_prime(t);
    Poe::verify(&witness.0.value, &exp, &self.value, proof)
  }
  /// Batch version of `verify_membership` for multiple `elems`.
  pub fn verify_membership_batch(
    &self,
    elems: &[T],
    MembershipProof { witness, proof }: &MembershipProof<G, T>,
  ) -> bool {
    let exp = prime_hash_product(elems);
    Poe::verify(&witness.0.value, &exp, &self.value, proof)
  }
  /// Updates a `witness` for `tracked_elems` w.r.t the current accumulator, adding the elements in
  /// `untracked_additions` to the tracked set and removing the elements in `untracked_deletions`
  /// from the tracked set.
  ///
  /// See Section 4.2 of LLX for implementation details.
  pub fn update_membership_witness(
    &self,
    witness: Witness<G, T>,
    tracked_elems: &[T],
    untracked_additions: &[T],
    untracked_deletions: &[T],
  ) -> Result<Witness<G, T>, AccError> {
    // x: prime product of the elements the witness tracks; x_hat: prime product of deletions.
    let x = prime_hash_product(tracked_elems);
    let x_hat = prime_hash_product(untracked_deletions);
    // A tracked element cannot simultaneously be an untracked addition or deletion.
    for elem in tracked_elems {
      if untracked_additions.contains(elem) || untracked_deletions.contains(elem) {
        return Err(AccError::BadWitnessUpdate);
      }
    }
    // Bezout coefficients: a*x + b*x_hat = gcd(x, x_hat) = 1.
    let (gcd, a, b) = <(Integer, Integer, Integer)>::from(x.gcd_cofactors_ref(&x_hat));
    // NOTE(review): panics (rather than returning an error) if the products are not coprime —
    // given the disjointness check above, this should only occur on a prime-hash collision.
    assert!(gcd == int(1));
    // Fold the untracked additions into the witness, then combine w^b with acc^a per LLX 4.2.
    let w = witness.0.add(untracked_additions);
    let w_to_b = G::exp(&w.value, &b);
    let acc_new_to_a = G::exp(&self.value, &a);
    Ok(Witness(Self {
      phantom: PhantomData,
      value: G::op(&w_to_b, &acc_new_to_a),
    }))
  }
  /// Computes the batch non-membership proof for the elements in `elems` w.r.t this accumulator
  /// and its `acc_set`.
  ///
  /// # Arguments
  ///
  /// * `acc_set` - The set of elements committed to by this accumulator.
  /// * `elems` - The set of elements you want to prove are not in `acc_set`.
  pub fn prove_nonmembership(
    &self,
    acc_set: &[T],
    elems: &[T],
  ) -> Result<NonmembershipProof<G, T>, AccError> {
    // x: prime product of alleged non-members; s: prime product of the accumulated set.
    let x: Integer = elems.iter().map(hash_to_prime).product();
    let s = acc_set.iter().map(hash_to_prime).product();
    // Bezout: a*x + b*s = 1 exists iff gcd(x, s) = 1, i.e. no `elems` prime divides `s`.
    let (gcd, a, b) = <(Integer, Integer, Integer)>::from(x.gcd_cofactors_ref(&s));
    if gcd != int(1) {
      return Err(AccError::InputsNotCoprime);
    }
    let g = G::unknown_order_elem();
    let d = G::exp(&g, &a);
    let v = G::exp(&self.value, &b);
    // g * v^{-1} = g^{1 - b*s} = g^{a*x} = d^x, which the PoE below certifies.
    let gv_inv = G::op(&g, &G::inv(&v));
    let poke2_proof = Poke2::prove(&self.value, &b, &v);
    let poe_proof = Poe::prove(&d, &x, &gv_inv);
    Ok(NonmembershipProof {
      phantom: PhantomData,
      d,
      v,
      gv_inv,
      poke2_proof,
      poe_proof,
    })
  }
  /// Verifies a non-membership proof against the current accumulator and elements `elems` whose
  /// non-inclusion is being proven.
  pub fn verify_nonmembership(
    &self,
    elems: &[T],
    NonmembershipProof {
      d,
      v,
      gv_inv,
      poke2_proof,
      poe_proof,
      ..
    }: &NonmembershipProof<G, T>,
  ) -> bool {
    // Recompute x from the claimed non-members, then check both sub-proofs:
    // PoKE2: some exponent b satisfies acc^b = v; PoE: d^x = g * v^{-1}.
    let x = elems.iter().map(hash_to_prime).product();
    Poke2::verify(&self.value, v, poke2_proof) && Poe::verify(d, &x, gv_inv, poe_proof)
  }
}
impl<G: UnknownOrderGroup, T: Eq + Hash> From<&[T]> for Accumulator<G, T> {
  /// Builds an accumulator committing to every element of `ts`, starting from empty.
  fn from(ts: &[T]) -> Self {
    let base = Self::empty();
    base.add(ts)
  }
}
impl<G: UnknownOrderGroup, T: Clone + Hash> Witness<G, T> {
/// Given a witness for `witness_set`, returns a witness for `witness_subset`.
///
/// The `witness_subset` must be a subset of the `witness_set`.
pub fn compute_subset_witness(
self,
witness_set: &[T],
witness_subset: &[T],
) -> Result<Self, AccError>
where
T: PartialEq,
{
for witness in witness_subset {
if !witness_set.contains(witness) {
return Err(AccError::BadWitness);
}
}
let numerator = prime_hash_product(witness_set);
let denominator = prime_hash_product(witness_subset);
let (quotient, remainder) = numerator.div_rem(denominator);
if remainder != int(0) {
return Err(AccError::InexactDivision);
}
Ok(Self(Accumulator {
phantom: PhantomData,
value: G::exp(&self.0.value, "ient),
}))
}
/// Given a witness for many `elems`, computes a sub-witness for each individual element in
/// O(N log N) time.
pub fn compute_individual_witnesses(&self, elems: &[T]) -> Vec<(T, Self)> {
let hashes = elems.iter().map(hash_to_prime).collect::<Vec<_>>();
elems
.iter()
.zip(self.root_factor(&hashes).iter())
.map(|(x, y)| (x.clone(), y.clone()))
.collect()
}
#[allow(non_snake_case)]
fn root_factor(&self, elems: &[Integer]) -> Vec<Self> {
if elems.len() == 1 {
return vec![self.clone()];
}
let half_n = elems.len() / 2;
let g_l = elems[..half_n].iter().fold(self.clone(), |sum, x| {
Self(Accumulator {
phantom: PhantomData,
value: G::exp(&sum.0.value, x),
})
});
let g_r = elems[half_n..].iter().fold(self.clone(), |sum, x| {
Self(Accumulator {
phantom: PhantomData,
value: G::exp(&sum.0.value, x),
})
});
let mut L = g_r.root_factor(&Vec::from(&elems[..half_n]));
let mut R = g_l.root_factor(&Vec::from(&elems[half_n..]));
L.append(&mut R);
L
}
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::group::{ClassGroup, Rsa2048};
  // Convenience: build an accumulator directly from a slice of elements.
  fn new_acc<G: UnknownOrderGroup, T: Hash + Eq>(data: &[T]) -> Accumulator<G, T> {
    Accumulator::<G, T>::empty().add(data)
  }
  // Instantiates a generic test body as two `#[test]` fns, one per group backend
  // (RSA-2048 and class group), optionally forwarding attributes such as `should_panic`.
  macro_rules! test_all_groups {
    ($test_func:ident, $func_name_rsa:ident, $func_name_class:ident, $($attr:meta)*) => {
      #[test]
      $(
        #[$attr]
      )*
      fn $func_name_rsa() {
        $test_func::<Rsa2048>();
      }
      #[test]
      $(
        #[$attr]
      )*
      fn $func_name_class() {
        $test_func::<ClassGroup>();
      }
    };
  }
  test_all_groups!(test_add, test_add_rsa2048, test_add_class,);
  fn test_add<G: UnknownOrderGroup>() {
    let acc = new_acc::<G, &'static str>(&["a", "b"]);
    let new_elems = ["c", "d"];
    let (acc_new, proof) = acc.add_with_proof(&new_elems);
    // The accumulator should equal g^(product of all four prime hashes).
    let acc_expected = G::exp(
      &G::unknown_order_elem(),
      &prime_hash_product(&["a", "b", "c", "d"]),
    );
    assert!(acc_new.value == acc_expected);
    assert!(acc_new.verify_membership_batch(&new_elems, &proof));
  }
  test_all_groups!(test_delete, test_delete_rsa2048, test_delete_class,);
  fn test_delete<G: UnknownOrderGroup>() {
    let acc_0 = new_acc::<G, &'static str>(&["a", "b"]);
    let (acc_1, c_proof) = acc_0.clone().add_with_proof(&["c"]);
    // Deleting "c" should return the accumulator to its pre-add state.
    let (acc_2, proof) = acc_1
      .clone()
      .delete_with_proof(&[("c", c_proof.witness)])
      .expect("valid delete expected");
    assert!(acc_2 == acc_0);
    assert!(acc_1.verify_membership(&"c", &proof));
  }
  test_all_groups!(
    test_delete_empty,
    test_delete_empty_rsa2048,
    test_delete_empty_class,
  );
  fn test_delete_empty<G: UnknownOrderGroup>() {
    // Deleting nothing is a no-op and still yields a (trivial) valid proof.
    let acc = new_acc::<G, &'static str>(&["a", "b"]);
    let (acc_new, proof) = acc
      .clone()
      .delete_with_proof(&[])
      .expect("valid delete expected");
    assert!(acc_new == acc);
    assert!(acc.verify_membership_batch(&[], &proof));
  }
  test_all_groups!(
    test_delete_bad_witness,
    test_delete_bad_witness_rsa2048,
    test_delete_bad_witness_class,
    should_panic(expected = "BadWitness")
  );
  fn test_delete_bad_witness<G: UnknownOrderGroup>() {
    // Witnesses that do not match the accumulator must be rejected.
    let acc = Accumulator::<G, &'static str>::empty();
    let a_witness = Witness(new_acc::<G, &'static str>(&["b", "c"]));
    let b_witness = Witness(new_acc::<G, &'static str>(&["a", "c"]));
    acc.delete(&[("a", a_witness), ("b", b_witness)]).unwrap();
  }
  test_all_groups!(
    test_update_membership_witness,
    test_update_membership_witness_rsa2048,
    test_update_membership_witness_class,
  );
  fn test_update_membership_witness<G: UnknownOrderGroup>() {
    let acc = new_acc::<G, &'static str>(&["a", "b", "c"]);
    let witness = Witness(new_acc::<G, &'static str>(&["c", "d"]));
    let witness_new = acc
      .update_membership_witness(witness, &["a"], &["b"], &["d"])
      .unwrap();
    // A valid witness for "a" plus "a" itself reproduces the accumulator.
    assert!(witness_new.0.add(&["a"]) == acc);
  }
  test_all_groups!(
    test_update_membership_witness_failure,
    test_update_membership_witness_failure_rsa2048,
    test_update_membership_witness_failure_class,
    should_panic(expected = "BadWitnessUpdate")
  );
  fn test_update_membership_witness_failure<G: UnknownOrderGroup>() {
    // A tracked element may not also appear in the untracked deletions.
    let acc = new_acc::<G, &'static str>(&["a", "b", "c"]);
    let witness = Witness(new_acc::<G, &'static str>(&["c", "d"]));
    acc
      .update_membership_witness(witness, &["a"], &["b"], &["a"])
      .unwrap();
  }
  test_all_groups!(
    test_prove_nonmembership,
    test_prove_nonmembership_rsa2048,
    test_prove_nonmembership_class,
  );
  fn test_prove_nonmembership<G: UnknownOrderGroup>() {
    let acc_set = ["a", "b"];
    let acc = new_acc::<G, &'static str>(&acc_set);
    let non_members = ["c", "d"];
    let proof = acc
      .prove_nonmembership(&acc_set, &non_members)
      .expect("valid proof expected");
    assert!(acc.verify_nonmembership(&non_members, &proof));
  }
  test_all_groups!(
    test_compute_sub_witness,
    test_compute_sub_witness_rsa2048,
    test_compute_sub_witness_class,
  );
  fn test_compute_sub_witness<G: UnknownOrderGroup>() {
    // Subset witness for {"a"} out of {"a", "b"} is the base raised to hash("b").
    let empty_witness = Witness(Accumulator::<G, &'static str>::empty());
    let sub_witness = empty_witness
      .compute_subset_witness(&["a", "b"], &["a"])
      .unwrap();
    let exp_quotient_expected = Witness(new_acc::<G, &'static str>(&["b"]));
    assert!(sub_witness == exp_quotient_expected);
  }
  test_all_groups!(
    test_compute_sub_witness_failure,
    test_compute_sub_witness_failure_rsa2048,
    test_compute_sub_witness_failure_class,
    should_panic(expected = "BadWitness")
  );
  fn test_compute_sub_witness_failure<G: UnknownOrderGroup>() {
    // "c" is not in the witness set, so the subset witness must be refused.
    let empty_witness = Witness(Accumulator::<G, &'static str>::empty());
    empty_witness
      .compute_subset_witness(&["a", "b"], &["c"])
      .unwrap();
  }
  fn test_compute_individual_witnesses<G: UnknownOrderGroup>() {
    let acc = new_acc::<G, &'static str>(&["a", "b", "c"]);
    let witness_multiple = Witness(new_acc::<G, &'static str>(&["a"]));
    let witnesses = witness_multiple.compute_individual_witnesses(&["b", "c"]);
    for (elem, witness) in witnesses {
      // Each individual witness, raised to its element's prime, recovers the accumulator.
      assert_eq!(acc.value, G::exp(&witness.0.value, &hash_to_prime(elem)));
    }
  }
  #[test]
  fn test_compute_individual_witnesses_rsa2048() {
    // Class version takes too long for a unit test.
    test_compute_individual_witnesses::<Rsa2048>();
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/hash/blake2b.rs | src/hash/blake2b.rs | //! `GeneralHasher` interface for `blake2_rfc`.
use super::GeneralHasher;
use blake2_rfc::blake2b::Blake2b as Blake2b_;
use std::hash::Hasher;
/// Thin wrapper around `Blake2b` from `blake2_rfc`, allowing `std::hash::Hasher` and
/// `GeneralHasher` to be implemented for the foreign hasher type (see the impls below).
pub struct Blake2b(pub Blake2b_);
impl Default for Blake2b {
fn default() -> Self {
// 32 bytes = 256 bits
Self(Blake2b_::new(32))
}
}
impl Hasher for Blake2b {
  /// We could return a truncated hash but it's easier just to not use this fn for now.
  ///
  /// # Panics
  /// Always panics; use `GeneralHasher::finalize` to obtain the full 32-byte digest instead.
  fn finish(&self) -> u64 {
    panic!("Don't use! Prefer finalize(self).")
  }
  // Feeds `bytes` into the underlying Blake2b state.
  fn write(&mut self, bytes: &[u8]) {
    Blake2b_::update(&mut self.0, bytes)
  }
}
impl GeneralHasher for Blake2b {
  type Output = [u8; 32];
  // Consumes the hasher and copies the finalized 32-byte digest into a fixed-size array.
  // `array_ref!` converts the digest slice into a `&[u8; 32]` — presumably from the `arrayref`
  // crate, imported at the crate root; confirm against the crate manifest.
  fn finalize(self) -> Self::Output {
    let res = self.0.finalize();
    *array_ref![res.as_bytes(), 0, 32]
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/hash/mod.rs | src/hash/mod.rs | //! This module wraps `blake2b_rfc` into a convenient hashing interface (`GeneralHasher`) and
//! exports the generalized `hash` function. Also exported is `hash_to_prime`, which works by
//! repeatedly `hash`ing a value together with an incrementing nonce until the output is prime.
use crate::uint::u256;
use rug::integer::Order;
use rug::Integer;
use std::hash::{Hash, Hasher};
mod blake2b;
pub use blake2b::Blake2b;
pub mod primality;
/// Like `std::hash::Hasher`, but general over output type.
///
/// `Hasher::finish` is fixed to `u64`; this trait lets implementations expose their full digest
/// (e.g. 32 bytes for Blake2b).
pub trait GeneralHasher: Hasher {
  /// The associated output type of the Hasher.
  type Output;
  /// Similar to `Hasher::finish`, but consumes `self`.
  fn finalize(self) -> Self::Output;
}
// Note: We explicitly pass in the hasher constructor so we don't have to specify its type via
// generics. Rust has poor support for type applications, so if we wanted to pass `H` at the
// type-level, we'd need to fully specify `T` as well, which is a pain in the ass.
//
// Instead of writing:
// `hash::<Blake2b, (&G::Elem, &BigUint, &G::Elem)>(&(base, exp, result))`
//
// This lets us write:
// `hash(&Blake2b::default, &(base, exp, result))`
/// Hash using the general Hasher.
///
/// This function takes in the hash constructor as an argument for convenience.
// `dyn` makes the trait-object type explicit — bare `Fn()` trait objects are deprecated in the
// 2018 edition this crate uses. The type is unchanged, so call sites like
// `hash(&Blake2b::default, t)` are unaffected.
pub fn hash<H: GeneralHasher, T: Hash + ?Sized>(new_hasher: &dyn Fn() -> H, t: &T) -> H::Output {
  let mut h = new_hasher();
  t.hash(&mut h);
  h.finalize()
}
/// Calls `hash` with a Blake2b hasher and interprets the 32-byte digest as a big-endian integer.
pub fn blake2b<T: Hash + ?Sized>(t: &T) -> Integer {
  let digest = hash(&Blake2b::default, t);
  Integer::from_digits(&digest, Order::Msf)
}
/// Hashes `t` to an odd prime.
///
/// Uses `Blake2b` as the hash function, and hashes with a counter until a prime is found via
/// probabilistic primality checking.
///
/// This function is optimized for 256-bit integers.
#[allow(clippy::module_name_repetitions)]
pub fn hash_to_prime<T: Hash + ?Sized>(t: &T) -> Integer {
  let mut counter = 0_u64;
  loop {
    // Re-hash `(t, counter)` until the resulting 256-bit value is (probably) prime.
    let mut hash = hash(&Blake2b::default, &(t, counter));
    // Make the candidate prime odd. This gives ~7% performance gain on a 2018 Macbook Pro.
    // NOTE(review): assumes `u256` treats `hash[0]` as the least-significant limb byte —
    // confirm against `crate::uint`.
    hash[0] |= 1;
    let candidate_prime = u256(hash);
    if primality::is_prob_prime(&candidate_prime) {
      return Integer::from(candidate_prime);
    }
    counter += 1;
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  #[test]
  fn test_blake2() {
    // Smoke test: hashing arbitrary bytes should not panic.
    let data = b"martian cyborg gerbil attack";
    hash(&Blake2b::default, data);
  }
  // Renamed from the truncated `test_` to describe what is actually exercised.
  #[test]
  fn test_hash_to_prime() {
    // Distinct inputs must map to distinct values, and each output must actually be prime.
    let b_1 = "boom i got ur boyfriend";
    let b_2 = "boom i got ur boyfriene";
    assert_ne!(b_1, b_2);
    let h_1 = hash_to_prime(b_1);
    let h_2 = hash_to_prime(b_2);
    assert_ne!(h_1, h_2);
    let mut digits1 = [0; 4];
    h_1.write_digits(&mut digits1, Order::Lsf);
    assert!(primality::is_prob_prime(&u256(digits1)));
    let mut digits2 = [0; 4];
    h_2.write_digits(&mut digits2, Order::Lsf);
    assert!(primality::is_prob_prime(&u256(digits2)));
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/hash/primality/mod.rs | src/hash/primality/mod.rs | //! Primality testing for U256 inputs. Use `is_prob_prime` unless you have a specific reason to use
//! a lower-level test.
use crate::uint::{u256, u512, U256};
mod constants;
use constants::{D_VALUES, SMALL_PRIMES};
/// Implements the Baillie-PSW probabilistic primality test, which is known to be deterministic over
/// all integers up to 64 bits.
///
/// Outperforms naked Miller-Rabin (i.e. iterated Fermat tests of random base) at wide `n` since
/// Fermat and Lucas pseudoprimes have been shown to be anticorrelated. Steps of BPSW are as
/// follows:
///
/// 1. Accept small primes and reject multiples of them.
/// 2. Do a single iteration of Miller-Rabin (in particular, a base-2 Fermat test).
/// 3. Do a strong probabilistic Lucas test (squares filtered during test initialization).
pub fn is_prob_prime(n: &U256) -> bool {
  // Trial division prefilter: a multiple of a small prime is prime iff it IS that prime.
  for &p in SMALL_PRIMES.iter() {
    if n.is_divisible_u(p) {
      return *n == p;
    }
  }
  // `n` is already a `&U256`; pass it directly instead of re-borrowing (`&n` was a needless
  // `&&U256` that only compiled via deref coercion).
  passes_miller_rabin_base_2(n) && passes_lucas(n)
}
/// A single iteration of the Miller-Rabin test (base-2 Fermat test).
pub fn passes_miller_rabin_base_2(n: &U256) -> bool {
  // Write n - 1 = d * 2^r with d odd.
  let (d, r) = (n - 1).remove_factor(u256(2));
  let mut x = u256(2).pow_mod(d, n);
  if x == 1 || x == n - 1 {
    return true;
  }
  // Repeatedly square x; n passes iff some square reaches n - 1 before collapsing to 1.
  for _ in 1..r {
    x = x * x % n;
    if x == 1 {
      return false;
    }
    if x == n - 1 {
      return true;
    }
  }
  false
}
/// Strong Lucas probable prime test (NOT the more common Lucas primality test which requires
/// factorization of `n-1`).
///
/// Selects parameters `d`, `p`, `q` according to Selfridge's method.
///
/// If `n` passes, it is either prime or a "strong" Lucas pseudoprime. (The precise meaning of
/// "strong" is not fixed in the literature.) Procedure can be further strengthened by implementing
/// more tests in Section 6 of [Baillie and Wagstaff 1980], but for now this is TODO.
///
/// See also: [Lucas pseudoprime](https://en.wikipedia.org/wiki/Lucas_pseudoprime) on Wikipedia.
pub fn passes_lucas(n: &U256) -> bool {
  // `choose_d` fails only when `n` is a perfect square, which proves `n` composite.
  // (Replaces the previous `is_err()` + `unwrap()` pair with an idiomatic `match`.)
  let d = match choose_d(n) {
    Ok(d) => d,
    Err(_) => return false,
  };
  // Selfridge: p = 1 and q = (1 - d) / 4, so that d = p^2 - 4q.
  let q = (1 - d) / 4;
  let (u_delta, v_delta, q_delta_over_2) =
    compute_lucas_sequences(*n + 1, n, u256(1), u256(1), q, d);
  // `u_delta % n != 0` proves n composite.
  u_delta == 0
  // Additional check which is not strictly part of Lucas test but nonetheless filters some
  // composite n for free. See section "Checking additional congruence conditions" on Wikipedia.
  && v_delta.is_congruent(2 * q, n)
  // Congruence check which holds for prime n by Euler's criterion.
  && q_delta_over_2.is_congruent(q * U256::jacobi(q, n), n)
}
#[derive(Debug)]
/// Zero-sized error returned by `choose_d` when its input is a perfect square.
struct IsPerfectSquare();
/// Finds and returns the first `D` in the sequence `[5, -7, 9, -11, ...]` (see `D_VALUES`) for
/// which the Jacobi symbol `(D/n) = -1`, or `Err` if `n` is a perfect square (in which case no
/// such `D` exists no matter how far the sequence is extended). Note that the average number of
/// candidates tried for nonsquare `n` is 1.8, and empirically it is extremely rare that
/// `|D| > 13`.
///
/// We experimented with postponing the `is_perfect_square` check until after some number of
/// iterations but ultimately found no performance gain. It is likely that most perfect squares
/// are caught by the Miller-Rabin test.
fn choose_d(n: &U256) -> Result<i32, IsPerfectSquare> {
  if n.is_perfect_square() {
    return Err(IsPerfectSquare());
  }
  for &d in D_VALUES.iter() {
    if U256::jacobi(d, n) == -1 {
      return Ok(d);
    }
  }
  // For nonsquare `n` a suitable `D` is overwhelmingly likely to appear within `D_VALUES`;
  // exhausting the table is treated as an invariant violation rather than an `Err`.
  panic!("n is not square but we still couldn't find a d value!")
}
/// Computes the Lucas sequences `{u_i(p, q)}` and `{v_i(p, q)}` up to a specified index `k_target`
/// in O(log(`k_target`)) time by recursively calculating only the `(2i)`th and `(2i+1)`th elements
/// in an order determined by the binary expansion of `k`. Also returns `q^{k/2} (mod n)`, which is
/// used in a stage of the strong Lucas test. In the Lucas case we specify that `d = p^2 - 4q` and
/// set `k_target = delta = n - (d/n) = n + 1`.
///
/// Note that `p` does not show up in the code because it is set to 1.
#[allow(clippy::cast_sign_loss)]
fn compute_lucas_sequences(
  k_target: U256,
  n: &U256,
  mut u: U256,
  mut v: U256,
  q0: i32,
  d: i32,
) -> (U256, U256, U256) {
  // Mod an `i32` into the `[0, n)` range.
  let i_mod_n = |x: i32| {
    if x < 0 {
      *n - (u256(x.abs() as u64) % n)
    } else {
      u256(x as u64) % n
    }
  };
  let q0 = i_mod_n(q0);
  let d = i_mod_n(d);
  let mut q = q0;
  let mut q_k_over_2 = q0;
  // Finds `t` in `Z_n` with `2t = x (mod n)`.
  // Assumes `x` in `[0, n)`.
  // For odd `x` this uses (x + n) / 2, split to avoid overflow (n is odd here since even n were
  // filtered by the small-prime stage).
  let half = |x: U256| {
    if x.is_odd() {
      (x >> 1) + (*n >> 1) + 1
    } else {
      x >> 1
    }
  };
  // Modular subtraction that avoids underflow on unsigned operands.
  let sub_mod_n = |a, b| {
    if a > b {
      (a - b) % n
    } else {
      *n - (b - a) % n
    }
  };
  // Write binary expansion of `k` as [x_1, ..., x_l], e.g. [1, 0, 1, 1] for 11. `x_1` is always
  // 1. For `i = 2, 3, ..., l`, do the following: if `x_i = 0`, then update `u_k` and `v_k` to
  // `u_{2k}` and `v_{2k}`, respectively. Else if `x_i = 1`, update to `u_{2k+1}` and `v_{2k+1}`.
  // At the end of the loop we will have computed `u_k` and `v_k`, with `k` as given, in
  // `log(delta)` time.
  let mut k_target_bits = [0; 257];
  let len = k_target.write_binary(&mut k_target_bits);
  for &bit in k_target_bits[..len].iter().skip(1) {
    // Compute `(u, v)_{2k}` from `(u, v)_k` according to the following:
    // u_2k = u_k * v_k (mod n)
    // v_2k = v_k^2 - 2*q^k (mod n)
    u = u * v % n;
    v = sub_mod_n(v * v, u512(q) << 1);
    // Continuously maintain `q_k = q^k (mod n)` and `q_k_over_2 = q^{k/2} (mod n)`.
    q_k_over_2 = q;
    q = q * q % n;
    if bit == 1 {
      // Compute `(u, v)_{2k+1}` from `(u, v)_{2k}` according to the following:
      // u_{2k+1} = 1/2 * (p*u_{2k} + v_{2k}) (mod n)
      // v_{2k+1} = 1/2 * (d*u_{2k} + p*v_{2k}) (mod n)
      let u_old = u;
      u = half((u512(u) + u512(v)) % n);
      v = half((d * u_old + u512(v)) % n);
      q = q * q0 % n;
    }
  }
  // These are all `mod n` so the `low_u256` is lossless. We could make it checked...
  (u, v, q_k_over_2)
}
#[cfg(test)]
mod tests {
  use self::constants::*;
  use super::*;
  #[test]
  fn test_miller_rabin() {
    assert!(passes_miller_rabin_base_2(&u256(13)));
    assert!(!passes_miller_rabin_base_2(&u256(65)));
    // Large primes pass; their composite multiples fail.
    for &p in LARGE_PRIMES.iter() {
      assert!(passes_miller_rabin_base_2(&u256(p)));
      assert!(!passes_miller_rabin_base_2(
        &(u256(p) * u256(106_957)).low_u256()
      ));
    }
    // Base-2 strong pseudoprimes fool this single test by construction — that is why BPSW adds
    // the Lucas stage.
    for &n in STRONG_BASE_2_PSEUDOPRIMES.iter() {
      assert!(passes_miller_rabin_base_2(&u256(n)));
    }
  }
  #[test]
  fn test_lucas() {
    assert!(passes_lucas(&u256(5)));
    // Should fail on `p = 2`.
    for &sp in SMALL_PRIMES[1..].iter() {
      assert!(passes_lucas(&u256(sp)));
      assert!(!passes_lucas(&(u256(sp) * u256(2047)).low_u256()));
    }
    for &mp in MED_PRIMES.iter() {
      assert!(passes_lucas(&u256(mp)));
      assert!(!passes_lucas(&(u256(mp) * u256(5)).low_u256()));
    }
    for &lp in LARGE_PRIMES.iter() {
      assert!(passes_lucas(&u256(lp)));
      assert!(!passes_lucas(&(u256(lp) * u256(7)).low_u256()));
    }
  }
  #[test]
  fn test_is_prob_prime() {
    // Sanity checks.
    assert!(is_prob_prime(&u256(2)));
    assert!(is_prob_prime(&u256(5)));
    assert!(is_prob_prime(&u256(7)));
    assert!(is_prob_prime(&u256(241)));
    assert!(is_prob_prime(&u256(7919)));
    assert!(is_prob_prime(&u256(48131)));
    assert!(is_prob_prime(&u256(76463)));
    assert!(is_prob_prime(&u256(115_547)));
    // Medium primes.
    for &p in MED_PRIMES.iter() {
      assert!(is_prob_prime(&u256(p)));
    }
    // Large primes.
    for &p in LARGE_PRIMES.iter() {
      assert!(is_prob_prime(&u256(p)));
    }
    // Large, difficult-to-factor composites.
    for &p in LARGE_PRIMES.iter() {
      for &q in LARGE_PRIMES.iter() {
        assert!(!is_prob_prime(&(u256(p) * u256(q)).low_u256()));
      }
    }
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/hash/primality/constants.rs | src/hash/primality/constants.rs | /// Used as a prefilter to `is_prob_prime`. The number of these has been tuned on a 2018 Macbook
/// Pro.
pub const SMALL_PRIMES: [u64; 200] = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,
197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307,
311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547,
557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929,
937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049,
1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,
];
#[allow(dead_code, clippy::unreadable_literal)]
// Test fixture: consecutive primes in the ~106,957..112,297 range, used by the primality tests.
pub const MED_PRIMES: [u64; 456] = [
  106957, 106961, 106963, 106979, 106993, 107021, 107033, 107053, 107057, 107069, 107071, 107077,
  107089, 107099, 107101, 107119, 107123, 107137, 107171, 107183, 107197, 107201, 107209, 107227,
  107243, 107251, 107269, 107273, 107279, 107309, 107323, 107339, 107347, 107351, 107357, 107377,
  107441, 107449, 107453, 107467, 107473, 107507, 107509, 107563, 107581, 107599, 107603, 107609,
  107621, 107641, 107647, 107671, 107687, 107693, 107699, 107713, 107717, 107719, 107741, 107747,
  107761, 107773, 107777, 107791, 107827, 107837, 107839, 107843, 107857, 107867, 107873, 107881,
  107897, 107903, 107923, 107927, 107941, 107951, 107971, 107981, 107999, 108007, 108011, 108013,
  108023, 108037, 108041, 108061, 108079, 108089, 108107, 108109, 108127, 108131, 108139, 108161,
  108179, 108187, 108191, 108193, 108203, 108211, 108217, 108223, 108233, 108247, 108263, 108271,
  108287, 108289, 108293, 108301, 108343, 108347, 108359, 108377, 108379, 108401, 108413, 108421,
  108439, 108457, 108461, 108463, 108497, 108499, 108503, 108517, 108529, 108533, 108541, 108553,
  108557, 108571, 108587, 108631, 108637, 108643, 108649, 108677, 108707, 108709, 108727, 108739,
  108751, 108761, 108769, 108791, 108793, 108799, 108803, 108821, 108827, 108863, 108869, 108877,
  108881, 108883, 108887, 108893, 108907, 108917, 108923, 108929, 108943, 108947, 108949, 108959,
  108961, 108967, 108971, 108991, 109001, 109013, 109037, 109049, 109063, 109073, 109097, 109103,
  109111, 109121, 109133, 109139, 109141, 109147, 109159, 109169, 109171, 109199, 109201, 109211,
  109229, 109253, 109267, 109279, 109297, 109303, 109313, 109321, 109331, 109357, 109363, 109367,
  109379, 109387, 109391, 109397, 109423, 109433, 109441, 109451, 109453, 109469, 109471, 109481,
  109507, 109517, 109519, 109537, 109541, 109547, 109567, 109579, 109583, 109589, 109597, 109609,
  109619, 109621, 109639, 109661, 109663, 109673, 109717, 109721, 109741, 109751, 109789, 109793,
  109807, 109819, 109829, 109831, 109841, 109843, 109847, 109849, 109859, 109873, 109883, 109891,
  109897, 109903, 109913, 109919, 109937, 109943, 109961, 109987, 110017, 110023, 110039, 110051,
  110059, 110063, 110069, 110083, 110119, 110129, 110161, 110183, 110221, 110233, 110237, 110251,
  110261, 110269, 110273, 110281, 110291, 110311, 110321, 110323, 110339, 110359, 110419, 110431,
  110437, 110441, 110459, 110477, 110479, 110491, 110501, 110503, 110527, 110533, 110543, 110557,
  110563, 110567, 110569, 110573, 110581, 110587, 110597, 110603, 110609, 110623, 110629, 110641,
  110647, 110651, 110681, 110711, 110729, 110731, 110749, 110753, 110771, 110777, 110807, 110813,
  110819, 110821, 110849, 110863, 110879, 110881, 110899, 110909, 110917, 110921, 110923, 110927,
  110933, 110939, 110947, 110951, 110969, 110977, 110989, 111029, 111031, 111043, 111049, 111053,
  111091, 111103, 111109, 111119, 111121, 111127, 111143, 111149, 111187, 111191, 111211, 111217,
  111227, 111229, 111253, 111263, 111269, 111271, 111301, 111317, 111323, 111337, 111341, 111347,
  111373, 111409, 111427, 111431, 111439, 111443, 111467, 111487, 111491, 111493, 111497, 111509,
  111521, 111533, 111539, 111577, 111581, 111593, 111599, 111611, 111623, 111637, 111641, 111653,
  111659, 111667, 111697, 111721, 111731, 111733, 111751, 111767, 111773, 111779, 111781, 111791,
  111799, 111821, 111827, 111829, 111833, 111847, 111857, 111863, 111869, 111871, 111893, 111913,
  111919, 111949, 111953, 111959, 111973, 111977, 111997, 112019, 112031, 112061, 112067, 112069,
  112087, 112097, 112103, 112111, 112121, 112129, 112139, 112153, 112163, 112181, 112199, 112207,
  112213, 112223, 112237, 112241, 112247, 112249, 112253, 112261, 112279, 112289, 112291, 112297,
];
#[allow(dead_code)]
// Test fixture: a few 60+-bit primes for exercising the primality tests near the u64 range.
pub const LARGE_PRIMES: [u64; 4] = [
  553_525_575_239_331_913,
  12_702_637_924_034_044_211,
  378_373_571_372_703_133,
  8_640_171_141_336_142_787,
];
#[allow(dead_code)]
// Composites that pass a single base-2 Miller-Rabin test; used to show why the Lucas stage of
// BPSW is necessary.
pub const STRONG_BASE_2_PSEUDOPRIMES: [u64; 10] = [
  2047, 3277, 4033, 4681, 8321, 15841, 29341, 42799, 49141, 52633,
];
#[allow(dead_code)]
// Composites that pass the strong Lucas test; anticorrelated with the base-2 pseudoprimes above.
pub const STRONG_LUCAS_PSEUDOPRIMES: [u64; 10] = [
  5459, 5777, 10877, 16109, 18971, 22499, 24569, 25199, 40309, 58519,
];
#[allow(dead_code)]
// Composites passing the "extra strong" Lucas variant; kept for reference/testing.
pub const EXTRA_STRONG_LUCAS_PSEUDOPRIMES: [u64; 10] = [
  989, 3239, 5777, 10877, 27971, 29681, 30739, 31631, 39059, 72389,
];
// Candidate `D` values for Selfridge's method in `choose_d`: odd values of alternating sign,
// 5, -7, 9, -11, ..., searched in order for the first with Jacobi symbol (D/n) = -1.
pub const D_VALUES: [i32; 500] = [
  5, -7, 9, -11, 13, -15, 17, -19, 21, -23, 25, -27, 29, -31, 33, -35, 37, -39, 41, -43, 45, -47,
  49, -51, 53, -55, 57, -59, 61, -63, 65, -67, 69, -71, 73, -75, 77, -79, 81, -83, 85, -87, 89,
  -91, 93, -95, 97, -99, 101, -103, 105, -107, 109, -111, 113, -115, 117, -119, 121, -123, 125,
  -127, 129, -131, 133, -135, 137, -139, 141, -143, 145, -147, 149, -151, 153, -155, 157, -159,
  161, -163, 165, -167, 169, -171, 173, -175, 177, -179, 181, -183, 185, -187, 189, -191, 193,
  -195, 197, -199, 201, -203, 205, -207, 209, -211, 213, -215, 217, -219, 221, -223, 225, -227,
  229, -231, 233, -235, 237, -239, 241, -243, 245, -247, 249, -251, 253, -255, 257, -259, 261,
  -263, 265, -267, 269, -271, 273, -275, 277, -279, 281, -283, 285, -287, 289, -291, 293, -295,
  297, -299, 301, -303, 305, -307, 309, -311, 313, -315, 317, -319, 321, -323, 325, -327, 329,
  -331, 333, -335, 337, -339, 341, -343, 345, -347, 349, -351, 353, -355, 357, -359, 361, -363,
  365, -367, 369, -371, 373, -375, 377, -379, 381, -383, 385, -387, 389, -391, 393, -395, 397,
  -399, 401, -403, 405, -407, 409, -411, 413, -415, 417, -419, 421, -423, 425, -427, 429, -431,
  433, -435, 437, -439, 441, -443, 445, -447, 449, -451, 453, -455, 457, -459, 461, -463, 465,
  -467, 469, -471, 473, -475, 477, -479, 481, -483, 485, -487, 489, -491, 493, -495, 497, -499,
  501, -503, 505, -507, 509, -511, 513, -515, 517, -519, 521, -523, 525, -527, 529, -531, 533,
  -535, 537, -539, 541, -543, 545, -547, 549, -551, 553, -555, 557, -559, 561, -563, 565, -567,
  569, -571, 573, -575, 577, -579, 581, -583, 585, -587, 589, -591, 593, -595, 597, -599, 601,
  -603, 605, -607, 609, -611, 613, -615, 617, -619, 621, -623, 625, -627, 629, -631, 633, -635,
  637, -639, 641, -643, 645, -647, 649, -651, 653, -655, 657, -659, 661, -663, 665, -667, 669,
  -671, 673, -675, 677, -679, 681, -683, 685, -687, 689, -691, 693, -695, 697, -699, 701, -703,
  705, -707, 709, -711, 713, -715, 717, -719, 721, -723, 725, -727, 729, -731, 733, -735, 737,
  -739, 741, -743, 745, -747, 749, -751, 753, -755, 757, -759, 761, -763, 765, -767, 769, -771,
  773, -775, 777, -779, 781, -783, 785, -787, 789, -791, 793, -795, 797, -799, 801, -803, 805,
  -807, 809, -811, 813, -815, 817, -819, 821, -823, 825, -827, 829, -831, 833, -835, 837, -839,
  841, -843, 845, -847, 849, -851, 853, -855, 857, -859, 861, -863, 865, -867, 869, -871, 873,
  -875, 877, -879, 881, -883, 885, -887, 889, -891, 893, -895, 897, -899, 901, -903, 905, -907,
  909, -911, 913, -915, 917, -919, 921, -923, 925, -927, 929, -931, 933, -935, 937, -939, 941,
  -943, 945, -947, 949, -951, 953, -955, 957, -959, 961, -963, 965, -967, 969, -971, 973, -975,
  977, -979, 981, -983, 985, -987, 989, -991, 993, -995, 997, -999, 1001, -1003,
];
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/group/ristretto.rs | src/group/ristretto.rs | //! Ristretto group implementation (based on the `curve25519-dalek` crate).
use super::Group;
use crate::util::{int, TypeRep};
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
use curve25519_dalek::traits::Identity;
use rug::integer::Order;
use rug::ops::Pow;
use rug::Integer;
use std::hash::{Hash, Hasher};
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
/// Ristretto group implementation (based on the `curve25519-dalek` crate).
///
/// Uninhabited enum: it is used purely as a type-level tag and never instantiated.
pub enum Ristretto {}
lazy_static! {
  // 2^255 - 1: the largest exponent whose little-endian byte representation is
  // guaranteed to fit in the 32-byte buffer handed to `Scalar::from_bytes_mod_order`.
  pub static ref MAX_SAFE_EXPONENT: Integer = int(2).pow(255) - 1;
  // `MAX_SAFE_EXPONENT` converted to a dalek `Scalar` (reduced mod the group order,
  // as `from_bytes_mod_order` implies).
  pub static ref MAX_SAFE_SCALAR: Scalar = {
    let mut digits: [u8; 32] = [0; 32];
    // Serialize least-significant byte first, matching the layout
    // `Scalar::from_bytes_mod_order` expects.
    MAX_SAFE_EXPONENT.write_digits(&mut digits, Order::LsfLe);
    Scalar::from_bytes_mod_order(digits)
  };
}
impl Ristretto {
  /// Borrows the largest exponent that the scalar conversion in `exp_` can
  /// consume in a single step (`2^255 - 1`).
  fn max_safe_exponent() -> &'static Integer {
    // Explicitly dereference the lazy_static wrapper to its `Integer` target.
    &*MAX_SAFE_EXPONENT
  }
}
// REVIEW: Ideally we'd just use `RistrettoPoint` here, but only traits defined in this crate can
// be implemented for arbitrary types. How to fix without wrapping?
//
// It may make sense to fork `curve25519-dalek` to add the `Hash` impl. Then we won't need to wrap.
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq)]
/// A Ristretto group element, directly wrapping a Ristretto point.
pub struct RistrettoElem(RistrettoPoint);

// `PartialEq` is derived while `Hash` is written by hand, hence the clippy allow.
// Hashing the canonical compressed encoding keeps the `a == b => hash(a) == hash(b)`
// invariant, since equal points compress to identical bytes.
#[allow(clippy::derive_hash_xor_eq)]
impl Hash for RistrettoElem {
  fn hash<H: Hasher>(&self, state: &mut H) {
    // Compress to the canonical 32-byte encoding, then hash those bytes.
    let encoding = self.0.compress();
    encoding.as_bytes().hash(state);
  }
}
impl TypeRep for Ristretto {
  // Ristretto needs no runtime group description (unlike an RSA modulus or a class
  // group discriminant), so the representation is the unit type.
  type Rep = ();
  fn rep() -> &'static Self::Rep {
    &()
  }
}
impl Group for Ristretto {
  type Elem = RistrettoElem;

  /// Group operation: point addition.
  fn op_(_: &(), a: &RistrettoElem, b: &RistrettoElem) -> RistrettoElem {
    RistrettoElem(a.0 + b.0)
  }

  /// Identity element: the neutral point.
  fn id_(_: &()) -> RistrettoElem {
    RistrettoElem(RistrettoPoint::identity())
  }

  /// Inverse: point negation.
  fn inv_(_: &(), x: &RistrettoElem) -> RistrettoElem {
    RistrettoElem(-x.0)
  }

  /// Exponentiation (scalar multiplication) by an arbitrary-precision exponent.
  ///
  /// A dalek `Scalar` is built from 32 little-endian bytes, so exponents larger than
  /// `MAX_SAFE_EXPONENT` are consumed in `MAX_SAFE_EXPONENT`-sized slices first; the
  /// remainder is then converted directly.
  fn exp_(_: &(), x: &RistrettoElem, n: &Integer) -> RistrettoElem {
    let mut leftover = n.clone();
    let mut acc = Self::id();
    while leftover > *MAX_SAFE_EXPONENT {
      acc = RistrettoElem(acc.0 + x.0 * (*MAX_SAFE_SCALAR));
      leftover -= Self::max_safe_exponent();
    }
    // The remainder now fits in 32 little-endian bytes.
    let mut buf: [u8; 32] = [0; 32];
    leftover.write_digits(&mut buf, Order::LsfLe);
    let tail = Scalar::from_bytes_mod_order(buf);
    RistrettoElem(acc.0 + x.0 * tail)
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::util::int;
  use curve25519_dalek::constants;
  #[test]
  fn test_inv() {
    // The basepoint composed with its inverse must yield the identity, and the
    // inverse must be a distinct element.
    let bp = RistrettoElem(constants::RISTRETTO_BASEPOINT_POINT);
    let bp_inv = Ristretto::inv(&bp);
    assert!(Ristretto::op(&bp, &bp_inv) == Ristretto::id());
    assert_ne!(bp, bp_inv);
  }
  #[test]
  fn test_exp() {
    // Exercises the chunked path in `exp_` (exponents above 2^255 - 1):
    // g^(2^258) must equal (g^(2^257))^2.
    let bp = RistrettoElem(constants::RISTRETTO_BASEPOINT_POINT);
    let exp_a = Ristretto::exp(&bp, &int(2).pow(258));
    let exp_b = Ristretto::exp(&bp, &int(2).pow(257));
    let exp_b_2 = Ristretto::exp(&exp_b, &int(2));
    assert_eq!(exp_a, exp_b_2);
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/group/class.rs | src/group/class.rs | //! Fixed-discriminant implementation of an ideal class group, with future optimizations.
//!
//! Using a class group instead of an RSA group for accumulators or vector commitments eliminates
//! the need for a trusted setup, albeit at the expense of slower operations.
use super::{ElemFrom, Group, UnknownOrderGroup};
use crate::util;
use crate::util::{int, TypeRep};
use rug::{Assign, Integer};
use std::hash::{Hash, Hasher};
use std::str::FromStr;
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
/// Class group implementation, with future optimizations available via the `--features` flag.
/// Discriminant generated via OpenSSL.
///
/// Uninhabited enum: used purely as a type-level tag; no values are ever constructed.
pub enum ClassGroup {}
// 2048-bit prime, negated, congruent to `3 mod 4`. Generated using OpenSSL.
// According to "A Survey of IQ Cryptography" (Buchmann & Hamdy) Table 1, IQ-MPQS for computing
// discrete logarithms in class groups with a 2048-bit discriminant is comparable in complexity to
// GNFS for factoring a 4096-bit integer.
const DISCRIMINANT2048_DECIMAL: &str =
"-30616069034807523947093657516320815215492876376165067902716988657802400037331914448218251590830\
1102189519215849430413184776658192481976276720778009261808832630304841711366872161223643645001916\
6969493423497224870506311710491233557329479816457723381368788734079933165653042145718668727765268\
0575673207678516369650123480826989387975548598309959486361425021860161020248607833276306314923730\
9854570972702350567411779734372573754840570138310317754359137013512655926325773048926718050691092\
9453371727344087286361426404588335160385998280988603297435639020911295652025967761702701701471162\
3966286152805654229445219531956098223";
lazy_static! {
  // The fixed (negative) discriminant, parsed once from the decimal constant above.
  // This is what `TypeRep::rep()` hands to every group operation.
  pub static ref CLASS_GROUP_DISCRIMINANT: Integer =
    Integer::from_str(DISCRIMINANT2048_DECIMAL).unwrap();
}
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, Eq)]
/// A class group element, which wraps three GMP integers from the `rug` crate. You should never
/// need to construct a class group element yourself.
pub struct ClassElem {
  // Coefficients of the binary quadratic form a*x^2 + b*x*y + c*y^2 representing this
  // element. Elements built through `ElemFrom` are always reduced and normalized,
  // which the manual `Hash` and `PartialEq` impls below rely on.
  a: Integer,
  b: Integer,
  c: Integer,
}
// `ClassElem` and `ClassGroup` ops based on Chia's fantastic doc explaining applied class groups:
// https://github.com/Chia-Network/vdf-competition/blob/master/classgroups.pdf.
impl ClassGroup {
  /// Normalizes the form `(a, b, c)` so that `-a < b <= a`, preserving its
  /// equivalence class.
  ///
  /// This method is only public for benchmarking. You should not need to use it.
  pub fn normalize(a: Integer, b: Integer, c: Integer) -> (Integer, Integer, Integer) {
    if Self::is_normal(&a, &b, &c) {
      return (a, b, c);
    }
    // r = floor_div((a - b), 2a)
    // (a, b, c) = (a, b + 2ra, ar^2 + br + c)
    let (r, _) = int(&a - &b).div_rem_floor(int(2 * &a));
    let new_b = &b + 2 * int(&r * &a);
    let new_c = c + b * &r + &a * r.square();
    (a, new_b, new_c)
  }

  /// Reduces the form `(a, b, c)` (repeatedly applying the reduction step until
  /// `is_reduced` holds), then normalizes the result.
  ///
  /// This method is only public for benchmarking. You should not need to use it.
  // Note: Does not return a `ClassElem` because the output is not guaranteed to be
  // a valid `ClassElem` for all inputs.
  pub fn reduce(mut a: Integer, mut b: Integer, mut c: Integer) -> (Integer, Integer, Integer) {
    while !Self::is_reduced(&a, &b, &c) {
      // s = floor_div(c + b, 2c)
      let (s, _) = int(&c + &b).div_rem_floor(int(2 * &c));
      // (a, b, c) = (c, −b + 2sc, cs^2 − bs + a)
      let old_a = a.clone();
      let old_b = b.clone();
      a = c.clone();
      b = -b + 2 * int(&s * &c);
      c = -int(&old_b * &s) + old_a + c * s.square();
    }
    Self::normalize(a, b, c)
  }

  #[allow(non_snake_case)]
  /// Squares a class group element (composes the form with itself).
  ///
  /// This method is only public for benchmarking. You should not need to use it.
  pub fn square(x: &ClassElem) -> ClassElem {
    // Solve `bk = c mod a` for `k`, represented by `mu`, `v` and any integer `n` s.t.
    // `k = mu + v * n`.
    let (mu, _) = util::solve_linear_congruence(&x.b, &x.c, &x.a).unwrap();
    // A = a^2
    // B = b - 2a * mu
    // tmp = (b * mu - c) / a
    // C = mu^2 - tmp
    let a = int(x.a.square_ref());
    // Fix: this factor had been garbled to a literal `μ` (mojibake for the HTML
    // entity `&mu;`), which does not compile; the intended multiplier is `&mu`.
    let b = &x.b - int(2 * &x.a) * &mu;
    let (tmp, _) = <(Integer, Integer)>::from(int((&x.b * &mu) - &x.c).div_rem_floor_ref(&x.a));
    let c = mu.square() - tmp;
    Self::elem((a, b, c))
  }

  /// Discriminant `b^2 - 4ac` of the form `(a, b, c)`.
  fn discriminant(a: &Integer, b: &Integer, c: &Integer) -> Integer {
    int(b.square_ref()) - int(4) * a * c
  }

  /// A form belongs to this group iff its discriminant equals the group discriminant.
  fn validate(a: &Integer, b: &Integer, c: &Integer) -> bool {
    Self::discriminant(a, b, c) == *Self::rep()
  }

  /// Reduced: normal, `a <= c`, and `b >= 0` whenever `a == c`.
  fn is_reduced(a: &Integer, b: &Integer, c: &Integer) -> bool {
    Self::is_normal(a, b, c) && (a <= c && !(a == c && *b < int(0)))
  }

  /// Normal: `-a < b <= a` (the `c` coefficient plays no role here).
  fn is_normal(a: &Integer, b: &Integer, _c: &Integer) -> bool {
    -int(a) < int(b) && b <= a
  }
}
impl TypeRep for ClassGroup {
  // The group is fully described at runtime by its (fixed) discriminant.
  type Rep = Integer;
  fn rep() -> &'static Self::Rep {
    &CLASS_GROUP_DISCRIMINANT
  }
}
impl Group for ClassGroup {
  type Elem = ClassElem;
  #[allow(non_snake_case)]
  // Group operation: composition of two binary quadratic forms, following the
  // classical algorithm described in Chia's class group document (see the comment
  // above this impl block). The order of these steps is significant.
  fn op_(_: &Integer, x: &ClassElem, y: &ClassElem) -> ClassElem {
    // g = (b1 + b2) / 2
    // h = (b2 - b1) / 2
    // w = gcd(a1, a2, g)
    let (g, _) = (int(&x.b) + &y.b).div_rem_floor(int(2));
    let (h, _) = (&y.b - int(&x.b)).div_rem_floor(int(2));
    let w = int(x.a.gcd_ref(&y.a)).gcd(&g);
    // j = w
    // s = a1 / w
    // t = a2 / w
    // u = g / w
    // r = 0
    let j = int(&w);
    let (s, _) = <(Integer, Integer)>::from(x.a.div_rem_floor_ref(&w));
    let (t, _) = <(Integer, Integer)>::from(y.a.div_rem_floor_ref(&w));
    let (u, _) = g.div_rem_floor(w);
    // a = tu
    // b = hu + sc
    // m = st
    // Solve linear congruence `(tu)k = hu + sc mod st` or `ak = b mod m` for solutions `k`.
    let a = int(&t * &u);
    let b = int(&h * &u) + (&s * &x.c);
    let mut m = int(&s * &t);
    let (mu, v) = util::solve_linear_congruence(&a, &b, &m).unwrap();
    // a = tv
    // b = h - t * mu
    // m = s
    // Solve linear congruence `(tv)k = h - t * mu mod s` or `ak = b mod m` for solutions `k`.
    let a = int(&t * &v);
    let b = &h - int(&t * &mu);
    m.assign(&s);
    let (lambda, _) = util::solve_linear_congruence(&a, &b, &m).unwrap();
    // k = mu + v * lambda
    // l = (k * t - h) / s
    // m = (tuk - hu - cs) / st
    let k = &mu + int(&v * &lambda);
    let (l, _) = <(Integer, Integer)>::from((int(&k * &t) - &h).div_rem_floor_ref(&s));
    let (m, _) = (int(&t * &u) * &k - &h * &u - &x.c * &s).div_rem_floor(int(&s * &t));
    // A = st
    // B = ju - (kt + ls)
    // C = kl - jm
    let a = int(&s * &t);
    let b = int(&j * &u) - (int(&k * &t) + int(&l * &s));
    let c = int(&k * &l) - int(&j * &m);
    // `Self::elem` reduces and validates the resulting form.
    Self::elem((a, b, c))
  }
  // Identity form (1, 1, (1 - d) / 4). Constructs the reduced element directly
  // instead of going through `Self::elem()`.
  fn id_(d: &Integer) -> ClassElem {
    let a = int(1);
    let b = int(1);
    // c = (b * b - d) / 4a
    let (c, _) = int(1 - d).div_rem_floor(int(4));
    ClassElem { a, b, c }
  }
  // The inverse of (a, b, c) is (a, -b, c). Constructs the inverse directly
  // instead of going through `Self::elem()`.
  fn inv_(_: &Integer, x: &ClassElem) -> ClassElem {
    ClassElem {
      a: int(&x.a),
      b: int(-(&x.b)),
      c: int(&x.c),
    }
  }
  // Exponentiation by repeated squaring; a negative exponent inverts the base and
  // negates the exponent first.
  fn exp_(_: &Integer, a: &ClassElem, n: &Integer) -> ClassElem {
    let (mut val, mut a, mut n) = {
      if *n < int(0) {
        (Self::id(), Self::inv(a), int(-n))
      } else {
        (Self::id(), a.clone(), n.clone())
      }
    };
    loop {
      if n == int(0) {
        return val;
      }
      // Multiply the accumulator in whenever the current low bit of `n` is set.
      if n.is_odd() {
        val = Self::op(&val, &a);
      }
      a = Self::square(&a);
      n >>= 1;
    }
  }
}
impl UnknownOrderGroup for ClassGroup {
  // Returns the fixed form (2, 1, (1 - d) / 8) used as an element of unknown order.
  fn unknown_order_elem_(d: &Integer) -> ClassElem {
    // a = 2
    // b = 1
    // c = (b * b - d) / 4a
    // NOTE(review): this uses `/` (truncating division) rather than the floor
    // divisions used elsewhere; it is exact only when d ≡ 1 (mod 8) — TODO confirm
    // for the chosen discriminant (the tests below also flag `unknown_order_elem`
    // as possibly broken).
    let a = int(2);
    let b = int(1);
    let c = int(1 - d) / int(8);
    ClassElem { a, b, c }
  }
}
impl Hash for ClassElem {
  // Assumes `ClassElem` is reduced and normalized, which will be the case unless a struct is
  // instantiated manually in this module.
  fn hash<H: Hasher>(&self, state: &mut H) {
    // Feed the three coefficients into the hasher in a fixed order. Hashing them as
    // a tuple produces exactly the same byte stream as hashing each field in turn.
    (&self.a, &self.b, &self.c).hash(state);
  }
}
impl PartialEq for ClassElem {
  fn eq(&self, other: &Self) -> bool {
    // Componentwise equality of the (a, b, c) representation.
    (&self.a, &self.b, &self.c) == (&other.a, &other.b, &other.c)
  }
}
/// Panics if `(a, b, c)` cannot be reduced to a valid class element.
impl<A, B, C> ElemFrom<(A, B, C)> for ClassGroup
where
  Integer: From<A>,
  Integer: From<B>,
  Integer: From<C>,
{
  fn elem(abc: (A, B, C)) -> ClassElem {
    // Reduce first so every constructed element is in canonical (reduced, normalized)
    // form — the manual `Hash` and `PartialEq` impls depend on this.
    let (a, b, c) = Self::reduce(int(abc.0), int(abc.1), int(abc.2));
    // Ideally, this should return an error and the return type of `ElemFrom` should be
    // `Result<Self::Elem, Self:err>`, but this would require a lot of ugly `unwrap`s in the
    // accumulator library. Besides, users should not need to create new class group elements, so
    // an invalid `ElemFrom` here should signal a severe internal error.
    assert!(Self::validate(&a, &b, &c));
    ClassElem { a, b, c }
  }
}
// Caveat: Tests that use "ground truth" use outputs from Chia's sample implementation in python:
// https://github.com/Chia-Network/vdf-competition/blob/master/inkfish/classgroup.py.
#[cfg(test)]
mod tests {
use super::*;
use std::collections::hash_map::DefaultHasher;
  // Makes a class elem tuple but does not reduce.
  // Test-only backdoor: builds a `ClassElem` directly from decimal strings without
  // going through `ClassGroup::elem`, so invalid/unreduced forms can be constructed.
  fn construct_raw_elem_from_strings(a: &str, b: &str, c: &str) -> ClassElem {
    ClassElem {
      a: Integer::from_str(a).unwrap(),
      b: Integer::from_str(b).unwrap(),
      c: Integer::from_str(c).unwrap(),
    }
  }
  #[should_panic]
  #[test]
  fn test_bad_elem() {
    // (1, 2, 3) has the wrong discriminant, so the `validate` assert in
    // `ClassGroup::elem` must fire.
    let _ = ClassGroup::elem((1, 2, 3));
  }
#[test]
fn test_elem_from() {
let a1 = Integer::from_str("16").unwrap();
let b1 = Integer::from_str("105").unwrap();
let c1 = Integer::from_str(
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814207",
)
.unwrap();
let a2 = Integer::from_str("16").unwrap();
let b2 = Integer::from_str("9").unwrap();
let c2 = Integer::from_str(
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814036",
)
.unwrap();
let reduced_elem = ClassGroup::elem((a1, b1, c1));
let also_reduced_elem = ClassGroup::elem((a2, b2, c2));
assert_eq!(reduced_elem, also_reduced_elem);
}
#[test]
fn test_equality() {
let not_reduced = construct_raw_elem_from_strings(
"16",
"105",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814207"
);
let reduced_ground_truth = construct_raw_elem_from_strings(
"16",
"9",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814036"
);
let diff_elem = construct_raw_elem_from_strings(
"4",
"1",
"19135043146754702466933535947700509509683047735103167439198117911126500023332446530136407244\
268818886844950990589400824048541137030123517295048625578863052039394052606960429510076477727\
812619793559333896857655440664448190570209733309248852860771133554929587999582285331513410741\
679548532925359795754799072731031327175516868367484717873943724678975890638662600637655379895\
797691446827331865910685793896910463236233398285859677535633644394859647446063344540995395360\
815557919878168193309083573295900545539758915028677094752412489256178770608972743880695597825\
16229851064188563419476497892884550353389340326220747256139"
);
assert!(not_reduced != reduced_ground_truth);
assert!(not_reduced == not_reduced.clone());
assert!(reduced_ground_truth == reduced_ground_truth.clone());
assert!(not_reduced != diff_elem);
assert!(reduced_ground_truth != diff_elem);
let reduced = ClassGroup::elem((not_reduced.a, not_reduced.b, not_reduced.c));
assert!(reduced == reduced_ground_truth);
}
#[test]
fn test_hash() {
let not_reduced = construct_raw_elem_from_strings(
"16",
"105",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814207"
);
let reduced_ground_truth = construct_raw_elem_from_strings(
"16",
"9",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814036"
);
let diff_elem = construct_raw_elem_from_strings(
"4",
"1",
"19135043146754702466933535947700509509683047735103167439198117911126500023332446530136407244\
268818886844950990589400824048541137030123517295048625578863052039394052606960429510076477727\
812619793559333896857655440664448190570209733309248852860771133554929587999582285331513410741\
679548532925359795754799072731031327175516868367484717873943724678975890638662600637655379895\
797691446827331865910685793896910463236233398285859677535633644394859647446063344540995395360\
815557919878168193309083573295900545539758915028677094752412489256178770608972743880695597825\
16229851064188563419476497892884550353389340326220747256139"
);
let mut hasher_lh = DefaultHasher::new();
let mut hasher_rh = DefaultHasher::new();
not_reduced.hash(&mut hasher_lh);
reduced_ground_truth.hash(&mut hasher_rh);
assert!(hasher_lh.finish() != hasher_rh.finish());
assert!(hasher_lh.finish() == hasher_lh.finish());
assert!(hasher_rh.finish() == hasher_rh.finish());
hasher_lh = DefaultHasher::new();
hasher_rh = DefaultHasher::new();
let reduced = ClassGroup::elem((not_reduced.a, not_reduced.b, not_reduced.c));
reduced.hash(&mut hasher_lh);
reduced_ground_truth.hash(&mut hasher_rh);
assert!(hasher_lh.finish() == hasher_rh.finish());
hasher_lh = DefaultHasher::new();
hasher_rh = DefaultHasher::new();
reduced.hash(&mut hasher_lh);
diff_elem.hash(&mut hasher_rh);
assert!(hasher_lh.finish() != hasher_rh.finish());
}
#[test]
fn test_reduce_basic() {
// Unreduced element.
let to_reduce = construct_raw_elem_from_strings(
"59162244921619725812008939143220718157267937427074598447911241410131470159247784852210767449\
675610037288729551814191198624164179866076352187405442496568188988272422133088755036699145362\
385840772236403043664778415471196678638241785773530531198720497580622741709880533724904220122\
358854068046553219863419609777498761804625479650772123754523807001976654588225908928022367436\
8",
"18760351095004839755193532164856605650590306627169248964100884295652838905828158941233738613\
175821849253748329102319504958410190952820220503570113920576542676928659211807590199941027958\
195895385446372444261885022800653454209101497963588809819572703579484085278913354621371362285\
341138299691587953249270188429393417132110841259813122945626515477865766896056280729710478647\
13",
"14872270891432803054791175727694631095755964943358394411314110783404577714102170379700365256\
599679049493824862742803590079461712691146098397470840896560034332315858221821103076776907123\
277315116632337385101204055232891361405428635972040596205450316747012080794838691280547894128\
246741601088755087359234554141346980837292342320288111397175220296098629890108459305643419353\
36"
);
let reduced_ground_truth = construct_raw_elem_from_strings(
"26888935961824081232597112540509824504614070059776273347136888921115497522070287009841688662\
983066376019079593372296556420848446780369918809384119124783870290778875424468497961559643807\
918398860928578027038014112641529893817109240852544158309292025321122680747989987560029531021\
808743313150630063377037854944",
"14529985196481999393995154363327100184407232892559561136140792409262328867440167480822808496\
853924547751298342980606034124112579835255733824790020119078588372593288210628255956605240171\
744703418426092073347584357826862813733154338737148962212641444735717023402201569115323580814\
54099903972209626147819759991",
"28467266502267127591420289007165819749231433586093061478772560429058231137856046130384492811\
816456933286039468940950129263300933723839212086399375780796041634531383342902918719073416087\
614456845205980227091403964285870107268917183244016635907926846271829374679124848388403486656\
1564478239095738726823372184204"
);
let (a, b, c) = ClassGroup::reduce(to_reduce.a, to_reduce.b, to_reduce.c);
assert_eq!(ClassElem { a, b, c }, reduced_ground_truth.clone());
let reduced_ground_truth_ = reduced_ground_truth.clone();
let (a, b, c) = ClassGroup::reduce(
reduced_ground_truth_.a,
reduced_ground_truth_.b,
reduced_ground_truth_.c,
);
assert_eq!(ClassElem { a, b, c }, reduced_ground_truth);
}
#[test]
// REVIEW: This test should be restructured to not construct `ClassElem`s but it will do for now.
fn test_normalize_basic() {
let unnormalized = construct_raw_elem_from_strings(
"16",
"105",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814207",
);
let normalized_ground_truth = construct_raw_elem_from_strings(
"16",
"9",
"4783760786688675616733383986925127377420761933775791859799529477781625005833111632534101811\
06720472171123774764735020601213528425753087932376215639471576300984851315174010737751911943\
19531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526\
85419887133231339948938699768182757831793879217091871179468485931169743972659665650159413844\
97394942286170683296647767144847422761580905834957146491938390841109871491186151583613524884\
88402038894799695420483272708933239751363849397287571692736881031223140446926522431859701738\
9945629057462766047140854869124473221137588347335081555186814036",
);
let (a, b, c) = ClassGroup::normalize(unnormalized.a, unnormalized.b, unnormalized.c);
assert_eq!(normalized_ground_truth, ClassElem { a, b, c });
}
#[test]
// REVIEW: This test should be rewritten, because it may be broken by `unknown_order_elem` not
// working correctly.
fn test_discriminant_basic() {
let g = ClassGroup::unknown_order_elem();
assert_eq!(
ClassGroup::discriminant(&g.a, &g.b, &g.c),
*ClassGroup::rep()
);
}
#[test]
// REVIEW: This test should be rewritten. See review for `test_discriminant_basic`.
fn test_discriminant_across_ops() {
let id = ClassGroup::id();
let g1 = ClassGroup::unknown_order_elem();
let g2 = ClassGroup::op(&g1, &g1);
let g3 = ClassGroup::op(&id, &g2);
let g3_inv = ClassGroup::inv(&g3);
assert!(ClassGroup::validate(&id.a, &id.b, &id.c));
assert!(ClassGroup::validate(&g1.a, &g1.b, &g1.c));
assert!(ClassGroup::validate(&g2.a, &g2.b, &g2.c));
assert!(ClassGroup::validate(&g3.a, &g3.b, &g3.c));
assert!(ClassGroup::validate(&g3_inv.a, &g3_inv.b, &g3_inv.c));
}
#[test]
fn test_op_single() {
let a = construct_raw_elem_from_strings(
"4",
"1",
"19135043146754702466933535947700509509683047735103167439198117911126500023332446530136407244\
268818886844950990589400824048541137030123517295048625578863052039394052606960429510076477727\
812619793559333896857655440664448190570209733309248852860771133554929587999582285331513410741\
679548532925359795754799072731031327175516868367484717873943724678975890638662600637655379895\
797691446827331865910685793896910463236233398285859677535633644394859647446063344540995395360\
815557919878168193309083573295900545539758915028677094752412489256178770608972743880695597825\
16229851064188563419476497892884550353389340326220747256139"
);
let b = construct_raw_elem_from_strings(
"16",
"41",
"47837607866886756167333839869251273774207619337757918597995294777816250058331116325341018110\
672047217112377476473502060121352842575308793237621563947157630098485131517401073775191194319\
531549483898334742144138601661120476425524333273122132151927833887323969998955713328783526854\
198871332313399489386997681827578317938792170918711794684859311697439726596656501594138449739\
494228617068329664776714484742276158090583495714649193839084110987149118615158361352488488402\
038894799695420483272708933239751363849397287571692736881031223140446926522431859701738994562\
9057462766047140854869124473221137588347335081555186814061"
);
let ground_truth = construct_raw_elem_from_strings(
"64",
"9",
"11959401966721689041833459967312818443551904834439479649498823694454062514582779081335254527\
668011804278094369118375515030338210643827198309405390986789407524621282879350268443797798579\
882887370974583685536034650415280119106381083318280533037981958471830992499738928332195881713\
549717833078349872346749420456894579484698042729677948671214827924359931649164125398534612434\
873557154267082416194178621185569039522645873928662298459771027746787279653789590338122122100\
50972369992385512081817723330993784096234932189292318422025780578511173163060796492543474864\
07264365691511785213717281118305284397086833770388796703509"
);
assert_eq!(ClassGroup::op(&a, &b), ground_truth);
}
#[test]
fn test_op_alternating() {
let g_anchor = ClassGroup::unknown_order_elem();
let mut g = ClassGroup::id();
let mut g_star = ClassGroup::id();
// g
g = ClassGroup::op(&g_anchor, &g);
// g^2, g^* = g^2
g = ClassGroup::op(&g_anchor, &g);
g_star = ClassGroup::op(&g, &g_star);
// g^3
g = ClassGroup::op(&g_anchor, &g);
// g^4, g^* = g^2 * g^4 = g^6
g = ClassGroup::op(&g_anchor, &g);
g_star = ClassGroup::op(&g, &g_star);
let ground_truth = construct_raw_elem_from_strings(
"64",
"9",
"11959401966721689041833459967312818443551904834439479649498823694454062514582779081335254527\
668011804278094369118375515030338210643827198309405390986789407524621282879350268443797798579\
882887370974583685536034650415280119106381083318280533037981958471830992499738928332195881713\
549717833078349872346749420456894579484698042729677948671214827924359931649164125398534612434\
873557154267082416194178621185569039522645873928662298459771027746787279653789590338122122100\
509723699923855120818177233309937840962349321892923184220257805785111731630607964925434748640\
7264365691511785213717281118305284397086833770388796703509"
);
assert_eq!(ground_truth, g_star);
}
#[test]
fn test_op_complex() {
// 1. Take g^100, g^200, ..., g^1000.
// 2. Compute g^* = g^100 * ... * g^1000.
// 3. For each of g^100, g^200, ..., g^1000 compute the inverse of that element and assert that
// g^* * current_inverse = product of g^100, g^200, ..., g^1000 without the inversed-out
// element.
let g_anchor = ClassGroup::unknown_order_elem();
let mut g = ClassGroup::id();
let mut gs = vec![];
let mut gs_invs = vec![];
let mut g_star = ClassGroup::id();
for i in 1..=1000 {
g = ClassGroup::op(&g_anchor, &g);
assert!(ClassGroup::validate(&g.a, &g.b, &g.c));
if i % 100 == 0 {
gs.push(g.clone());
gs_invs.push(ClassGroup::inv(&g));
g_star = ClassGroup::op(&g, &g_star);
assert!(ClassGroup::validate(&g_star.a, &g_star.b, &g_star.c));
}
}
let elems_n_invs = gs.iter().zip(gs_invs.iter());
for (g_elem, g_inv) in elems_n_invs {
assert!(ClassGroup::validate(&g_elem.a, &g_elem.b, &g_elem.c));
assert!(ClassGroup::validate(&g_inv.a, &g_inv.b, &g_inv.c));
let mut curr_prod = ClassGroup::id();
for elem in &gs {
if elem != g_elem {
curr_prod = ClassGroup::op(&curr_prod, &elem);
assert!(ClassGroup::validate(
&curr_prod.a,
&curr_prod.b,
&curr_prod.c
));
}
}
assert_eq!(ClassGroup::id(), ClassGroup::op(&g_inv, &g_elem));
assert_eq!(curr_prod, ClassGroup::op(&g_inv, &g_star));
}
}
#[test]
fn test_id_basic() {
let g = ClassGroup::unknown_order_elem();
let id = ClassGroup::id();
assert_eq!(g, ClassGroup::op(&g, &id));
assert_eq!(g, ClassGroup::op(&id, &g));
assert_eq!(id, ClassGroup::op(&id, &id));
}
#[test]
fn test_id_repeated() {
let mut id = ClassGroup::id();
let g_anchor = ClassGroup::unknown_order_elem();
let mut g = ClassGroup::unknown_order_elem();
for _ in 0..1000 {
id = ClassGroup::op(&id, &id);
assert_eq!(id, ClassGroup::id());
g = ClassGroup::op(&g, &ClassGroup::id());
assert_eq!(g, g_anchor);
}
}
#[test]
fn test_inv() {
let id = ClassGroup::id();
let g_anchor = ClassGroup::unknown_order_elem();
let mut g = ClassGroup::unknown_order_elem();
for _ in 0..1000 {
g = ClassGroup::op(&g, &g_anchor);
let g_inv = ClassGroup::inv(&g);
assert_eq!(id, ClassGroup::op(&g_inv, &g));
assert_eq!(id, ClassGroup::op(&g, &g_inv));
assert_eq!(g, ClassGroup::inv(&g_inv));
}
}
#[test]
fn test_exp_basic() {
let g_anchor = ClassGroup::unknown_order_elem();
let mut g = ClassGroup::id();
for i in 1..=1000 {
g = ClassGroup::op(&g, &g_anchor);
assert_eq!(&g, &ClassGroup::exp(&g_anchor, &int(i)));
}
}
#[test]
fn test_square_basic() {
let g = ClassGroup::unknown_order_elem();
let mut g4 = ClassGroup::id();
// g^4
for _ in 0..4 {
g4 = ClassGroup::op(&g, &g4);
}
// g^2
let mut g2 = ClassGroup::op(&g, &g);
// g^4
g2 = ClassGroup::square(&g2);
assert_eq!(&g2, &g4);
}
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/group/rsa.rs | src/group/rsa.rs | //! RSA (2048) group using GMP integers in the `rug` crate.
use super::{ElemFrom, Group, UnknownOrderGroup};
use crate::util::{int, TypeRep};
use rug::Integer;
use std::str::FromStr;
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
/// RSA-2048 group implementation. Modulus taken from
/// [here](https://en.wikipedia.org/wiki/RSA_numbers#RSA-2048). **Note**: If you want to use
/// `Rsa2048` outside the context of this crate, be advised that it treats `x` and `-x` as the same
/// element for sound proofs-of-exponentiation. See BBF (page 9).
///
/// Uninhabited enum: used purely as a type-level tag; no values are ever constructed.
pub enum Rsa2048 {}
/// RSA-2048 modulus, taken from [Wikipedia](https://en.wikipedia.org/wiki/RSA_numbers#RSA-2048).
const RSA2048_MODULUS_DECIMAL: &str =
"251959084756578934940271832400483985714292821262040320277771378360436620207075955562640185258807\
8440691829064124951508218929855914917618450280848912007284499268739280728777673597141834727026189\
6375014971824691165077613379859095700097330459748808428401797429100642458691817195118746121515172\
6546322822168699875491824224336372590851418654620435767984233871847744479207399342365848238242811\
9816381501067481045166037730605620161967625613384414360383390441495263443219011465754445417842402\
0924616515723350778707749817125772467962926386356373289912154831438167899885040445364023527381951\
378636564391212010397122822120720357";
lazy_static! {
  // The RSA-2048 modulus, parsed once from the decimal constant above.
  pub static ref RSA2048_MODULUS: Integer = Integer::from_str(RSA2048_MODULUS_DECIMAL).unwrap();
  // Threshold used by `ElemFrom` to pick the canonical representative of {x, -x}.
  pub static ref HALF_MODULUS: Integer = RSA2048_MODULUS.clone() / 2;
}
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
/// An RSA 2048 group element, directly wrapping a GMP integer from the `rug` crate.
pub struct Rsa2048Elem(Integer);
impl TypeRep for Rsa2048 {
  type Rep = Integer;
  // The group's type-level static data is its modulus N.
  fn rep() -> &'static Self::Rep {
    &RSA2048_MODULUS
  }
}
impl Group for Rsa2048 {
  type Elem = Rsa2048Elem;
  // Modular multiplication; `elem` re-normalizes the product into the canonical {x, -x} rep.
  fn op_(modulus: &Integer, a: &Rsa2048Elem, b: &Rsa2048Elem) -> Rsa2048Elem {
    Self::elem(int(&a.0 * &b.0) % modulus)
  }
  // Multiplicative identity.
  fn id_(_: &Integer) -> Rsa2048Elem {
    Self::elem(1)
  }
  // Modular inverse. `unwrap` presumes x is coprime to N — TODO confirm this invariant holds
  // for all values callers can construct.
  fn inv_(modulus: &Integer, x: &Rsa2048Elem) -> Rsa2048Elem {
    Self::elem(x.0.invert_ref(modulus).unwrap())
  }
  fn exp_(modulus: &Integer, x: &Rsa2048Elem, n: &Integer) -> Rsa2048Elem {
    // A side-channel resistant impl is 40% slower; we'll consider it in the future if we need to.
    // `pow_mod_ref` returns None only when n < 0 and x is not invertible mod N.
    Self::elem(x.0.pow_mod_ref(n, modulus).unwrap())
  }
}
impl<T> ElemFrom<T> for Rsa2048
where
  Integer: From<T>,
{
  // Canonicalizes an integer into the quotient Z_N* / {1, -1}: any value above N/2 is mapped
  // to its negation mod N, so `x` and `-x` always normalize to the same element (see BBF p. 9).
  fn elem(t: T) -> Rsa2048Elem {
    let modulus = Self::rep();
    let val = int(t) % modulus;
    if val > *HALF_MODULUS {
      // Euclidean remainder of -val guarantees a representative in [0, N).
      Rsa2048Elem(<(Integer, Integer)>::from((-val).div_rem_euc_ref(&modulus)).1)
    } else {
      Rsa2048Elem(val)
    }
  }
}
impl UnknownOrderGroup for Rsa2048 {
  // 2 has unknown order in this group as long as the factorization of N stays unknown.
  fn unknown_order_elem_(_: &Integer) -> Rsa2048Elem {
    Self::elem(2)
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  // Forces lazy parsing of the modulus; fails at test time if the literal were malformed.
  #[test]
  fn test_init() {
    let _x = &Rsa2048::rep();
  }
  #[test]
  fn test_op() {
    let a = Rsa2048::op(&Rsa2048::elem(2), &Rsa2048::elem(3));
    assert!(a == Rsa2048::elem(6));
    // Negative inputs are normalized by `elem`, so (-2)(-3) also lands on the element for 6.
    let b = Rsa2048::op(&Rsa2048::elem(-2), &Rsa2048::elem(-3));
    assert!(b == Rsa2048::elem(6));
  }
  /// Tests that `-x` and `x` are treated as the same element.
  #[test]
  fn test_cosets() {
    assert!(Rsa2048::elem(3) == Rsa2048::elem(RSA2048_MODULUS.clone() - 3));
    // TODO: Add a trickier coset test involving `op`.
  }
  #[test]
  fn test_exp() {
    let a = Rsa2048::exp(&Rsa2048::elem(2), &int(3));
    assert!(a == Rsa2048::elem(8));
    // 2^4096 mod N, checked against a precomputed value.
    let b = Rsa2048::exp(&Rsa2048::elem(2), &int(4096));
    assert!(
      b == Rsa2048::elem(
        Integer::parse(
          "2172073899553954285893691587818692186975191598984015216589930386158248724081087849265975\
17496727372037176277380476487000099770530440575029170919732871116716934260655466121508332\
32954361536709981055037121764270784874720971933716065574032615073613728454497477072129686\
53887333057277396369601863707823088589609031265453680152037285312247125429494632830592984\
49823194163842041340565518401459166858709515078878951293564147044227487142171138804897039\
34147612551938082501753055296801829703017260731439871110215618988509545129088484396848644\
805730347466581515692959313583208325725034506693916571047785061884094866050395109710"
        )
        .unwrap()
      )
    );
    // No assertions below — these just exercise exponents at and beyond the modulus size.
    let c = Rsa2048::exp(&Rsa2048::elem(2), &RSA2048_MODULUS);
    dbg!(c);
    let d = Rsa2048::exp(&Rsa2048::elem(2), &(RSA2048_MODULUS.clone() * int(2)));
    dbg!(d);
  }
  #[test]
  fn test_inv() {
    let x = Rsa2048::elem(2);
    let inv = Rsa2048::inv(&x);
    // x * x^-1 must be the identity.
    assert!(Rsa2048::op(&x, &inv) == Rsa2048::id());
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/group/mod.rs | src/group/mod.rs | //! Implementations for different mathematical groups, each of which satisfies our
//! `UnknownOrderGroup` trait. They can be used with the accumulator and vector commitment
//! structures, or standalone if you have a custom application.
//!
//! The preferred elliptic group implementation is the `Ristretto` group, which is a cyclic subset
//! of the `Ed25519` group.
use crate::util::{int, TypeRep};
use rug::Integer;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::Sized;
mod class;
pub use class::{ClassElem, ClassGroup};
mod ristretto;
pub use ristretto::{Ristretto, RistrettoElem};
mod rsa;
pub use rsa::{Rsa2048, Rsa2048Elem};
/// A mathematical group.
///
/// This trait allows the implementation of standard group routines:
/// - Identity
/// - Op (the fundamental group operation)
/// - Exponentiation
/// - Inverse (particularly where this is efficient to compute)
///
/// The `TypeRep` trait lets us emulate type-level static fields, e.g. the modulus in an RSA group
/// or the discriminant in a class group.
///
/// Clients of this trait need to implement functions of the form `*_`, which take in `TypeRep`
/// data as a parameter. Consumers use functions without the underscore: `id`, `op`, `exp`, and
/// `inv`.
// The other traits are only required here because Rust can't figure out how to do stuff with an
// `Accumulator<G>` even though it's just a wrapped `G::Elem`. If possible we'd remove them.
pub trait Group: Clone + Debug + Eq + Hash + TypeRep + Send + Sync {
  // In theory the association `Group::Elem` is bijective, such that it makes sense to write
  // something like `Elem::Group::get()`. This would let us define `op`, `exp`, `inv`, etc. on the
  //`Elem` type and avoid using prefix notation for all of our group operations. Bijective
  // associated types are not currently supported by Rust.
  /// The associated group element type for this group.
  type Elem: Clone + Debug + Eq + Hash + Sized + Send + Sync;
  /// A group-specific wrapper for `id`.
  fn id_(rep: &Self::Rep) -> Self::Elem;
  /// A group-specific wrapper for `op`.
  fn op_(rep: &Self::Rep, a: &Self::Elem, b: &Self::Elem) -> Self::Elem;
  /// A group-specific wrapper for `exp`, although it comes with a default implementation via
  /// repeated squaring.
  ///
  /// Specific implementations may provide more performant specializations as needed (e.g.
  /// Montgomery multiplication for RSA groups).
  fn exp_(_rep: &Self::Rep, a: &Self::Elem, n: &Integer) -> Self::Elem {
    // Normalize to a non-negative exponent: a^(-n) == (a^-1)^n.
    let (mut val, mut a, mut n) = {
      if *n < int(0) {
        (Self::id(), Self::inv(a), int(-n))
      } else {
        (Self::id(), a.clone(), n.clone())
      }
    };
    // Binary square-and-multiply: O(log n) group operations.
    while n > int(0) {
      if n.is_odd() {
        val = Self::op(&val, &a);
      }
      a = Self::op(&a, &a);
      n >>= 1;
    }
    val
  }
  /// A group-specific wrapper for `inv`.
  fn inv_(rep: &Self::Rep, a: &Self::Elem) -> Self::Elem;
  // -------------------
  // END OF REQUIRED FNS
  // -------------------
  /// Returns the identity element of the group.
  fn id() -> Self::Elem {
    Self::id_(Self::rep())
  }
  /// Applies the group operation to elements `a` and `b` and returns the result.
  fn op(a: &Self::Elem, b: &Self::Elem) -> Self::Elem {
    Self::op_(Self::rep(), a, b)
  }
  /// Applies the group operation to `a` and itself `n` times and returns the result.
  fn exp(a: &Self::Elem, n: &Integer) -> Self::Elem {
    Self::exp_(Self::rep(), a, n)
  }
  /// Returns the group inverse of `a`.
  fn inv(a: &Self::Elem) -> Self::Elem {
    Self::inv_(Self::rep(), a)
  }
}
/// A group containing elements of unknown order.
///
/// **Note**: This trait does not imply that the group itself has unknown order (e.g. RSA groups).
#[allow(clippy::module_name_repetitions)]
pub trait UnknownOrderGroup: Group {
  /// Returns an element of unknown order in the group (e.g. 2 for `Rsa2048`).
  fn unknown_order_elem() -> Self::Elem {
    Self::unknown_order_elem_(Self::rep())
  }
  /// A group-specific wrapper for `unknown_order_elem`.
  fn unknown_order_elem_(rep: &Self::Rep) -> Self::Elem;
}
/// Like `From<T>`, but implemented on the `Group` instead of the element type.
// Implementations may canonicalize values (e.g. `Rsa2048::elem` folds x and -x together).
pub trait ElemFrom<T>: Group {
  /// Returns a group element from an initial value.
  fn elem(val: T) -> Self::Elem;
}
/// Computes the product of `alpha_i ^ (p(x) / x_i)`, where `i` is an index into the `alphas` and
/// `x` arrays, and `p(x)` is the product of all `x_i`. See BBF (page 11).
///
/// Returns the group identity for empty input (the empty product).
///
/// # Panics
/// Panics if `alphas` and `x` have different lengths.
pub fn multi_exp<G: Group>(alphas: &[G::Elem], x: &[Integer]) -> G::Elem {
  assert_eq!(
    alphas.len(),
    x.len(),
    "multi_exp requires exactly one exponent per base"
  );
  // Empty product is the identity. Without this guard, an empty slice skipped the len == 1
  // base case and recursed on two empty halves forever (stack overflow).
  if alphas.is_empty() {
    return G::id();
  }
  if alphas.len() == 1 {
    return alphas[0].clone();
  }
  // Divide and conquer per BBF: split in half, recurse on each half, then raise each half's
  // result to the product of the *other* half's exponents and combine.
  let n_half = alphas.len() / 2;
  let alpha_l = &alphas[..n_half];
  let alpha_r = &alphas[n_half..];
  let x_l = &x[..n_half];
  let x_r = &x[n_half..];
  let x_star_l = x_l.iter().product();
  let x_star_r = x_r.iter().product();
  let l = multi_exp::<G>(alpha_l, x_l);
  let r = multi_exp::<G>(alpha_r, x_r);
  G::op(&G::exp(&l, &x_star_r), &G::exp(&r, &x_star_l))
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::util::int;
  #[test]
  fn test_multi_exp() {
    let alpha_1 = Rsa2048::elem(2);
    let alpha_2 = Rsa2048::elem(3);
    let x_1 = int(3);
    let x_2 = int(2);
    // p(x) = 6, so the result is 2^(6/3) * 3^(6/2) = 4 * 27 = 108.
    let res = multi_exp::<Rsa2048>(
      &[alpha_1.clone(), alpha_2.clone()],
      &[x_1.clone(), x_2.clone()],
    );
    assert!(res == Rsa2048::elem(108));
    let alpha_3 = Rsa2048::elem(5);
    let x_3 = int(1);
    // p(x) = 6: 2^(6/3) * 3^(6/2) * 5^(6/1) = 4 * 27 * 15625 = 1_687_500.
    let res_2 = multi_exp::<Rsa2048>(&[alpha_1, alpha_2, alpha_3], &[x_1, x_2, x_3]);
    assert!(res_2 == Rsa2048::elem(1_687_500));
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/proof/poke2.rs | src/proof/poke2.rs | //! Non-Interactive Proofs of Knowledge of Exponent (NI-PoKE2). See BBF (pages 10 and 42) for
//! details.
use crate::group::UnknownOrderGroup;
use crate::hash::{blake2b, hash_to_prime};
use rug::Integer;
#[allow(non_snake_case)]
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
/// Struct for NI-PoKE2.
pub struct Poke2<G: UnknownOrderGroup> {
  // z = g^exp: a commitment to the exponent in a second generator g.
  z: G::Elem,
  // Q = (base * g^alpha)^q, where q is the Euclidean quotient of exp by the challenge prime l.
  Q: G::Elem,
  // r = exp mod l (Euclidean remainder, so 0 <= r < l even for negative exp).
  r: Integer,
}
impl<G: UnknownOrderGroup> Poke2<G> {
  /// Computes a proof that you know `exp` s.t. `base ^ exp = result`.
  pub fn prove(base: &G::Elem, exp: &Integer, result: &G::Elem) -> Self {
    let g = G::unknown_order_elem();
    let z = G::exp(&g, exp);
    // Fiat-Shamir: the challenge prime l and scalar alpha are derived by hashing the statement,
    // making the protocol non-interactive.
    let l = hash_to_prime(&(base, result, &z));
    let alpha = blake2b(&(base, result, &z, &l));
    let (q, r) = <(Integer, Integer)>::from(exp.div_rem_euc_ref(&l));
    #[allow(non_snake_case)]
    let Q = G::exp(&G::op(&base, &G::exp(&g, &alpha)), &q);
    Self { z, Q, r }
  }
  /// Verifies that the prover knows `exp` s.t. `base ^ exp = result`.
  #[allow(non_snake_case)]
  pub fn verify(base: &G::Elem, result: &G::Elem, Self { z, Q, r }: &Self) -> bool {
    let g = G::unknown_order_elem();
    // Recompute the Fiat-Shamir challenges from the statement and the commitment z.
    let l = hash_to_prime(&(base, result, &z));
    let alpha = blake2b(&(base, result, &z, &l));
    // Accept iff Q^l * (base * g^alpha)^r == result * z^alpha.
    let lhs = G::op(
      &G::exp(Q, &l),
      &G::exp(&G::op(&base, &G::exp(&g, &alpha)), &r),
    );
    let rhs = G::op(result, &G::exp(&z, &alpha));
    lhs == rhs
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::group::{ElemFrom, Group, Rsa2048};
  use crate::util::int;
  #[test]
  fn test_poke2() {
    // 2^20 = 1048576
    let base = Rsa2048::unknown_order_elem();
    let exp = int(20);
    let result = Rsa2048::elem(1_048_576);
    let proof = Poke2::<Rsa2048>::prove(&base, &exp, &result);
    assert!(Poke2::verify(&base, &result, &proof));
    // Must compare entire structs since elements `z`, `Q`, and `r` are private.
    assert!(
      proof
        == Poke2 {
          z: Rsa2048::elem(1_048_576),
          Q: Rsa2048::elem(1),
          r: int(20)
        }
    );
    // 2^35 = 34359738368
    let exp_2 = int(35);
    let result_2 = Rsa2048::elem(34_359_738_368u64);
    let proof_2 = Poke2::<Rsa2048>::prove(&base, &exp_2, &result_2);
    assert!(Poke2::verify(&base, &result_2, &proof_2));
    // Cannot verify wrong base/exp/result triple with wrong pair.
    assert!(!Poke2::verify(&base, &result_2, &proof));
    assert!(
      proof_2
        == Poke2 {
          z: Rsa2048::elem(34_359_738_368u64),
          Q: Rsa2048::elem(1),
          r: int(35)
        }
    );
  }
  // Exercises the Euclidean-remainder path in `prove` with a negative exponent.
  #[test]
  fn test_poke2_negative() {
    let base = Rsa2048::elem(2);
    let exp = int(-5);
    let result = Rsa2048::exp(&base, &exp);
    let proof = Poke2::<Rsa2048>::prove(&base, &exp, &result);
    assert!(Poke2::verify(&base, &result, &proof));
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/proof/poe.rs | src/proof/poe.rs | //! Non-Interactive Proofs of Exponentiation (NI-PoE). See BBF (pages 8 and 42) for details.
use crate::group::Group;
use crate::hash::hash_to_prime;
use crate::util::int;
use rug::Integer;
#[allow(non_snake_case)]
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
/// Struct for NI-PoE.
pub struct Poe<G: Group> {
  // Q = base^(exp / l), where l is the Fiat-Shamir challenge prime.
  Q: G::Elem,
}
impl<G: Group> Poe<G> {
  /// Computes a proof that `base ^ exp` was performed to derive `result`.
  pub fn prove(base: &G::Elem, exp: &Integer, result: &G::Elem) -> Self {
    // Fiat-Shamir challenge prime derived from the full statement.
    let l = hash_to_prime(&(base, exp, result));
    // Truncated (not Euclidean) quotient; `verify` uses the matching `%` remainder.
    let q = exp / l;
    Self {
      Q: G::exp(&base, &q),
    }
  }
  /// Verifies that `base ^ exp = result` using the given proof to avoid computation.
  pub fn verify(base: &G::Elem, exp: &Integer, result: &G::Elem, proof: &Self) -> bool {
    // Recompute the same challenge prime from the statement.
    let l = hash_to_prime(&(base, exp, result));
    let r = int(exp % &l);
    // w = Q^l * u^r
    let w = G::op(&G::exp(&proof.Q, &l), &G::exp(&base, &r));
    w == *result
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::group::{ElemFrom, Rsa2048, UnknownOrderGroup};
  use crate::util::int;
  #[test]
  fn test_poe_small_exp() {
    // 2^20 = 1048576
    let base = Rsa2048::unknown_order_elem();
    let exp = int(20);
    let result = Rsa2048::elem(1_048_576);
    let proof = Poe::<Rsa2048>::prove(&base, &exp, &result);
    assert!(Poe::verify(&base, &exp, &result, &proof));
    // Q == 1: presumably the challenge prime exceeds these tiny exponents, so exp / l == 0.
    assert!(
      proof
        == Poe {
          Q: Rsa2048::elem(1)
        }
    );
    // 2^35 = 34359738368
    let exp_2 = int(35);
    let result_2 = Rsa2048::elem(34_359_738_368u64);
    let proof_2 = Poe::<Rsa2048>::prove(&base, &exp_2, &result_2);
    assert!(Poe::verify(&base, &exp_2, &result_2, &proof_2));
    assert!(
      proof_2
        == Poe {
          Q: Rsa2048::elem(1)
        }
    );
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/proof/pokcr.rs | src/proof/pokcr.rs | //! Non-Interactive Proofs of Knowledge of Co-prime Roots (NI-PoKCR). See BBF (page 11) for details.
use crate::group::{multi_exp, Group};
use rug::Integer;
#[allow(non_snake_case)]
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
/// Struct for NI-PoKCR.
pub struct Pokcr<G: Group> {
  // w = product of all witnesses (see `prove`).
  w: G::Elem,
}
impl<G: Group> Pokcr<G> {
  /// Generates an NI-PoKCR proof: `w` is the product of all witnesses.
  pub fn prove(witnesses: &[G::Elem]) -> Self {
    let mut product = G::id();
    for witness in witnesses {
      product = G::op(&product, witness);
    }
    Self { w: product }
  }
  /// Verifies an NI-PoKCR proof: checks `w^(prod x_i) == prod alpha_i^(p(x)/x_i)`.
  pub fn verify(alphas: &[G::Elem], x: &[Integer], proof: &Self) -> bool {
    let x_star: Integer = x.iter().product();
    let lhs = G::exp(&proof.w, &x_star);
    let rhs = multi_exp::<G>(alphas, x);
    lhs == rhs
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use crate::group::{ElemFrom, Rsa2048};
  use crate::util::int;
  #[test]
  fn test_pokcr() {
    // Witnesses 2 and 3 are square roots of alphas 4 and 9 (x_i = 2).
    let witnesses = [Rsa2048::elem(2), Rsa2048::elem(3)];
    let x = [int(2), int(2)];
    let alphas = [Rsa2048::elem(4), Rsa2048::elem(9)];
    let proof = Pokcr::<Rsa2048>::prove(&witnesses);
    // w is the product of the witnesses: 2 * 3 = 6.
    assert!(proof.w == Rsa2048::elem(6));
    assert!(Pokcr::verify(&alphas, &x, &proof));
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/src/proof/mod.rs | src/proof/mod.rs | //! Succinct proofs over unknown-order groups. These proofs are used as building blocks for many of
//! the cryptographic primitives in this library.
//!
//! Use standalone with caution.
//!
//! Implementations are based on Section 3 of BBF.
mod poe;
pub use poe::Poe;
mod pokcr;
pub use pokcr::Pokcr;
mod poke2;
pub use poke2::Poke2;
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/tests/stress.rs | tests/stress.rs | use accumulator::group::Rsa2048;
use accumulator::Accumulator;
use rand::Rng;
/// Adds 100 random 32-byte elements to an accumulator (unverified), then for 100 iterations:
/// adds a fresh random element with a verified membership proof, deletes it with a verified
/// proof, and verifies a nonmembership proof for it against the original accumulator.
///
/// Slow; run explicitly with `cargo test -- --ignored`.
///
/// TODO: Use a counter instead of random bits.
#[test]
#[ignore]
fn stress_test() {
  let mut acc_set = Vec::new();
  let mut acc = Accumulator::<Rsa2048, [u8; 32]>::empty();
  for _ in 0..100 {
    let random_elem = rand::thread_rng().gen::<[u8; 32]>();
    acc_set.push(random_elem);
  }
  println!("Starting add");
  // `add` consumes the accumulator, so no clone is needed when reassigning.
  acc = acc.add(&acc_set);
  println!("{}", acc_set.len());
  for _ in 0..100 {
    let new_elem = rand::thread_rng().gen::<[u8; 32]>();
    assert!(!acc_set.contains(&new_elem));
    // Membership: adding yields a proof verifiable against the new accumulator.
    let (new_acc, add_proof) = acc.clone().add_with_proof(&[new_elem]);
    assert!(new_acc.verify_membership(&new_elem, &add_proof));
    // Deletion, reusing the witness from the add proof.
    let (_, del_proof) = new_acc
      .clone()
      .delete_with_proof(&[(new_elem, add_proof.witness)])
      .unwrap();
    assert!(new_acc.verify_membership(&new_elem, &del_proof));
    // Nonmembership against the original accumulator, which never contained `new_elem`.
    let nonmem_proof = acc
      .prove_nonmembership(&acc_set, &[new_elem])
      .expect("It works");
    assert!(acc.verify_nonmembership(&[new_elem], &nonmem_proof));
  }
}
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/benches/uint.rs | benches/uint.rs | /// See https://bheisler.github.io/criterion.rs/book/getting_started.html to add more benchmarks.
#[macro_use]
extern crate criterion;
use accumulator::hash::blake2b;
use accumulator::uint::U256;
use criterion::{black_box, Criterion};
use rug::integer::Order;
use std::ops::Mul;
// Generic multiply wrapped in `black_box` so the optimizer cannot const-fold the work away.
fn bench_mul<T: Mul>(a: T, b: T) {
  black_box(a * b);
}
fn criterion_benchmark(c: &mut Criterion) {
  let int = blake2b("data");
  // NOTE(review): only 4 bytes of the 256-bit hash are written here, so the U256 operand is
  // mostly zero — presumably this should be a 32-byte buffer; confirm before trusting results.
  let mut bytes = [0; 4];
  int.write_digits(&mut bytes, Order::LsfBe);
  let u256 = U256::from(bytes);
  // Compares rug (GMP-backed) multiplication against the crate's fixed-width U256.
  c.bench_function("mul_rug", move |b| b.iter(|| bench_mul(&int, &int)));
  c.bench_function("mul_u256", move |b| b.iter(|| bench_mul(u256, u256)));
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/benches/hash/hashes.rs | benches/hash/hashes.rs | /// See https://bheisler.github.io/criterion.rs/book/getting_started.html to add more benchmarks.
#[macro_use]
extern crate criterion;
use criterion::Criterion;
use accumulator::hash::{blake2b, hash_to_prime};
use rand::Rng;
// Hashes a fixed short input; measures raw Blake2b cost.
fn bench_blake2() {
  blake2b("werg");
}
// Fresh random input per call, so timings include the per-call RNG cost.
fn bench_hash_to_prime() {
  let random_bytes = rand::thread_rng().gen::<[u8; 32]>();
  hash_to_prime(&random_bytes);
}
fn criterion_benchmark(c: &mut Criterion) {
  c.bench_function("blake2", |b| b.iter(bench_blake2));
  c.bench_function("hash_to_prime", |b| b.iter(bench_hash_to_prime));
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/benches/hash/primality.rs | benches/hash/primality.rs | /// See https://bheisler.github.io/criterion.rs/book/getting_started.html to add more benchmarks.
#[macro_use]
extern crate criterion;
use accumulator::hash::primality::{passes_lucas, passes_miller_rabin_base_2};
use accumulator::uint::u256;
use criterion::Criterion;
use rand::Rng;
use rug::integer::Order;
use rug::Integer;
// Miller-Rabin base 2 on the crate's fixed-width U256.
fn bench_mr2(bytes: &[u8; 32]) {
  passes_miller_rabin_base_2(&u256(bytes));
}
// Comparable check via rug/GMP on the same bytes.
fn bench_mr2_rug(bytes: &[u8; 32]) {
  let n = Integer::from_digits(bytes, Order::Lsf);
  // GMP does not let us demand a base-2 Fermat test so we just do one of random base.
  n.is_probably_prime(1);
}
fn bench_lucas(bytes: &[u8; 32]) {
  passes_lucas(&u256(bytes));
}
fn criterion_benchmark(c: &mut Criterion) {
  let mut random_bytes = rand::thread_rng().gen::<[u8; 32]>();
  // Force the low bit so the candidate is odd (even inputs are trivially composite).
  random_bytes[0] |= 1;
  c.bench_function("mr2", move |b| b.iter(|| bench_mr2(&random_bytes)));
  c.bench_function("mr2_rug", move |b| b.iter(|| bench_mr2_rug(&random_bytes)));
  c.bench_function("lucas", move |b| b.iter(|| bench_lucas(&random_bytes)));
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/benches/accumulator/add.rs | benches/accumulator/add.rs | /// See https://bheisler.github.io/criterion.rs/book/getting_started.html to add more benchmarks.
#[macro_use]
extern crate criterion;
use accumulator::group::{ClassGroup, Rsa2048, UnknownOrderGroup};
use accumulator::hash::hash_to_prime;
use accumulator::{Accumulator, MembershipProof};
use criterion::Criterion;
use rand::Rng;
use rug::Integer;
// Measures batch-adding `elems` to a fresh, empty accumulator.
fn bench_add<G: UnknownOrderGroup>(elems: &[Integer]) {
  let acc = Accumulator::<G, Integer>::empty();
  acc.add(elems);
}
// Measures batch membership verification against a prebuilt accumulator and proof.
fn bench_verify<G: UnknownOrderGroup>(
  acc: &Accumulator<G, Integer>,
  elems: &[Integer],
  proof: &MembershipProof<G, Integer>,
) {
  assert!(acc.verify_membership_batch(elems, proof));
}
// One-at-a-time baseline for comparison with the batch `add`; currently unused.
#[allow(dead_code)]
fn bench_iterative_add(elems: &[Integer]) {
  let mut acc = Accumulator::<Rsa2048, Integer>::empty();
  for elem in elems.chunks(1) {
    acc = acc.add(elem);
  }
}
// Builds an accumulator of 101 batches of 100 random primes; returns the final accumulator,
// the proof for the last batch, and that batch's elements.
fn init_acc<G: UnknownOrderGroup>() -> (
  Accumulator<G, Integer>,
  MembershipProof<G, Integer>,
  Vec<Integer>,
) {
  let mut elems = Vec::new();
  for _ in 0..100 {
    let random_bytes = rand::thread_rng().gen::<[u8; 32]>();
    let prime = hash_to_prime(&random_bytes);
    elems.push(prime);
  }
  let acc = Accumulator::<G, Integer>::empty();
  let (mut acc, mut proof) = acc.clone().add_with_proof(&elems);
  for _ in 0..100 {
    // Each iteration replaces `elems` with a fresh batch and folds it into the accumulator.
    elems = vec![];
    for _ in 0..100 {
      let random_bytes = rand::thread_rng().gen::<[u8; 32]>();
      let prime = hash_to_prime(&random_bytes);
      elems.push(prime);
    }
    let (curr_acc, curr_proof) = acc.add_with_proof(&elems);
    acc = curr_acc;
    proof = curr_proof;
  }
  (acc, proof, elems)
}
// Registers add benchmarks (batch sizes 1, 10, 100) and a verify benchmark for a group type.
macro_rules! benchmark_add {
  ($group_type : ty, $criterion: ident) => {
    let group_type_str = String::from(stringify!($group_type)).to_lowercase();
    let (acc, proof, elems) = init_acc::<$group_type>();
    // Separate clones because each `bench_function` closure takes ownership via `move`.
    let elems_1 = elems.clone();
    let elems_2 = elems.clone();
    let elems_3 = elems.clone();
    $criterion.bench_function(format!("{}_add_1", group_type_str).as_str(), move |b| {
      b.iter(|| bench_add::<$group_type>(&elems_1[0..1]))
    });
    $criterion.bench_function(format!("{}_add_10", group_type_str).as_str(), move |b| {
      b.iter(|| bench_add::<$group_type>(&elems_2[0..10]))
    });
    $criterion.bench_function(format!("{}_add_100", group_type_str).as_str(), move |b| {
      b.iter(|| bench_add::<$group_type>(&elems_3))
    });
    $criterion.bench_function(format!("{}_verify", group_type_str).as_str(), move |b| {
      b.iter(|| bench_verify(&acc, &elems, &proof))
    });
  };
}
fn criterion_benchmark(c: &mut Criterion) {
  benchmark_add! {Rsa2048, c};
  benchmark_add! {ClassGroup, c};
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
cambrian/accumulator | https://github.com/cambrian/accumulator/blob/8f460270c274a4a65d777378a587be6dbc986c18/benches/accumulator/delete.rs | benches/accumulator/delete.rs | /// See https://bheisler.github.io/criterion.rs/book/getting_started.html to add more benchmarks.
#[macro_use]
extern crate criterion;
use accumulator::group::{ClassGroup, Rsa2048, UnknownOrderGroup};
use accumulator::{Accumulator, MembershipProof};
use criterion::Criterion;
// Measures deleting element "c" (with proof) from a prebuilt accumulator.
fn bench_delete<G: UnknownOrderGroup>(
  acc: &Accumulator<G, &'static str>,
  c_proof: &MembershipProof<G, &'static str>,
) {
  // Clones per iteration because `delete_with_proof` consumes the accumulator.
  acc
    .clone()
    .delete_with_proof(&[("c", c_proof.clone().witness)])
    .expect("Valid delete expected.");
}
// Builds an accumulator over {a, b}, adds "c" with a proof (giving us a deletion witness),
// and registers the delete benchmark for the given group type.
macro_rules! benchmark_delete {
  ($group_type : ty, $criterion: ident) => {
    let group_type_str = String::from(stringify!($group_type)).to_lowercase();
    let acc_0 = Accumulator::<$group_type, &'static str>::empty().add(&["a", "b"]);
    let (acc_1, c_proof) = acc_0.clone().add_with_proof(&["c"]);
    $criterion.bench_function(format! {"{}_delete", group_type_str}.as_str(), move |b| {
      b.iter(|| bench_delete(&acc_1.clone(), &c_proof))
    });
  };
}
fn criterion_benchmark(c: &mut Criterion) {
  benchmark_delete! {Rsa2048, c};
  benchmark_delete! {ClassGroup, c};
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | MIT | 8f460270c274a4a65d777378a587be6dbc986c18 | 2026-01-04T20:15:46.214781Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.