repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
biandratti/huginn-net | https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http2_fingerprint_extractor.rs | huginn-net-http/tests/http2_fingerprint_extractor.rs | use huginn_net_http::http2_fingerprint_extractor::Http2FingerprintExtractor;
use huginn_net_http::http2_parser::{Http2ParseError, HTTP2_CONNECTION_PREFACE};
/// Helper function to create an HTTP/2 frame
///
/// Creates a valid HTTP/2 frame with the specified type, stream ID, and payload.
/// Frame format: [length:24][type:8][flags:8][stream_id:32][payload]
fn create_http2_frame(frame_type: u8, stream_id: u32, payload: &[u8]) -> Vec<u8> {
let mut frame = Vec::new();
// Length (24 bits, big-endian)
let length = payload.len() as u32;
frame.push(((length >> 16) & 0xFF) as u8);
frame.push(((length >> 8) & 0xFF) as u8);
frame.push((length & 0xFF) as u8);
// Type (8 bits)
frame.push(frame_type);
// Flags (8 bits)
frame.push(0x00);
// Stream ID (32 bits, big-endian, with reserved bit cleared)
frame.extend_from_slice(&(stream_id & 0x7FFF_FFFF).to_be_bytes());
// Payload
frame.extend_from_slice(payload);
frame
}
/// Create a SETTINGS frame payload with the given settings
///
/// Each setting is 6 bytes: [id:16][value:32]
fn create_settings_payload(settings: &[(u16, u32)]) -> Vec<u8> {
let mut payload = Vec::new();
for (id, value) in settings {
payload.extend_from_slice(&id.to_be_bytes());
payload.extend_from_slice(&value.to_be_bytes());
}
payload
}
/// Create a WINDOW_UPDATE frame payload
fn create_window_update_payload(increment: u32) -> Vec<u8> {
// WINDOW_UPDATE payload is 4 bytes: [increment:32]
// The increment must have the reserved bit cleared
(increment & 0x7FFF_FFFF).to_be_bytes().to_vec()
}
#[test]
fn test_new() {
let extractor = Http2FingerprintExtractor::new();
assert!(!extractor.fingerprint_extracted());
assert!(extractor.get_fingerprint().is_none());
}
#[test]
fn test_default() {
let extractor = Http2FingerprintExtractor::default();
assert!(!extractor.fingerprint_extracted());
assert!(extractor.get_fingerprint().is_none());
}
#[test]
fn test_add_bytes_empty() {
let mut extractor = Http2FingerprintExtractor::new();
let result = extractor.add_bytes(&[]);
assert!(result.is_ok());
if let Ok(value) = result {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
}
#[test]
fn test_add_bytes_insufficient_data() {
let mut extractor = Http2FingerprintExtractor::new();
// Less than 9 bytes (minimum frame header size)
let result = extractor.add_bytes(&[0x00, 0x00, 0x00, 0x04, 0x00]);
assert!(result.is_ok());
if let Ok(value) = &result {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
}
#[test]
fn test_add_bytes_with_preface() {
let mut extractor = Http2FingerprintExtractor::new();
// Create a SETTINGS frame (required for fingerprint)
let settings_payload = create_settings_payload(&[(1, 65536), (2, 0)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
// Add preface + frame
let mut data = Vec::from(HTTP2_CONNECTION_PREFACE);
data.extend_from_slice(&settings_frame);
let result = extractor.add_bytes(&data);
assert!(result.is_ok());
// Should extract fingerprint since we have SETTINGS frame
if let Ok(fingerprint_result) = &result {
assert!(fingerprint_result.is_some());
}
assert!(extractor.fingerprint_extracted());
assert!(extractor.get_fingerprint().is_some());
}
#[test]
fn test_add_bytes_without_preface() {
let mut extractor = Http2FingerprintExtractor::new();
// Create a SETTINGS frame without preface
let settings_payload = create_settings_payload(&[(1, 65536), (2, 0)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let result = extractor.add_bytes(&settings_frame);
assert!(result.is_ok());
// Should extract fingerprint since we have SETTINGS frame
if let Ok(fingerprint_result) = &result {
assert!(fingerprint_result.is_some());
}
assert!(extractor.fingerprint_extracted());
}
#[test]
fn test_add_bytes_incremental() {
let mut extractor = Http2FingerprintExtractor::new();
// First add preface
let result1 = extractor.add_bytes(HTTP2_CONNECTION_PREFACE);
assert!(result1.is_ok());
if let Ok(value) = &result1 {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
// Then add partial frame header
let partial_header = &[0x00, 0x00, 0x06, 0x04, 0x00, 0x00, 0x00, 0x00];
let result2 = extractor.add_bytes(partial_header);
assert!(result2.is_ok());
if let Ok(value) = &result2 {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
// Then add the rest of the frame (settings payload)
let settings_payload = create_settings_payload(&[(1, 65536)]);
let result3 = extractor.add_bytes(&settings_payload);
assert!(result3.is_ok());
if let Ok(fingerprint_result) = result3 {
// The fingerprint may or may not be extracted depending on frame completeness
// But parsing should succeed
if fingerprint_result.is_some() {
assert!(extractor.fingerprint_extracted());
}
}
}
#[test]
fn test_add_bytes_no_settings_frame() {
let mut extractor = Http2FingerprintExtractor::new();
// Create a frame that's not SETTINGS (e.g., PING)
let ping_frame = create_http2_frame(0x06, 0, &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
let result = extractor.add_bytes(&ping_frame);
assert!(result.is_ok());
// Should not extract fingerprint (no SETTINGS frame)
if let Ok(value) = &result {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
}
#[test]
fn test_add_bytes_after_fingerprint_extracted() {
let mut extractor = Http2FingerprintExtractor::new();
// First extract a fingerprint
let settings_payload = create_settings_payload(&[(1, 65536)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let result1 = extractor.add_bytes(&settings_frame);
assert!(result1.is_ok());
if let Ok(value) = &result1 {
assert!(value.is_some());
}
assert!(extractor.fingerprint_extracted());
// Try to add more data after fingerprint is extracted
let more_data = create_http2_frame(0x06, 0, &[0x00]);
let result2 = extractor.add_bytes(&more_data);
assert!(result2.is_ok());
// Should return None and not process the new data
if let Ok(value) = &result2 {
assert!(value.is_none());
}
// Fingerprint should still be the same
assert!(extractor.fingerprint_extracted());
}
#[test]
fn test_add_bytes_invalid_frame() {
let mut extractor = Http2FingerprintExtractor::new();
// Create invalid frame data (frame header says length is 1000 but we only provide 9 bytes)
let invalid_frame = vec![
0x00, 0x03, 0xE8, // Length: 1000
0x04, // Type: SETTINGS
0x00, // Flags
0x00, 0x00, 0x00,
0x00, // Stream ID: 0
// Missing payload (should have 1000 bytes)
];
let result = extractor.add_bytes(&invalid_frame);
// Should return an error (IncompleteFrame) or Ok(None) if more data is needed
// The parser may handle incomplete frames differently
match result {
Err(err) => {
// If it's an error, check that it's a parsing error
assert!(matches!(err, Http2ParseError::IncompleteFrame));
}
Ok(value) => {
// If it's Ok, it should be None (need more data)
assert!(value.is_none(), "Incomplete frame should return None, not Some");
}
}
}
#[test]
fn test_get_fingerprint_before_extraction() {
let extractor = Http2FingerprintExtractor::new();
assert!(extractor.get_fingerprint().is_none());
}
#[test]
fn test_get_fingerprint_after_extraction() {
let mut extractor = Http2FingerprintExtractor::new();
let settings_payload = create_settings_payload(&[(1, 65536), (2, 0)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let _ = extractor.add_bytes(&settings_frame);
let fingerprint = extractor.get_fingerprint();
assert!(fingerprint.is_some());
if let Some(fp) = fingerprint {
assert_eq!(fp.settings.len(), 2);
assert!(!fp.fingerprint.is_empty());
assert!(!fp.hash.is_empty());
}
}
#[test]
fn test_fingerprint_extracted() {
let mut extractor = Http2FingerprintExtractor::new();
assert!(!extractor.fingerprint_extracted());
let settings_payload = create_settings_payload(&[(1, 65536)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let _ = extractor.add_bytes(&settings_frame);
assert!(extractor.fingerprint_extracted());
}
#[test]
fn test_reset() {
let mut extractor = Http2FingerprintExtractor::new();
// Extract a fingerprint first
let settings_payload = create_settings_payload(&[(1, 65536)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let _ = extractor.add_bytes(&settings_frame);
assert!(extractor.fingerprint_extracted());
// Reset
extractor.reset();
// Should be back to initial state
assert!(!extractor.fingerprint_extracted());
assert!(extractor.get_fingerprint().is_none());
// Should be able to extract again
let _ = extractor.add_bytes(&settings_frame);
assert!(extractor.fingerprint_extracted());
}
#[test]
fn test_complete_fingerprint_with_all_components() {
let mut extractor = Http2FingerprintExtractor::new();
// Create SETTINGS frame
let settings_payload = create_settings_payload(&[
(1, 65536), // HEADER_TABLE_SIZE
(2, 0), // ENABLE_PUSH
(3, 1000), // MAX_CONCURRENT_STREAMS
]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
// Create WINDOW_UPDATE frame
let window_update_payload = create_window_update_payload(15663105);
let window_update_frame = create_http2_frame(0x08, 0, &window_update_payload);
// Create PRIORITY frame
let priority_payload = vec![
0x00, 0x00, 0x00, 0x03, // depends_on: 3 (no exclusive bit)
220, // weight: 220
];
let priority_frame = create_http2_frame(0x02, 1, &priority_payload);
// Combine all frames
let mut data = Vec::from(HTTP2_CONNECTION_PREFACE);
data.extend_from_slice(&settings_frame);
data.extend_from_slice(&window_update_frame);
data.extend_from_slice(&priority_frame);
let result = extractor.add_bytes(&data);
assert!(result.is_ok());
if let Ok(fingerprint_result) = &result {
assert!(fingerprint_result.is_some());
}
if let Some(fingerprint) = extractor.get_fingerprint() {
assert_eq!(fingerprint.settings.len(), 3);
assert_eq!(fingerprint.window_update, 15663105);
assert_eq!(fingerprint.priority_frames.len(), 1);
}
}
#[test]
fn test_multiple_add_bytes_calls() {
let mut extractor = Http2FingerprintExtractor::new();
// Split data across multiple calls
let settings_payload = create_settings_payload(&[(1, 65536), (2, 0), (3, 1000)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
// Split the frame into parts
let mid_point = settings_frame.len() / 2;
let part1 = &settings_frame[..mid_point];
let part2 = &settings_frame[mid_point..];
let result1 = extractor.add_bytes(part1);
assert!(result1.is_ok());
if let Ok(value) = &result1 {
assert!(value.is_none());
}
let result2 = extractor.add_bytes(part2);
assert!(result2.is_ok());
if let Ok(fingerprint_result) = &result2 {
assert!(fingerprint_result.is_some());
}
assert!(extractor.fingerprint_extracted());
}
#[test]
fn test_parsed_offset_tracking() {
let mut extractor = Http2FingerprintExtractor::new();
// Add preface
let _ = extractor.add_bytes(HTTP2_CONNECTION_PREFACE);
// Add a SETTINGS frame
let settings_payload = create_settings_payload(&[(1, 65536)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let _ = extractor.add_bytes(&settings_frame);
// Add another frame after fingerprint is extracted
// This should be ignored since fingerprint is already extracted
let ping_frame = create_http2_frame(0x06, 0, &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]);
let result = extractor.add_bytes(&ping_frame);
assert!(result.is_ok());
if let Ok(value) = &result {
assert!(value.is_none());
}
}
#[test]
fn test_preface_detection_only_on_first_call() {
let mut extractor = Http2FingerprintExtractor::new();
// Add data without preface first
let settings_payload = create_settings_payload(&[(1, 65536)]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let _ = extractor.add_bytes(&settings_frame);
// Reset and add preface + frame
extractor.reset();
let mut data = Vec::from(HTTP2_CONNECTION_PREFACE);
data.extend_from_slice(&settings_frame);
let result = extractor.add_bytes(&data);
assert!(result.is_ok());
if let Ok(value) = &result {
assert!(value.is_some());
}
}
#[test]
fn test_empty_settings_frame() {
let mut extractor = Http2FingerprintExtractor::new();
// Create SETTINGS frame with empty payload
let settings_frame = create_http2_frame(0x04, 0, &[]);
let result = extractor.add_bytes(&settings_frame);
assert!(result.is_ok());
// Empty SETTINGS frame should not produce a fingerprint
// (extract_akamai_fingerprint requires at least one setting)
if let Ok(value) = &result {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
}
#[test]
fn test_frame_too_small_for_parsing() {
let mut extractor = Http2FingerprintExtractor::new();
// Add preface
let _ = extractor.add_bytes(HTTP2_CONNECTION_PREFACE);
// Add data that's less than 9 bytes (minimum frame header)
let small_data = &[0x00, 0x00, 0x04];
let result = extractor.add_bytes(small_data);
assert!(result.is_ok());
if let Ok(value) = &result {
assert!(value.is_none());
}
assert!(!extractor.fingerprint_extracted());
}
#[test]
fn test_chrome_like_fingerprint() {
let mut extractor = Http2FingerprintExtractor::new();
// Chrome-like SETTINGS frame
let settings_payload = create_settings_payload(&[
(1, 65536), // HEADER_TABLE_SIZE: 65536
(2, 0), // ENABLE_PUSH: 0
(3, 1000), // MAX_CONCURRENT_STREAMS: 1000
(4, 6291456), // INITIAL_WINDOW_SIZE: 6291456
(5, 16384), // MAX_FRAME_SIZE: 16384
(6, 262144), // MAX_HEADER_LIST_SIZE: 262144
]);
let settings_frame = create_http2_frame(0x04, 0, &settings_payload);
let mut data = Vec::from(HTTP2_CONNECTION_PREFACE);
data.extend_from_slice(&settings_frame);
let result = extractor.add_bytes(&data);
assert!(result.is_ok());
if let Ok(Some(ref fingerprint)) = result {
assert_eq!(fingerprint.settings.len(), 6);
// Verify fingerprint string contains expected values
assert!(fingerprint.fingerprint.contains("1:65536"));
assert!(fingerprint.fingerprint.contains("2:0"));
assert!(fingerprint.fingerprint.contains("3:1000"));
}
}
| rust | Apache-2.0 | ef479d3357bc040b4dcdacffbb4ab8db07051f1b | 2026-01-04T20:21:12.648216Z | false |
biandratti/huginn-net | https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/packet_parser.rs | huginn-net-http/tests/packet_parser.rs | use huginn_net_http::packet_parser::{
detect_datalink_format, parse_packet, DatalinkFormat, IpPacket,
};
use pnet::packet::Packet;
#[test]
fn test_detect_null_datalink() {
// NULL datalink: 4-byte header + IPv6 packet
let null_packet = vec![
0x1e, 0x00, 0x00, 0x00, // NULL header
0x60, 0x00, 0x00, 0x00, // IPv6 header start (version=6)
0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header (TCP), hop limit
// Add minimal IPv6 addresses (32 bytes total)
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, // src
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, // dst
];
let format = detect_datalink_format(&null_packet);
assert_eq!(format, Some(DatalinkFormat::Null));
}
#[test]
fn test_detect_raw_ipv4() {
// Raw IPv4 packet (no datalink header)
let raw_ipv4 = vec![
0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28
0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0
0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum
0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1
0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2
];
let format = detect_datalink_format(&raw_ipv4);
assert_eq!(format, Some(DatalinkFormat::RawIp));
}
#[test]
fn test_detect_raw_ipv6() {
// Raw IPv6 packet (no datalink header)
let raw_ipv6 = vec![
0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0
0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64
// Source address: 2001:db8::1
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, // Destination address: 2001:db8::2
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02,
];
let format = detect_datalink_format(&raw_ipv6);
assert_eq!(format, Some(DatalinkFormat::RawIp));
}
#[test]
fn test_detect_ethernet_ipv4() {
// Ethernet frame with IPv4 payload
let ethernet_ipv4 = vec![
// Ethernet header (14 bytes)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC
0x08, 0x00, // EtherType: IPv4
// IPv4 header
0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28
0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0
0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum
0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1
0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2
];
let format = detect_datalink_format(ðernet_ipv4);
assert_eq!(format, Some(DatalinkFormat::Ethernet));
}
#[test]
fn test_detect_ethernet_ipv6() {
// Ethernet frame with IPv6 payload
let ethernet_ipv6 = vec![
// Ethernet header (14 bytes)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC
0x86, 0xDD, // EtherType: IPv6
// IPv6 header (40 bytes)
0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0
0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64
// Source address: 2001:db8::1
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, // Destination address: 2001:db8::2
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02,
];
let format = detect_datalink_format(ðernet_ipv6);
assert_eq!(format, Some(DatalinkFormat::Ethernet));
}
#[test]
fn test_parse_ethernet_ipv4() {
// Test parsing Ethernet frame with IPv4
let ethernet_ipv4 = vec![
// Ethernet header (14 bytes)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC
0x08, 0x00, // EtherType: IPv4
// IPv4 header
0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28
0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0
0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum
0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1
0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2
];
match parse_packet(ðernet_ipv4) {
IpPacket::Ipv4(ipv4) => {
assert_eq!(ipv4.get_version(), 4); // Version=4
assert_eq!(ipv4.get_header_length(), 5); // IHL=5 (5 * 4 = 20 bytes)
}
_ => panic!("Expected IPv4 packet"),
}
}
#[test]
fn test_parse_raw_ipv6() {
// Test parsing raw IPv6 packet
let raw_ipv6 = vec![
0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0
0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64
// Source address: 2001:db8::1
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, // Destination address: 2001:db8::2
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02,
];
match parse_packet(&raw_ipv6) {
IpPacket::Ipv6(ipv6) => {
assert_eq!(ipv6.get_version(), 6); // Version=6
assert_eq!(ipv6.packet().len(), 40); // IPv6 header length
}
_ => panic!("Expected IPv6 packet"),
}
}
#[test]
fn test_parse_null_datalink_ipv6() {
// Test parsing NULL datalink with IPv6
let null_ipv6 = vec![
0x1e, 0x00, 0x00, 0x00, // NULL header
0x60, 0x00, 0x00, 0x00, // IPv6 header start (version=6)
0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header (TCP), hop limit
// Add minimal IPv6 addresses (32 bytes total)
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, // src
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, // dst
];
match parse_packet(&null_ipv6) {
IpPacket::Ipv6(ipv6) => {
assert_eq!(ipv6.get_version(), 6); // Version=6
assert_eq!(ipv6.packet().len(), 40); // IPv6 header (40 bytes total)
}
_ => panic!("Expected NULL datalink IPv6 packet"),
}
}
| rust | Apache-2.0 | ef479d3357bc040b4dcdacffbb4ab8db07051f1b | 2026-01-04T20:21:12.648216Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/build.rs | build.rs | extern crate protobuf_codegen_pure;
fn main() {
protobuf_codegen_pure::run(protobuf_codegen_pure::Args {
out_dir: "src",
input: &["proto/message_wire.proto"],
includes: &["proto"],
customize: protobuf_codegen_pure::Customize {
..Default::default()
}
}).expect("protoc");
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/errors.rs | src/errors.rs | use std::io;
use ws;
use ring;
#[cfg(feature = "media")]
use reqwest;
use json;
use base64;
use protobuf;
error_chain! {
foreign_links {
Io(io::Error);
Websocket(ws::Error);
Crypto(ring::error::Unspecified);
Reqwest(reqwest::Error) #[cfg(feature = "media")];
Json(json::Error);
Base64(base64::DecodeError);
Protobuf(protobuf::ProtobufError);
}
errors {
NodeAttributeMissing(attribute: &'static str) {
description("missing node attribute")
display("missing mode attribute '{}'", attribute)
}
JsonFieldMissing(field: &'static str) {
description("missing field in json")
display("missing field '{}' in json", field)
}
}
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/lib.rs | src/lib.rs | extern crate ws;
extern crate simple_logger;
#[macro_use]
extern crate log;
extern crate url;
#[macro_use]
extern crate json;
extern crate ring;
extern crate base64;
extern crate qrcode;
extern crate image;
extern crate untrusted;
#[macro_use]
extern crate serde_derive;
extern crate bincode;
extern crate protobuf;
extern crate byteorder;
extern crate chrono;
#[macro_use]
extern crate error_chain;
#[cfg(feature = "media")]
extern crate reqwest;
pub mod connection;
pub mod message;
#[cfg(feature = "media")]
pub mod media;
mod message_wire;
mod node_protocol;
mod node_wire;
mod json_protocol;
mod websocket_protocol;
pub mod crypto;
mod timeout;
pub mod errors;
use std::str::FromStr;
use errors::*;
#[derive(Debug, Clone, PartialOrd, PartialEq)]
pub struct Jid {
pub id: String,
pub is_group: bool,
}
/// Jid used to identify either a group or an individual
impl Jid {
pub fn to_string(&self) -> String {
self.id.to_string() + if self.is_group { "@g.us" } else { "@c.us" }
}
/// If the Jid is from an individual return the international phonenumber, else None
pub fn phonenumber(&self) -> Option<String> {
if !self.is_group {
Some("+".to_string() + &self.id)
} else {
None
}
}
pub fn from_phonenumber(mut phonenumber: String) -> Result<Jid> {
if phonenumber.starts_with('+') {
phonenumber.remove(0);
}
if phonenumber.chars().any(|c| !c.is_digit(10)) {
return Err("not a valid phonenumber".into());
}
Ok(Jid { id: phonenumber, is_group: false })
}
}
impl FromStr for Jid {
type Err = Error;
fn from_str(jid: &str) -> Result<Jid> {
let at = jid.find('@').ok_or("jid missing @")?;
let (id, surfix) = jid.split_at(at);
Ok(Jid {
id: id.to_string(),
is_group: match surfix {
"@c.us" => false,
"@g.us" => true,
"@s.whatsapp.net" => false,
"@broadcast" => false, //TODO
_ => return Err("invalid surfix".into())
},
})
}
}
#[derive(Debug)]
pub struct Contact {
///name used in phonebook, set by user
pub name: Option<String>,
///name used in pushnotification, set by opposite peer
pub notify: Option<String>,
pub jid: Jid,
}
#[derive(Debug)]
pub struct Chat {
pub name: Option<String>,
pub jid: Jid,
pub last_activity: i64,
pub pin_time: Option<i64>,
pub mute_until: Option<i64>,
pub spam: bool,
pub read_only: bool,
}
#[derive(Debug, Copy, Clone)]
pub enum PresenceStatus {
Unavailable,
Available,
Typing,
Recording,
}
#[derive(Debug)]
pub struct GroupMetadata {
pub creation_time: i64,
pub id: Jid,
pub owner: Option<Jid>,
pub participants: Vec<(Jid, bool)>,
pub subject: String,
pub subject_owner: Jid,
pub subject_time: i64,
}
#[derive(Debug, Copy, Clone)]
pub enum GroupParticipantsChange {
Add,
Remove,
Promote,
Demote,
}
#[derive(Debug, Copy, Clone)]
pub enum ChatAction {
Add,
Remove,
Archive,
Unarchive,
Clear,
Pin(i64),
Unpin,
Mute(i64),
Unmute,
Read,
Unread,
}
#[derive(Copy, Clone)]
pub enum MediaType {
Image,
Video,
Audio,
Document,
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/timeout.rs | src/timeout.rs | #![allow(deprecated)]
use std::time::{SystemTime, UNIX_EPOCH};
use ws::Sender;
use ws::util::{Token, Timeout};
pub const RESPONSE_TIMEOUT: TimeoutWindow = TimeoutWindow { min: 3000, max: 5500 };
pub const PING_TIMEOUT: TimeoutWindow = TimeoutWindow { min: 12000, max: 16000 };
#[derive(Copy, Clone)]
pub struct TimeoutWindow {
min: u64,
max: u64
}
pub struct AbsoluteTimeoutWindow {
min: u64,
max: u64
}
impl AbsoluteTimeoutWindow {
fn new(timeout_window: &TimeoutWindow) -> AbsoluteTimeoutWindow {
let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() * 1000;
AbsoluteTimeoutWindow {
min: now + timeout_window.min,
max: now + timeout_window.max
}
}
}
#[derive(Copy, Clone)]
pub enum TimeoutState {
Deathline,
Normal
}
pub struct TimeoutManager {
window: AbsoluteTimeoutWindow,
state: TimeoutState,
timeout: Option<Timeout>,
token: Token
}
impl TimeoutManager {
pub fn new(sender: &Sender, window: TimeoutWindow, state: TimeoutState) -> TimeoutManager {
let absolute_window = AbsoluteTimeoutWindow::new(&window);
let token = Token(2);
sender.timeout(window.max, token).unwrap();
TimeoutManager {
window: absolute_window,
state,
timeout: None,
token
}
}
pub fn arm(&mut self, sender: &Sender, new_window: TimeoutWindow, new_state: TimeoutState) {
self.state = new_state;
let new_absolute_window = AbsoluteTimeoutWindow::new(&new_window);
if self.window.max < new_absolute_window.min || self.window.max > new_absolute_window.max {
self.window = new_absolute_window;
self.timeout.take().map(|timeout| sender.cancel(timeout));
self.token = Token(self.token.0 + 1);
sender.timeout(new_window.max, self.token).unwrap();
}
}
pub fn disarm(&mut self) {
self.timeout = None;
}
pub fn on_new_timeout(&mut self, token: Token, timeout: Timeout) {
if token == self.token {
self.timeout = Some(timeout);
}
}
pub fn on_timeout(&mut self, token: Token) -> Option<TimeoutState> {
if token == self.token {
self.timeout = None;
Some(self.state)
} else {
None
}
}
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/node_protocol.rs | src/node_protocol.rs | use std::collections::HashMap;
use Contact;
use Jid;
use Chat;
use ChatAction;
use PresenceStatus;
use GroupParticipantsChange;
use node_wire::{Node, NodeContent, IntoCow};
use message::{ChatMessage, MessageAck, MessageAckLevel, Peer, MessageId};
use errors::*;
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum MessageEventType {
Relay,
Last,
Before,
Set
}
#[derive(Debug)]
pub enum GroupCommand {
Create(String),
ParticipantsChange(Jid, GroupParticipantsChange),
//TODO
#[allow(dead_code)]
Leave(Jid)
}
#[derive(Debug)]
pub enum AppEvent {
Message(Box<ChatMessage>),
MessageAck(MessageAck),
// App only
ContactDelete(Jid),
//App only
ContactAddChange(Contact),
ChatAction(Jid, ChatAction),
//App only
Battery(u8),
//Client only
MessageRead { id: MessageId, peer: Peer },
//Client only
MessagePlayed { id: MessageId, peer: Peer },
//Client only
GroupCommand { inducer: Jid, id: String, participants: Vec<Jid>, command: GroupCommand },
//Client only
PresenceChange(PresenceStatus, Option<Jid>),
//Client only
StatusChange(String),
//Client only
NotifyChange(String),
//Client only
BlockProfile { unblock: bool, jid: Jid }
}
#[derive(Debug)]
pub enum Query {
MessagesBefore { jid: Jid, id: String, count: u16 }
}
#[derive(Debug)]
pub enum AppMessage {
MessagesEvents(Option<MessageEventType>, Vec<AppEvent>),
//App only
Contacts(Vec<Contact>),
//App only
Chats(Vec<Chat>),
//Client only
Query(Query)
}
impl AppMessage {
pub fn deserialize(root_node: Node) -> Result<AppMessage> {
let event_type = root_node.get_attribute("add").and_then(|add| MessageEventType::from_node(add.as_str())).ok();
match root_node.desc() {
"action" => {
if let NodeContent::List(list) = root_node.content {
let mut app_events = Vec::with_capacity(list.len());
for mut node in list {
match node.desc() {
"message" => {
if let NodeContent::Binary(ref content) = node.content {
app_events.push(AppEvent::Message(Box::new(ChatMessage::from_proto_binary(content)?)));
} else {
bail!{ "invalid nodetype for chatmessage" }
}
}
"received" => {
app_events.push(AppEvent::MessageAck(
MessageAck::from_app_message(MessageId(node.take_attribute("index")?.into_string()),
MessageAckLevel::from_node(node.get_attribute("type")?.as_str())?,
node.take_attribute("jid")?.into_jid()?,
node.take_attribute("participant").and_then(|participant| participant.into_jid()).ok(),
node.take_attribute("owner")?.as_str().parse().map_err(|_| "NAN")?)))
}
"read" => {
let jid = node.take_attribute("jid")?.into_jid()?;
app_events.push(AppEvent::ChatAction(jid, if node.take_attribute("type").ok().map_or(true, |typ| typ.as_str() != "false") {
ChatAction::Read
} else {
ChatAction::Unread
}));
}
"user" => {
let contact = Contact::parse_node(&mut node)?;
app_events.push(if contact.name.is_some() {
AppEvent::ContactAddChange(contact)
} else {
AppEvent::ContactDelete(contact.jid)
})
}
"chat" => {
let jid = node.take_attribute("jid")?.into_jid()?;
let action = ChatAction::from_node(&mut node)?;
app_events.push(AppEvent::ChatAction(jid, action));
}
"battery" => {
let level = node.take_attribute("value")?.as_str().parse().map_err(|_| "NAN")?;
app_events.push(AppEvent::Battery(level));
}
_ => {}
}
}
Ok(AppMessage::MessagesEvents(event_type, app_events))
} else {
bail!{ "invalid or unsupported action type"}
}
}
"response" => {
match root_node.get_attribute("type")?.as_str() {
"contacts" => {
if let NodeContent::List(mut list) = root_node.content {
let mut contacts = Vec::with_capacity(list.len());
for mut node in list {
contacts.push(Contact::parse_node(&mut node)?);
}
Ok(AppMessage::Contacts(contacts))
} else {
bail!{ "Invalid nodetype for contacts"}
}
}
"chat" => {
if let NodeContent::List(mut list) = root_node.content {
let mut chats = Vec::with_capacity(list.len());
for mut node in list {
chats.push(Chat::parse_node(&mut node)?);
}
Ok(AppMessage::Chats(chats))
} else {
bail!{ "Invalid nodetype for chats"}
}
}
_ => bail!{ "invalid or unsupported 'response' type"}
}
}
_ => bail!{ "invalid or unsupported app message type"}
}
}
pub fn serialize(self, epoch: u32) -> Node {
let mut attributes = HashMap::new();
attributes.insert("epoch".cow(), NodeContent::String(epoch.to_string().cow()));
match self {
AppMessage::MessagesEvents(typ, events) => {
attributes.insert("type".cow(), NodeContent::Token(typ.unwrap().into_node()));
Node::new("action", attributes, NodeContent::List(
events.into_iter().map(|event| {
match event {
AppEvent::MessageRead { id, peer } => {
let mut attributes = HashMap::new();
attributes.insert("index".cow(), NodeContent::String(id.0.cow()));
match peer {
Peer::Individual(jid) => {
attributes.insert("jid".cow(), NodeContent::Jid(jid));
}
Peer::Group { group, participant } => {
attributes.insert("jid".cow(), NodeContent::Jid(group));
attributes.insert("participant".cow(), NodeContent::Jid(participant));
}
}
attributes.insert("owner".cow(), NodeContent::Token("false"));
attributes.insert("count".cow(), NodeContent::String("1".cow()));
Node::new("read", attributes, NodeContent::None)
}
AppEvent::MessagePlayed { id, peer } => {
let mut attributes = HashMap::new();
attributes.insert("type".cow(), NodeContent::Token("played"));
attributes.insert("index".cow(), NodeContent::String(id.0.cow()));
match peer {
Peer::Individual(jid) => { attributes.insert("from".cow(), NodeContent::Jid(jid)); }
Peer::Group { group, participant } => {
attributes.insert("from".cow(), NodeContent::Jid(group));
attributes.insert("participant".cow(), NodeContent::Jid(participant));
}
}
attributes.insert("owner".cow(), NodeContent::Token("false"));
attributes.insert("count".cow(), NodeContent::String("1".cow()));
Node::new("received", attributes, NodeContent::None)
}
AppEvent::Message(message) => {
Node::new("message", HashMap::new(), NodeContent::Binary(message.into_proto_binary()))
}
AppEvent::GroupCommand { inducer, id, participants, command } => {
let mut attributes = HashMap::new();
match command {
GroupCommand::Create(subject) => {
attributes.insert("subject".cow(), NodeContent::String(subject.cow()));
attributes.insert("type".cow(), NodeContent::Token("create"));
}
GroupCommand::ParticipantsChange(jid, participants_change) => {
attributes.insert("type".cow(), NodeContent::Token(participants_change.into_node()));
attributes.insert("jid".cow(), NodeContent::Jid(jid));
}
GroupCommand::Leave(jid) => {
attributes.insert("type".cow(), NodeContent::Token("leave"));
attributes.insert("jid".cow(), NodeContent::Jid(jid));
}
}
attributes.insert("author".cow(), NodeContent::Jid(inducer));
attributes.insert("id".cow(), NodeContent::String(id.cow()));
Node::new(
"group",
attributes,
NodeContent::List(participants.into_iter().map(|jid| {
let mut attributes = HashMap::new();
attributes.insert("jid".cow(), NodeContent::Jid(jid));
Node::new("participant", attributes, NodeContent::None)
}).collect())
)
}
AppEvent::PresenceChange(status, jid) => {
let mut attributes = HashMap::new();
attributes.insert("type".cow(), NodeContent::Token(status.into_node()));
if let Some(jid) = jid {
attributes.insert("to".cow(), NodeContent::Jid(jid));
}
Node::new("presence", attributes, NodeContent::None)
}
AppEvent::ChatAction(jid, action) => {
let mut attributes = HashMap::new();
attributes.insert("jid".cow(), NodeContent::Jid(jid));
match action {
ChatAction::Pin(time) => {
attributes.insert("type".cow(), NodeContent::String("pin".cow()));
attributes.insert("pin".cow(), NodeContent::String(time.to_string().cow()));
}
//Fixme
ChatAction::Unpin => {
attributes.insert("type".cow(), NodeContent::String("pin".cow()));
//attributes.insert("previous".to_string(), NodeContent::String(time.to_string()));
}
ChatAction::Mute(time) => {
attributes.insert("type".cow(), NodeContent::Token("mute"));
attributes.insert("mute".cow(), NodeContent::String(time.to_string().cow()));
}
//Fixme
ChatAction::Unmute => {
attributes.insert("type".cow(), NodeContent::Token("mute"));
}
ChatAction::Archive => {
attributes.insert("type".cow(), NodeContent::Token("archive"));
}
ChatAction::Unarchive => {
attributes.insert("type".cow(), NodeContent::Token("unarchive"));
}
_ => unimplemented!()
}
Node::new("chat", attributes, NodeContent::None)
}
AppEvent::StatusChange(status) => {
Node::new("status", HashMap::new(), NodeContent::String(status.cow()))
}
AppEvent::NotifyChange(name) => {
let mut node = Node::new_empty("profile");
node.set_attribute("name", NodeContent::String(name.cow()));
node
}
AppEvent::BlockProfile { unblock, jid } => {
let mut attributes = HashMap::new();
attributes.insert("jid".cow(), NodeContent::Jid(jid));
let user = Node::new("user", attributes, NodeContent::None);
let mut attributes = HashMap::new();
attributes.insert("type".cow(), NodeContent::Token(if unblock { "remove" } else { "add" }));
Node::new(
"block",
attributes,
NodeContent::List(vec![user])
)
}
_ => unimplemented!()
}
}).collect())
)
}
AppMessage::Query(query) => {
match query {
Query::MessagesBefore { jid, id, count } => {
let mut node = Node::new_empty("query");
node.set_attribute("type", NodeContent::Token("message"));
node.set_attribute("kind", NodeContent::Token("before"));
node.set_attribute("jid", NodeContent::Jid(jid));
node.set_attribute("count", NodeContent::String(count.to_string().cow()));
node.set_attribute("index", NodeContent::String(id.cow()));
node.set_attribute("owner", NodeContent::Token("false"));
node
}
}
}
_ => unreachable!()
}
}
}
pub fn parse_message_response(root_node: Node) -> Result<Vec<ChatMessage>> {
if root_node.desc() == "response" && root_node.get_attribute("type").ok().map_or(false, |typ| typ.as_str() == "message") {
if let NodeContent::List(nodes) = root_node.content {
let mut messages = Vec::with_capacity(nodes.len());
for node in nodes {
if let NodeContent::Binary(ref content) = node.content {
messages.push(ChatMessage::from_proto_binary(content)?);
} else {
bail!{ "invalid nodetype for chatmessage" }
}
}
Ok(messages)
} else {
bail!{ "invalid nodetype for chatmessage" }
}
} else {
bail!{ "invalid response" }
}
}
impl Contact {
fn parse_node(node: &mut Node) -> Result<Contact> {
Ok(Contact {
name: node.take_attribute("name").map(|name| name.into_string()).ok(),
notify: node.take_attribute("notify").map(|notify| notify.into_string()).ok(),
jid: node.take_attribute("jid")?.into_jid()?
})
}
}
impl Chat {
fn parse_node(node: &mut Node) -> Result<Chat> {
Ok(Chat {
name: node.take_attribute("name").map(|name| name.into_string()).ok(),
jid: node.take_attribute("jid")?.into_jid()?,
last_activity: node.take_attribute("t")?.into_string().parse().map_err(|_| "NAN")?,
spam: node.take_attribute("spam")?.into_string().parse().map_err(|_| "NAN")?,
mute_until: node.take_attribute("mute").ok().and_then(|t| t.into_string().parse().ok()),
pin_time: node.take_attribute("pin").ok().and_then(|t| t.into_string().parse().ok()),
read_only: node.take_attribute("read_only").ok().and_then(|read_only| read_only.into_string().parse().ok()).unwrap_or(false),
})
}
}
impl MessageAckLevel {
fn from_node(value: &str) -> Result<MessageAckLevel> {
Ok(match value {
"message" => MessageAckLevel::Received,
"played" => MessageAckLevel::Played,
"read" => MessageAckLevel::Read,
_ => bail!{"invalid message ack level {}", value}
})
}
#[allow(dead_code)]
fn to_node(self) -> &'static str {
match self {
MessageAckLevel::Received => "message",
MessageAckLevel::Played => "played",
MessageAckLevel::Read => "read",
_ => unimplemented!()
}
}
}
impl MessageEventType {
fn from_node(value: &str) -> Result<MessageEventType> {
Ok(match value {
"last" => MessageEventType::Last,
"before" => MessageEventType::Before,
"relay" => MessageEventType::Relay,
"set" => MessageEventType::Set,
_ => bail!{"invalid message event type {}", value}
})
}
}
impl MessageEventType {
fn into_node(self) -> &'static str {
match self {
MessageEventType::Last => "last",
MessageEventType::Before => "before",
MessageEventType::Relay => "relay",
MessageEventType::Set => "set",
}
}
}
impl ChatAction {
fn from_node(node: &mut Node) -> Result<ChatAction> {
Ok(match node.take_attribute("type")?.as_str() {
"spam" => ChatAction::Add,
"delete" => ChatAction::Remove,
"archive" => ChatAction::Archive,
"unarchive" => ChatAction::Unarchive,
"clear" => ChatAction::Clear,
"pin" => {
if let Ok(time) = node.take_attribute("pin") {
ChatAction::Pin(time.as_str().parse().map_err(|_| "NAN")?)
} else {
ChatAction::Unpin
}
}
"mute" => {
if let Ok(time) = node.take_attribute("mute") {
ChatAction::Mute(time.as_str().parse().map_err(|_| "NAN")?)
} else {
ChatAction::Unmute
}
}
_ => bail!{ "invalid or unsupported chat action type"}
})
}
}
impl GroupParticipantsChange {
fn into_node(self) -> &'static str {
match self {
GroupParticipantsChange::Add => "add",
GroupParticipantsChange::Remove => "remote",
GroupParticipantsChange::Promote => "promote",
GroupParticipantsChange::Demote => "demote"
}
}
}
impl PresenceStatus {
fn into_node(self) -> &'static str {
match self {
PresenceStatus::Unavailable => "unavailable",
PresenceStatus::Available => "available",
PresenceStatus::Typing => "composing",
PresenceStatus::Recording => "recording",
}
}
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/media.rs | src/media.rs | extern crate base64;
extern crate json;
extern crate image;
use std::io::Cursor;
use std::thread;
use std::sync::Arc;
use json_protocol::JsonNonNull;
use image::{GenericImage, RGB};
use image::jpeg::JPEGEncoder;
use reqwest;
use MediaType;
use crypto;
use message::FileInfo;
use connection::{WhatsappWebConnection, WhatsappWebHandler};
use errors::*;
pub fn generate_thumbnail_and_get_size(image: &[u8]) -> (Vec<u8>, (u32, u32)) {
let image = image::load_from_memory(image).unwrap();
let size = (image.height(), image.width());
let thumbnail = image.thumbnail(160, 160).to_rgb();
let mut thumbnail_writter = Cursor::new(Vec::new());
JPEGEncoder::new(&mut thumbnail_writter).encode(&thumbnail, thumbnail.width(), thumbnail.height(), RGB(8)).unwrap();
(thumbnail_writter.into_inner(), size)
}
/// Download file from servers and decrypt it
pub fn download_file(file_info: FileInfo, media_type: MediaType, callback: Box<Fn(Result<Vec<u8>>) + Send + Sync>) {
thread::spawn(move || {
let mut file_enc = Cursor::new(Vec::with_capacity(file_info.size));
callback(reqwest::get(&file_info.url)
.map_err(|e| Error::with_chain(e, "could not load file"))
.and_then(|mut response| {
let status = response.status();
if status.is_success() {
response.copy_to(&mut file_enc)
.map_err(|e| Error::with_chain(e, "could not load file"))
} else {
bail!{"received http status code {}", status.as_u16()}
}
})
.and_then(|_| crypto::decrypt_media_message(&file_info.key, media_type, &file_enc.into_inner())));
});
}
/// Upload file to servers and encrypt it
pub fn upload_file<H>(file: &[u8], media_type: MediaType, connection: &WhatsappWebConnection<H>, callback: Box<Fn(Result<FileInfo>) + Send + Sync>)
where H: WhatsappWebHandler + Send + Sync + 'static {
let file_hash = crypto::sha256(file);
let file_hash = Arc::new(file_hash);
let callback = Arc::new(callback);
let (file_encrypted, media_key) = crypto::encrypt_media_message(media_type, file);
let file_encrypted_hash = crypto::sha256(&file_encrypted);
//Todo refactoring, remove arc -> request_file_upload fnonce
let file_encrypted_hash = Arc::new(file_encrypted_hash);
let file_encrypted = Arc::new(file_encrypted);
let media_key = Arc::new(media_key);
let file_len = file.len();
connection.request_file_upload(&file_hash.clone(), media_type, Box::new(move |url: Result<&str>| {
match url {
Ok(url) => {
let url = url.to_string();
let file_hash = file_hash.clone();
let file_encrypted_hash = file_encrypted_hash.clone();
let file_encrypted = file_encrypted.clone();
let media_key = media_key.clone();
let callback = callback.clone();
thread::spawn(move || {
let form = reqwest::multipart::Form::new()
.text("hash", base64::encode(&file_encrypted_hash.to_vec()))
.part("file", reqwest::multipart::Part::reader(Cursor::new(file_encrypted.to_vec()))
.mime(reqwest::mime::APPLICATION_OCTET_STREAM));
let file_info = reqwest::Client::new().post(url.as_str())
.multipart(form)
.send()
.and_then(|mut response| response.text())
.map_err(|e| Error::with_chain(e, "could not upload file"))
.and_then(|response| json::parse(&response).map_err(|e| (Error::with_chain(e, "invalid response"))))
.and_then(|json| json.get_str("url").map(|url| url.to_string()))
.map(|url| FileInfo {
mime: "image/jpeg".to_string(),
sha256: file_hash.to_vec(),
enc_sha256: file_encrypted_hash.to_vec(),
key: media_key.to_vec(),
url,
size: file_len, //Or encrypted file size ??
});
callback(file_info);
});
}
Err(err) => callback(Err(err).chain_err(|| "could not request file upload"))
}
}))
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/websocket_protocol.rs | src/websocket_protocol.rs | use std::str;
use std::borrow::Cow;
use std::borrow::Borrow;
use std::ops::Deref;
use ws::Message;
use json;
use json::JsonValue;
#[derive(Copy, Clone, PartialEq)]
#[allow(dead_code)]
pub enum WebsocketMessageMetric {
None = 0,
DebugLog = 1,
QueryResume = 2,
QueryReceipt = 3,
QueryMedia = 4,
QueryChat = 5,
QueryContacts = 6,
QueryMessages = 7,
Presence = 8,
PresenceSubscribe = 9,
Group = 10,
Read = 11,
Chat = 12,
Received = 13,
Pic = 14,
Status = 15,
Message = 16,
QueryActions = 17,
Block = 18,
QueryGroup = 19,
QueryPreview = 20,
QueryEmoji = 21,
QueryMessageInfo = 22,
Spam = 23,
QuerySearch = 24,
QueryIdentity = 25,
QueryUrl = 26,
Profile = 27,
Contact = 28,
QueryVcard = 29,
QueryStatus = 30,
QueryStatusUpdate = 31,
PrivacyStatus = 32,
QueryLiveLocations = 33,
LiveLocation = 34,
QueryVname = 35,
QueryLabels = 36,
Call = 37,
QueryCall = 38,
QueryQuickReplies = 39
}
pub struct WebsocketMessage<'a> {
pub tag: Cow<'a, str>,
pub payload: WebsocketMessagePayload<'a>
}
pub enum WebsocketMessagePayload<'a> {
Json(JsonValue),
BinarySimple(&'a [u8]),
BinaryEphemeral(WebsocketMessageMetric, &'a [u8]),
Empty,
Pong
}
impl<'a> WebsocketMessage<'a> {
#[inline]
pub fn serialize(&self) -> Message {
match self.payload {
WebsocketMessagePayload::Json(ref json) => {
Message::Text([self.tag.deref(), ",", json.to_string().as_str()].concat())
}
WebsocketMessagePayload::BinarySimple(ref binary) => {
Message::Binary([self.tag.deref().as_bytes(), b",", binary].concat())
}
WebsocketMessagePayload::BinaryEphemeral(metric, ref binary) => {
if metric != WebsocketMessageMetric::None {
Message::Binary([self.tag.deref().as_bytes(), b",", &[metric as u8], b"\x80", binary].concat())
} else {
Message::Binary([self.tag.deref().as_bytes(), b",,", binary].concat())
}
}
WebsocketMessagePayload::Empty => {
Message::Text([self.tag.borrow(), ","].concat())
}
WebsocketMessagePayload::Pong => unimplemented!()
}
}
#[inline]
pub fn deserialize(message: &'a Message) -> Result<WebsocketMessage<'a>, ()> {
match *message {
Message::Text(ref message) => {
if let Some(sep) = message.find(',') {
let (tag_str, payload) = message.split_at(sep + 1);
let tag = Cow::Borrowed(tag_str.split_at(sep).0);
Ok(if payload.is_empty() {
WebsocketMessage { tag, payload: WebsocketMessagePayload::Empty }
} else {
WebsocketMessage { tag, payload: WebsocketMessagePayload::Json(json::parse(payload).map_err(|_| ())?) }
})
} else if message.get(0..1).map_or(false, |first| first == "!") {
Ok(WebsocketMessage { tag: Cow::Borrowed(""), payload: WebsocketMessagePayload::Pong })
} else {
Err(())
}
}
Message::Binary(ref message) => {
if let Some(sep) = message.iter().position(|x| x == &b',') {
Ok(WebsocketMessage {
tag: Cow::Borrowed(str::from_utf8(&message[..sep]).map_err(|_| ())?),
payload: WebsocketMessagePayload::BinarySimple(&message[(sep + 1)..])
})
} else {
Err(())
}
}
}
}
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/connection.rs | src/connection.rs | use std::sync::Mutex;
use std::collections::HashMap;
use std::thread;
use std::thread::JoinHandle;
use std::marker::Send;
use std::sync::Arc;
use std::clone::Clone;
use std::ops::Deref;
use ws;
use ws::{CloseCode, Handler, Request, Sender, Message};
use ring::agreement;
use ring::rand::{SystemRandom, SecureRandom};
use url::Url;
use qrcode::QrCode;
use base64;
use json::JsonValue;
use ws::util::{Token, Timeout};
use std::time::{SystemTime, Duration};
use chrono::{NaiveDateTime, Utc};
use crypto;
use message::{ChatMessage as WhatsappMessage, MessageAck, ChatMessageContent, Peer, Direction, MessageId};
use timeout;
use json_protocol;
use json_protocol::ServerMessage;
use websocket_protocol::{WebsocketMessage, WebsocketMessagePayload, WebsocketMessageMetric};
use node_protocol;
use node_protocol::{AppMessage, MessageEventType, AppEvent, Query, GroupCommand};
use node_wire::Node;
use super::{Jid, PresenceStatus, Contact, Chat, GroupMetadata, GroupParticipantsChange, ChatAction, MediaType};
use errors::*;
pub struct WhatsappWebConnection<H: WhatsappWebHandler + Send + Sync + 'static> {
inner: Arc<Mutex<WhatsappWebConnectionInner<H>>>,
//Todo
handler: Arc<H>
}
impl<H: WhatsappWebHandler + Send + Sync + 'static> Clone for WhatsappWebConnection<H> {
fn clone(&self) -> Self {
WhatsappWebConnection { handler: self.handler.clone(), inner: self.inner.clone() }
}
}
#[derive(Eq, PartialEq, Clone, Copy, Debug)]
pub enum State {
Uninitialized = 0,
Connected = 1,
Disconnecting = 2,
Reconnecting = 3
}
pub enum DisconnectReason {
Replaced,
Removed
}
#[derive(Debug)]
pub enum UserData {
/// Contacts are initial send by the app
ContactsInitial(Vec<Contact>),
/// Contact is added or changed
ContactAddChange(Contact),
/// Contact is removed
ContactDelete(Jid),
/// Chats are initial send by the app
Chats(Vec<Chat>),
ChatAction(Jid, ChatAction),
/// Jid of the own user
UserJid(Jid),
PresenceChange(Jid, PresenceStatus, Option<NaiveDateTime>),
MessageAck(MessageAck),
GroupIntroduce { newly_created: bool, inducer: Jid, meta: GroupMetadata },
GroupParticipantsChange { group: Jid, change: GroupParticipantsChange, inducer: Option<Jid>, participants: Vec<Jid> },
/// Batterylevel which is submitted by the app
Battery(u8)
}
pub trait WhatsappWebHandler<H = Self> where H: WhatsappWebHandler<H> + Send + Sync + 'static {
fn on_state_changed(&self, connection: &WhatsappWebConnection<H>, state: State);
fn on_user_data_changed(&self, connection: &WhatsappWebConnection<H>, user_data: UserData);
fn on_persistent_session_data_changed(&self, persistent_session: PersistentSession);
fn on_disconnect(&self, reason: DisconnectReason);
fn on_message(&self, connection: &WhatsappWebConnection<H>, message_new: bool, message: Box<WhatsappMessage>);
}
enum SessionState {
PendingNew { private_key: Option<agreement::EphemeralPrivateKey>, public_key: Vec<u8>, client_id: [u8; 8], qr_callback: Box<Fn(QrCode) + Send> },
PendingPersistent { persistent_session: PersistentSession },
Established { persistent_session: PersistentSession },
Teardown
}
enum WebsocketState {
Disconnected,
Connected(Sender, timeout::TimeoutManager)
}
enum WebsocketResponse {
Json(JsonValue),
Node(Node)
}
struct WhatsappWebConnectionInner<H: WhatsappWebHandler<H> + Send + Sync + 'static> {
pub user_jid: Option<Jid>,
requests: HashMap<String, Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>>,
messages_tag_counter: u32,
session_state: SessionState,
websocket_state: WebsocketState,
epoch: u32
}
impl<H: WhatsappWebHandler<H> + Send + Sync + 'static> WhatsappWebConnectionInner<H> {
fn send_json_message(&mut self, message: JsonValue, cb: Box<Fn(JsonValue, &WhatsappWebConnection<H>) + Send>) {
debug!("sending json {:?}", &message);
let tag = self.alloc_message_tag();
self.ws_send_message(WebsocketMessage {
tag: tag.into(),
payload: WebsocketMessagePayload::Json(message)
}, Box::new(move |payload, conn| {
if let WebsocketResponse::Json(json) = payload {
cb(json, conn)
}
}));
}
fn send_group_command(&mut self, command: GroupCommand, participants: Vec<Jid>) {
let tag = self.alloc_message_tag();
let app_event = AppEvent::GroupCommand { inducer: self.user_jid.clone().unwrap(), participants, id: tag.clone(), command };
self.send_app_message(Some(tag), WebsocketMessageMetric::Group, AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![app_event]),
Box::new(|_, _| {}));
}
fn send_app_message(&mut self, tag: Option<String>, metric: WebsocketMessageMetric, app_message: AppMessage, cb: Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>) {
self.epoch += 1;
let epoch = self.epoch;
self.send_node_message(tag, metric, app_message.serialize(epoch), cb);
}
#[inline]
fn send_node_message(&mut self, tag: Option<String>, metric: WebsocketMessageMetric, node: Node, cb: Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>) {
debug!("sending node {:?}", &node);
self.send_binary_message(tag, metric, &node.serialize(), cb);
}
fn ws_send_message(&mut self, message: WebsocketMessage, callback: Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>) {
if let WebsocketState::Connected(ref sender, _) = self.websocket_state {
sender.send(message.serialize()).unwrap();
self.requests.insert(message.tag.into(), callback);
}
}
fn alloc_message_tag(&mut self) -> String {
let tag = self.messages_tag_counter;
self.messages_tag_counter += 1;
tag.to_string()
}
fn send_binary_message(&mut self, tag: Option<String>, metric: WebsocketMessageMetric, message: &[u8], cb: Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>) {
let encrypted_message = if let SessionState::Established { ref persistent_session } = self.session_state {
crypto::sign_and_encrypt_message(&persistent_session.enc, &persistent_session.mac, &message)
} else {
return;
};
let tag = tag.unwrap_or_else(|| self.alloc_message_tag());
self.ws_send_message(WebsocketMessage {
tag: tag.into(),
payload: WebsocketMessagePayload::BinaryEphemeral(metric, &encrypted_message)
}, cb);
}
fn decrypt_binary_message(&mut self, encrypted_message: &[u8]) -> Result<Vec<u8>> {
if let SessionState::Established { ref persistent_session } = self.session_state {
crypto::verify_and_decrypt_message(&persistent_session.enc[..], &persistent_session.mac[..], &encrypted_message)
} else {
bail!{"connection not established yet"}
}
}
fn handle_server_conn(&mut self, user_jid: Jid, client_token: &str, server_token: &str, secret: Option<&str>) -> Result<(PersistentSession, Jid)> {
let (new_session_state, persistent_session, user_jid) = match self.session_state {
SessionState::PendingNew { ref mut private_key, ref client_id, .. } => {
let secret = base64::decode(secret.ok_or(ErrorKind::JsonFieldMissing("secret"))?)?;
let (enc, mac) = crypto::calculate_secret_keys(&secret, private_key.take().unwrap())?;
self.user_jid = Some(user_jid);
let persistent_session = PersistentSession {
client_token: client_token.to_string(),
server_token: server_token.to_string(),
client_id: *client_id,
enc,
mac
};
(SessionState::Established { persistent_session: persistent_session.clone() }, persistent_session, self.user_jid.clone())
}
SessionState::PendingPersistent { ref persistent_session } => {
self.user_jid = Some(user_jid);
let new_persistent_session = PersistentSession {
client_id: persistent_session.client_id,
enc: persistent_session.enc,
mac: persistent_session.mac,
client_token: client_token.to_string(),
server_token: server_token.to_string()
};
(SessionState::Established { persistent_session: new_persistent_session.clone() }, new_persistent_session, self.user_jid.clone())
}
_ => { bail!{"Session already established but received conn packet"} }
};
self.session_state = new_session_state;
Ok((persistent_session, user_jid.unwrap()))
}
fn on_timeout(&mut self, event: Token) {
if let WebsocketState::Connected(ref sender, ref mut timeout_manager) = self.websocket_state {
match timeout_manager.on_timeout(event) {
Some(timeout::TimeoutState::Normal) => {
sender.send(Message::Text("?,,".to_string())).ok();
timeout_manager.arm(&sender, timeout::RESPONSE_TIMEOUT, timeout::TimeoutState::Deathline);
}
Some(timeout::TimeoutState::Deathline) => {
sender.close(CloseCode::Abnormal).ok();
}
_ => {}
}
} else {
unreachable!();
}
}
fn handle_server_challenge(&mut self, challenge: &[u8]) {
let message = if let SessionState::PendingPersistent { ref persistent_session } = self.session_state {
let signature = crypto::sign_challenge(&persistent_session.mac, challenge);
json_protocol::build_challenge_response(persistent_session.server_token.as_str(), &base64::encode(&persistent_session.client_id), signature.as_ref())
} else {
return;
};
self.send_json_message(message, Box::new(move |_, _| {}));
}
fn handle_server_disconnect(&mut self) {
self.session_state = SessionState::Teardown;
}
fn ws_on_connected(&mut self, out: Sender) {
let timeout_manager = timeout::TimeoutManager::new(&out, timeout::PING_TIMEOUT, timeout::TimeoutState::Normal);
self.websocket_state = match self.websocket_state {
WebsocketState::Disconnected => WebsocketState::Connected(out, timeout_manager),
WebsocketState::Connected(_, _) => return
};
let message: (JsonValue, Box<Fn(JsonValue, &WhatsappWebConnection<H>) + Send>) = match self.session_state {
SessionState::PendingNew { ref client_id, .. } => {
let mut init_command = json_protocol::build_init_request(base64::encode(&client_id).as_str());
(init_command, Box::new(move |response, connection| {
if let Ok(reference) = json_protocol::parse_init_response(&response) {
match connection.inner.lock().unwrap().session_state {
SessionState::PendingNew { ref public_key, ref client_id, ref qr_callback, .. } => {
debug!("QRCode: {}", reference);
qr_callback(QrCode::new(
format!("{},{},{}", reference, base64::encode(&public_key), base64::encode(&client_id))
).unwrap());
}
_ => {
unreachable!()
}
}
} else {
error!("error");
}
}))
}
SessionState::PendingPersistent { ref persistent_session } => {
let mut init_command = json_protocol::build_init_request(base64::encode(&persistent_session.client_id).as_str());
(init_command, Box::new(move |response, connection| {
if let Err(err) = json_protocol::parse_response_status(&response) {
error!("error {:?}", err);
} else {
let mut inner = connection.inner.lock().unwrap();
let message: (JsonValue, Box<Fn(JsonValue, &WhatsappWebConnection<H>) + Send>) = match inner.session_state {
SessionState::PendingPersistent { ref persistent_session } => {
let mut login_command = json_protocol::build_takeover_request(persistent_session.client_token.as_str(),
persistent_session.server_token.as_str(),
&base64::encode(&persistent_session.client_id));
(login_command, Box::new(move |response, connection| {
if let Err(err) = json_protocol::parse_response_status(&response) {
error!("error {:?}", err);
connection.ws_disconnect();
connection.handler.on_disconnect(DisconnectReason::Removed);
}
}))
}
_ => unreachable!()
};
inner.send_json_message(message.0, message.1);
}
}))
}
_ => { unreachable!() }
};
self.send_json_message(message.0, message.1);
}
}
impl<H: WhatsappWebHandler<H> + Send + Sync> WhatsappWebConnection<H> {
fn new<Q: Fn(QrCode) + Send + 'static>(qr_callback: Box<Q>, handler: H) -> WhatsappWebConnection<H> {
let mut client_id = [0u8; 8];
SystemRandom::new().fill(&mut client_id).unwrap();
let (private_key, public_key) = crypto::generate_keypair();
WhatsappWebConnection {
handler: Arc::new(handler),
inner: Arc::new(Mutex::new(WhatsappWebConnectionInner {
user_jid: None,
websocket_state: WebsocketState::Disconnected,
requests: HashMap::new(),
messages_tag_counter: 0,
session_state: SessionState::PendingNew {
private_key: Some(private_key),
public_key,
client_id,
qr_callback
},
epoch: 0
}))
}
}
fn with_persistent_session(persistent_session: PersistentSession, handler: H) -> WhatsappWebConnection<H> {
WhatsappWebConnection {
handler: Arc::new(handler),
inner: Arc::new(Mutex::new(WhatsappWebConnectionInner {
user_jid: None,
websocket_state: WebsocketState::Disconnected,
requests: HashMap::new(),
messages_tag_counter: 0,
session_state: SessionState::PendingPersistent {
persistent_session
},
epoch: 0
}))
}
}
fn send_json_message(&self, message: JsonValue, cb: Box<Fn(JsonValue, &WhatsappWebConnection<H>) + Send>) {
self.inner.lock().unwrap().send_json_message(message, cb);
}
fn send_app_message(&self, tag: Option<String>, metric: WebsocketMessageMetric, app_message: AppMessage, cb: Box<Fn(WebsocketResponse, &WhatsappWebConnection<H>) + Send>) {
self.inner.lock().unwrap().send_app_message(tag, metric, app_message, cb)
}
fn ws_on_disconnected(&self) {
let mut inner = self.inner.lock().unwrap();
inner.websocket_state = WebsocketState::Disconnected;
inner.session_state = match inner.session_state {
SessionState::Established { ref persistent_session } => {
SessionState::PendingPersistent { persistent_session: persistent_session.clone() }
}
_ => return
};
drop(inner);
self.handler.on_state_changed(self, State::Reconnecting);
}
fn ws_on_message(&self, message: &Message) {
trace!("received websocket message {:?}", message);
let mut inner = self.inner.lock().unwrap();
if let WebsocketState::Connected(ref out, ref mut timeout_manager) = inner.websocket_state {
timeout_manager.arm(out, timeout::PING_TIMEOUT, timeout::TimeoutState::Normal);
} else {
return;
}
let message = WebsocketMessage::deserialize(message).unwrap();
match message.payload {
WebsocketMessagePayload::Json(payload) => {
debug!("received json: {:?}", &payload);
if let Some(cb) = inner.requests.remove(message.tag.deref()) {
drop(inner);
cb(WebsocketResponse::Json(payload), &self);
} else {
match ServerMessage::deserialize(&payload) {
Ok(ServerMessage::ConnectionAck { user_jid, client_token, server_token, secret }) => {
if let Ok((persistent_session, user_jid)) = inner.handle_server_conn(user_jid, client_token, server_token, secret) {
drop(inner);
self.handler.on_state_changed(self, State::Connected);
self.handler.on_persistent_session_data_changed(persistent_session);
self.handler.on_user_data_changed(&self, UserData::UserJid(user_jid));
}
}
Ok(ServerMessage::ChallengeRequest(challenge)) => {
inner.handle_server_challenge(&challenge)
}
Ok(ServerMessage::Disconnect(kind)) => {
inner.handle_server_disconnect();
drop(inner);
self.handler.on_state_changed(self, State::Disconnecting);
self.handler.on_disconnect(if kind.is_some() {
DisconnectReason::Replaced
} else {
DisconnectReason::Removed
});
}
Ok(ServerMessage::PresenceChange { jid, status, time }) => {
drop(inner);
let presence_change = UserData::PresenceChange(
jid,
status,
time.and_then(|timestamp| if timestamp != 0 {
Some(NaiveDateTime::from_timestamp(timestamp, 0))
} else {
None
})
);
self.handler.on_user_data_changed(self, presence_change);
}
Ok(ServerMessage::MessageAck { message_id, level, sender, receiver, participant, time }) => {
self.handler.on_user_data_changed(self, UserData::MessageAck(MessageAck::from_server_message(
message_id,
level,
sender,
receiver,
participant,
time,
inner.user_jid.as_ref().unwrap()
)))
}
Ok(ServerMessage::MessageAcks { message_ids, level, sender, receiver, participant, time }) => {
for message_id in message_ids {
self.handler.on_user_data_changed(self, UserData::MessageAck(MessageAck::from_server_message(
message_id,
level,
sender.clone(),
receiver.clone(),
participant.clone(),
time,
inner.user_jid.as_ref().unwrap()
)))
}
}
Ok(ServerMessage::GroupIntroduce { newly_created, inducer, meta }) => {
drop(inner);
self.handler.on_user_data_changed(self, UserData::GroupIntroduce { newly_created, inducer, meta });
}
Ok(ServerMessage::GroupParticipantsChange { group, change, inducer, participants }) => {
drop(inner);
self.handler.on_user_data_changed(self, UserData::GroupParticipantsChange { group, change, inducer, participants });
}
_ => {}
}
}
}
WebsocketMessagePayload::BinarySimple(encrypted_payload) => {
let payload = Node::deserialize(&inner.decrypt_binary_message(encrypted_payload).unwrap()).unwrap();
debug!("received node: {:?}", &payload);
if let Some(cb) = inner.requests.remove(message.tag.deref()) {
drop(inner);
cb(WebsocketResponse::Node(payload), &self);
} else {
match AppMessage::deserialize(payload) {
Ok(AppMessage::Contacts(contacts)) => {
drop(inner);
self.handler.on_user_data_changed(self, UserData::ContactsInitial(contacts));
}
Ok(AppMessage::Chats(chats)) => {
drop(inner);
self.handler.on_user_data_changed(self, UserData::Chats(chats));
}
Ok(AppMessage::MessagesEvents(event_type, events)) => {
drop(inner);
for event in events {
match event {
AppEvent::Message(message) => self.handler.on_message(self, event_type == Some(MessageEventType::Relay), message),
AppEvent::MessageAck(message_ack) => self.handler.on_user_data_changed(self, UserData::MessageAck(message_ack)),
AppEvent::ContactDelete(jid) => self.handler.on_user_data_changed(self, UserData::ContactDelete(jid)),
AppEvent::ContactAddChange(contact) => self.handler.on_user_data_changed(self, UserData::ContactAddChange(contact)),
AppEvent::ChatAction(jid, action) => self.handler.on_user_data_changed(self, UserData::ChatAction(jid, action)),
AppEvent::Battery(level) => self.handler.on_user_data_changed(self, UserData::Battery(level)),
AppEvent::MessageRead { .. } => unreachable!(),
AppEvent::MessagePlayed { .. } => unreachable!(),
AppEvent::GroupCommand { .. } => unreachable!(),
AppEvent::PresenceChange(_, _) => unreachable!(),
AppEvent::StatusChange(_) => unreachable!(),
AppEvent::NotifyChange(_) => unreachable!(),
AppEvent::BlockProfile { .. } => unreachable!(),
}
}
}
_ => {}
}
}
}
_ => {}
}
}
pub fn send_message_played(&self, id: MessageId, peer: Peer) {
let mut inner = self.inner.lock().unwrap();
inner.epoch += 1;
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::MessagePlayed { id, peer }]);
self.send_app_message(None, WebsocketMessageMetric::Received, msg, Box::new(|_, _| {}));
}
pub fn send_message_read(&self, id: MessageId, peer: Peer) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::MessageRead { id, peer }]);
self.send_app_message(None, WebsocketMessageMetric::Read, msg, Box::new(|_, _| {}));
}
pub fn set_presence(&self, presence: PresenceStatus, jid: Option<Jid>) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::PresenceChange(presence, jid)]);
self.send_app_message(None, WebsocketMessageMetric::Presence, msg, Box::new(|_, _| {}));
}
pub fn set_status(&self, status: String) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::StatusChange(status)]);
self.send_app_message(None, WebsocketMessageMetric::Status, msg, Box::new(|_, _| {}));
}
pub fn set_notify_name(&self, name: String) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::NotifyChange(name)]);
self.send_app_message(None, WebsocketMessageMetric::Profile, msg, Box::new(|_, _| {}));
}
pub fn block_profile(&self, unblock: bool, jid: Jid) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::BlockProfile { unblock, jid }]);
self.send_app_message(None, WebsocketMessageMetric::Block, msg, Box::new(|_, _| {}));
}
pub fn send_chat_action(&self, action: ChatAction, chat: Jid) {
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Set), vec![AppEvent::ChatAction(chat, action)]);
self.send_app_message(None, WebsocketMessageMetric::Chat, msg, Box::new(|_, _| {}));
}
pub fn send_message(&self, message_content: ChatMessageContent, jid: Jid) {
let message_id = MessageId::generate();
let msg = AppMessage::MessagesEvents(Some(MessageEventType::Relay), vec![AppEvent::Message(Box::new(WhatsappMessage {
content: message_content,
time: Utc::now().naive_utc(),
direction: Direction::Sending(jid),
id: message_id.clone()
}))]);
self.send_app_message(Some(message_id.0), WebsocketMessageMetric::Message, msg, Box::new(|_, _| {}));
}
pub fn group_create(&self, subject: String, participants: Vec<Jid>) {
self.inner.lock().unwrap().send_group_command(GroupCommand::Create(subject), participants);
}
pub fn group_participants_change(&self, jid: Jid, participants_change: GroupParticipantsChange, participants: Vec<Jid>) {
self.inner.lock().unwrap().send_group_command(GroupCommand::ParticipantsChange(jid, participants_change), participants);
}
pub fn get_messages_before(&self, jid: Jid, id: String, count: u16, callback: Box<Fn(Option<Vec<WhatsappMessage>>) + Send + Sync>) {
let msg = AppMessage::Query(Query::MessagesBefore { jid, id, count });
self.send_app_message(None, WebsocketMessageMetric::QueryMessages, msg, Box::new(move |response, _| {
match response {
WebsocketResponse::Node(node) => {
callback(node_protocol::parse_message_response(node).ok());
}
_ => unimplemented!()
}
}));
}
pub fn request_file_upload(&self, hash: &[u8], media_type: MediaType, callback: Box<Fn(Result<&str>) + Send + Sync>) {
self.send_json_message(json_protocol::build_file_upload_request(hash, media_type), Box::new(move |response, _| {
callback(json_protocol::parse_file_upload_response(&response));
}));
}
pub fn get_profile_picture(&self, jid: &Jid, callback: Box<Fn(Option<&str>) + Send + Sync>) {
self.send_json_message(json_protocol::build_profile_picture_request(jid), Box::new(move |response, _| {
callback(json_protocol::parse_profile_picture_response(&response));
}));
}
pub fn get_profile_status(&self, jid: &Jid, callback: Box<Fn(Option<&str>) + Send + Sync>) {
self.send_json_message(json_protocol::build_profile_status_request(jid), Box::new(move |response, _| {
callback(json_protocol::parse_profile_status_response(&response));
}));
}
pub fn get_group_metadata(&self, jid: &Jid, callback: Box<Fn(Option<GroupMetadata>) + Send + Sync>) {
debug_assert!(jid.is_group);
self.send_json_message(json_protocol::build_group_metadata_request(jid), Box::new(move |response, _| {
callback(json_protocol::parse_group_metadata_response(&response).ok());
}));
}
fn ws_connect(&self) -> JoinHandle<()> {
let whatsapp_connection = self.clone();
thread::spawn(move || loop {
let last_try = SystemTime::now();
let whatsapp_connection1 = whatsapp_connection.clone();
ws::connect(ENDPOINT_URL, move |out| {
whatsapp_connection1.inner.lock().unwrap().ws_on_connected(out);
WsHandler {
whatsapp_connection: whatsapp_connection1.clone()
}
}).unwrap();
if let SessionState::Teardown = whatsapp_connection.inner.lock().unwrap().session_state {
break
}
let duration = SystemTime::now().duration_since(last_try).unwrap_or_else(|_|Duration::new(0, 0));
if let Some(duration) = Duration::new(10, 0).checked_sub(duration) {
thread::sleep(duration);
}
})
}
pub fn ws_disconnect(&self) {
self.handler.on_state_changed(self, State::Disconnecting);
let mut inner = self.inner.lock().unwrap();
inner.session_state = SessionState::Teardown;
if let WebsocketState::Connected(ref out, ref mut timeout_manager) = inner.websocket_state {
out.close(CloseCode::Normal).ok();
timeout_manager.disarm();
}
}
pub fn subscribe_presence(&self, jid: &Jid) {
self.send_json_message(json_protocol::build_presence_subscribe(jid), Box::new(|_, _| {}));
}
pub fn state(&self) -> State {
match self.inner.lock().unwrap().session_state {
SessionState::PendingNew { .. } => State::Uninitialized,
SessionState::PendingPersistent { .. } => State::Reconnecting,
SessionState::Established { .. } => State::Connected,
SessionState::Teardown => State::Disconnecting
}
}
}
struct WsHandler<H: WhatsappWebHandler<H> + Send + Sync + 'static> {
whatsapp_connection: WhatsappWebConnection<H>
}
impl<H: WhatsappWebHandler<H> + Send + Sync + 'static> Handler for WsHandler<H> {
fn build_request(&mut self, url: &Url) -> ws::Result<Request> {
trace!("Handler is building request to {}.", url);
let mut request = Request::from_url(url)?;
request.headers_mut().push(("Origin".to_string(), b"https://web.whatsapp.com".to_vec()));
Ok(request)
}
fn on_message(&mut self, msg: Message) -> ws::Result<()> {
debug!("Received message {:?}", msg);
self.whatsapp_connection.ws_on_message(&msg);
Ok(())
}
fn on_timeout(&mut self, event: Token) -> ws::Result<()> {
let mut inner = self.whatsapp_connection.inner.lock().unwrap();
inner.on_timeout(event);
Ok(())
}
fn on_new_timeout(&mut self, event: Token, timeout: Timeout) -> ws::Result<()> {
if let WebsocketState::Connected(_, ref mut timeout_manager) = self.whatsapp_connection.inner.lock().unwrap().websocket_state {
timeout_manager.on_new_timeout(event, timeout);
}
Ok(())
}
fn on_close(&mut self, _: CloseCode, _: &str) {
self.whatsapp_connection.ws_on_disconnected();
}
}
/// Stores the parameters to login without scanning the qrcode again.
#[derive(Serialize, Deserialize, PartialEq, Debug, Clone)]
pub struct PersistentSession {
pub client_token: String,
pub server_token: String,
pub client_id: [u8; 8],
pub enc: [u8; 32],
pub mac: [u8; 32]
}
const ENDPOINT_URL: &str = "wss://w7.web.whatsapp.com/ws";
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | true |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/node_wire.rs | src/node_wire.rs | use std::collections::HashMap;
use std::io::{Read, Write, Cursor};
use std::char;
use std::borrow::Cow;
use std::ops::Deref;
use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt};
use Jid;
use errors::*;
const LIST_EMPTY: u8 = 0;
#[allow(dead_code)]
const STREAM_END: u8 = 2;
const DICTIONARY_0: u8 = 236;
const DICTIONARY_1: u8 = 237;
const DICTIONARY_2: u8 = 238;
const DICTIONARY_3: u8 = 239;
const LIST_8: u8 = 248;
const LIST_16: u8 = 249;
const JID_PAIR: u8 = 250;
const HEX_8: u8 = 251;
const BINARY_8: u8 = 252;
const BINARY_20: u8 = 253;
const BINARY_32: u8 = 254;
const NIBBLE_8: u8 = 255;
#[allow(dead_code)]
const PACKED_MAX: u8 = 254;
const TOKENS: [&str; 159] = ["200", "400", "404", "500", "501", "502", "action", "add",
"after", "archive", "author", "available", "battery", "before", "body",
"broadcast", "chat", "clear", "code", "composing", "contacts", "count",
"create", "debug", "delete", "demote", "duplicate", "encoding", "error",
"false", "filehash", "from", "g.us", "group", "groups_v2", "height", "id",
"image", "in", "index", "invis", "item", "jid", "kind", "last", "leave",
"live", "log", "media", "message", "mimetype", "missing", "modify", "name",
"notification", "notify", "out", "owner", "participant", "paused",
"picture", "played", "presence", "preview", "promote", "query", "raw",
"read", "receipt", "received", "recipient", "recording", "relay",
"remove", "response", "resume", "retry", "c.us", "seconds",
"set", "size", "status", "subject", "subscribe", "t", "text", "to", "true",
"type", "unarchive", "unavailable", "url", "user", "value", "web", "width",
"mute", "read_only", "admin", "creator", "short", "update", "powersave",
"checksum", "epoch", "block", "previous", "409", "replaced", "reason",
"spam", "modify_tag", "message_info", "delivery", "emoji", "title",
"description", "canonical-url", "matched-text", "star", "unstar",
"media_key", "filename", "identity", "unread", "page", "page_count",
"search", "media_message", "security", "call_log", "profile", "ciphertext",
"invite", "gif", "vcard", "frequent", "privacy", "blacklist", "whitelist",
"verify", "location", "document", "elapsed", "revoke_invite", "expiration",
"unsubscribe", "disable", "vname", "old_jid", "new_jid", "announcement",
"locked", "prop", "label", "color", "call", "offer", "call-id"
];
#[derive(Debug, PartialEq, Clone)]
pub enum NodeContent {
None,
List(Vec<Node>),
String(Cow<'static, str>),
Binary(Vec<u8>),
Jid(Jid),
Token(&'static str),
Nibble(Cow<'static, str>),
}
impl NodeContent {
pub fn into_cow(self) -> Cow<'static, str> {
match self {
NodeContent::None => "".cow(),
NodeContent::List(_) => unimplemented!(),
NodeContent::String(string) => string,
NodeContent::Nibble(string) => string,
NodeContent::Binary(_) => unimplemented!(),
NodeContent::Jid(jid) => Cow::Owned(jid.to_string()),
NodeContent::Token(ref token) => Cow::Borrowed(token)
}
}
pub fn into_string(self) -> String {
match self {
NodeContent::None => "".to_string(),
NodeContent::List(_) => unimplemented!(),
NodeContent::String(string) => string.into(),
NodeContent::Nibble(string) => string.into(),
NodeContent::Binary(_) => unimplemented!(),
NodeContent::Jid(jid) => jid.to_string(),
NodeContent::Token(ref token) => token.to_string()
}
}
pub fn into_jid(self) -> Result<Jid> {
match self {
NodeContent::Jid(jid) => Ok(jid),
_ => bail! {"not a jid"}
}
}
pub fn as_str(&self) -> &str {
match *self {
NodeContent::None => "",
NodeContent::List(_) => unimplemented!(),
NodeContent::String(ref string) => string.deref(),
NodeContent::Nibble(ref string) => string.deref(),
NodeContent::Binary(_) => unimplemented!(),
NodeContent::Jid(_) => unimplemented!(),//jid.to_string().as_str()
NodeContent::Token(ref token) => token
}
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct Node {
pub desc: Cow<'static, str>,
pub attributes: HashMap<Cow<'static, str>, NodeContent>,
pub content: NodeContent,
}
fn read_list_size(tag: u8, stream: &mut Read) -> Result<u16> {
Ok(match tag {
LIST_EMPTY => 0,
LIST_8 => u16::from(stream.read_u8()?),
LIST_16 => stream.read_u16::<BigEndian>()?,
_ => bail! {"Invalid listsize tag: {}", tag}
})
}
fn write_list_size(size: u16, stream: &mut Write) -> Result<()> {
match size {
0 => { stream.write_u8(LIST_EMPTY)?; }
1...256 => {
stream.write_u8(LIST_8)?;
stream.write_u8(size as u8)?;
}
_ => {
stream.write_u8(LIST_16)?;
stream.write_u16::<BigEndian>(size)?;
}
}
Ok(())
}
fn read_list(tag: u8, stream: &mut Read) -> Result<Vec<Node>> {
let size = read_list_size(tag, stream).chain_err(|| "Couldn't read list size")?;
let mut list = Vec::<Node>::with_capacity(size as usize);
for i in 0..size {
list.push(Node::deserialize_stream(stream).chain_err(|| format!("Couldn't read list item: {}, size: {}", i, size))?);
}
Ok(list)
}
fn write_list(list: Vec<Node>, stream: &mut Write) -> Result<()> {
write_list_size(list.len() as u16, stream)?;
for node in list {
node.serialize_stream(stream)?
}
Ok(())
}
fn nibble_to_char(nibble: u8) -> Result<char> {
Ok(match nibble {
0 => '0',
1 => '1',
2 => '2',
3 => '3',
4 => '4',
5 => '5',
6 => '6',
7 => '7',
8 => '8',
9 => '9',
10 => '-',
11 => '.',
15 => '\0',
_ => {
bail! {"invalid nibble: {}", nibble}
}
})
}
fn char_to_nibble(nibble: char) -> u8 {
match nibble {
'0' => 0,
'1' => 1,
'2' => 2,
'3' => 3,
'4' => 4,
'5' => 5,
'6' => 6,
'7' => 7,
'8' => 8,
'9' => 9,
'-' => 10,
'.' => 11,
'\0' => 15,
_ => {
panic!("invalid nibble")
}
}
}
fn read_node_content(tag: u8, stream: &mut Read) -> Result<NodeContent> {
Ok(match tag {
3...161 => NodeContent::Token(TOKENS[(tag - 3) as usize]),
DICTIONARY_0 | DICTIONARY_1 | DICTIONARY_2 | DICTIONARY_3 => {
stream.read_u8()?;
NodeContent::List(Vec::new())
}
LIST_EMPTY | LIST_8 | LIST_16 => NodeContent::List(read_list(tag, stream)?),
BINARY_8 => {
let mut buffer = vec![0u8; stream.read_u8()? as usize];
stream.read_exact(&mut buffer)?;
String::from_utf8(buffer).map(|string| NodeContent::String(string.cow())).unwrap_or_else(|err| NodeContent::Binary(err.into_bytes()))
}
BINARY_20 => {
let len: usize = ((stream.read_u8()? as usize & 0x0F) << 16) | (stream.read_u8()? as usize) << 8 | stream.read_u8()? as usize;
let mut buffer = vec![0u8; len];
stream.read_exact(&mut buffer)?;
String::from_utf8(buffer).map(|string| NodeContent::String(string.cow())).unwrap_or_else(|err| NodeContent::Binary(err.into_bytes()))
}
BINARY_32 => {
let mut buffer = vec![0u8; stream.read_u32::<BigEndian>()? as usize];
stream.read_exact(&mut buffer)?;
String::from_utf8(buffer).map(|string| NodeContent::String(string.cow())).unwrap_or_else(|err| NodeContent::Binary(err.into_bytes()))
}
JID_PAIR => {
NodeContent::Jid(Jid::from_node_pair(read_node_content(stream.read_u8()?, stream)?.into_string(), read_node_content(stream.read_u8()?, stream)?.as_str())?)
}
NIBBLE_8 | HEX_8 => {
let startbyte = stream.read_u8()?;
let mut string = String::with_capacity((startbyte as usize & 127) * 2);
for _ in 0..(startbyte & 127) {
let byte = stream.read_u8()?;
if tag == HEX_8 {
string.push(char::from_digit(u32::from((byte >> 4) & 0x0F), 16).unwrap().to_ascii_uppercase());
string.push(char::from_digit(u32::from(byte & 0x0F), 16).unwrap().to_ascii_uppercase());
} else {
let mut nibble = nibble_to_char((byte >> 4) & 0x0F)?;
if nibble == '\0' {
return Ok(NodeContent::Nibble(string.cow()));
}
string.push(nibble);
nibble = nibble_to_char(byte & 0x0F)?;
if nibble == '\0' {
return Ok(NodeContent::Nibble(string.cow()));
}
string.push(nibble);
}
}
/*
if startbyte >> 7 == 0 {
let len = string.len();
string.split_off(len - 1);
}*/
NodeContent::String(string.cow())
}
_ => {
bail! {"Invalid Tag {}", tag}
}
})
}
fn write_node_binary(binary: &[u8], stream: &mut Write) -> Result<()> {
let len = binary.len();
match len {
0...255 => {
stream.write_u8(BINARY_8)?;
stream.write_u8(len as u8)?;
}
256...1_048_575 => {
stream.write_u8(BINARY_20)?;
stream.write_u8((len >> 16) as u8)?;
stream.write_u8((len >> 8) as u8)?;
stream.write_u8(len as u8)?;
}
_ => {
stream.write_u8(BINARY_32)?;
stream.write_u32::<BigEndian>(len as u32)?;
}
}
stream.write_all(binary)?;
Ok(())
}
fn write_node_content(content: NodeContent, stream: &mut Write) -> Result<()> {
match content {
NodeContent::None => {
stream.write_u8(LIST_EMPTY)?;
write_list(Vec::new(), stream)?;
}
NodeContent::List(list) => { write_list(list, stream)?; }
NodeContent::String(string) => {
let string = string.deref();
if let Some(token) = TOKENS.iter().position(|r| r == &string) {
stream.write_u8((token + 3) as u8)?
} else {
write_node_binary(string.deref().as_bytes(), stream)?;
}
}
NodeContent::Binary(binary) => {
write_node_binary(&binary, stream)?;
}
NodeContent::Jid(jid) => {
stream.write_u8(JID_PAIR)?;
let pair = jid.into_node_pair();
write_node_content(NodeContent::Nibble(pair.0.cow()), stream)?;
write_node_content(NodeContent::Token(pair.1), stream)?;
}
NodeContent::Token(ref token) => {
stream.write_u8((TOKENS.iter().position(|r| r == token).unwrap() + 3) as u8)?
}
NodeContent::Nibble(string) => {
let mut len = (string.len() as u8 + 1) / 2;
stream.write_u8(NIBBLE_8)?;
stream.write_u8((string.len() as u8 % 2) << 7 | len)?;
let mut last_nibble = None;
for cha in string.chars() {
let nibble = char_to_nibble(cha);
if let Some(last_nibble) = last_nibble.take() {
stream.write_u8(last_nibble << 4 | nibble)?;
} else {
last_nibble = Some(nibble);
}
}
if let Some(last_nibble) = last_nibble {
stream.write_u8((last_nibble << 4) + 15)?;
}
}
}
Ok(())
}
impl Node {
#[inline]
pub fn new<D: IntoCow>(desc: D, attributes: HashMap<Cow<'static, str>, NodeContent>, content: NodeContent) -> Node {
Node {
desc: desc.cow(),
attributes,
content,
}
}
#[inline]
pub fn new_empty<D: IntoCow>(desc: D) -> Node {
Node {
desc: desc.cow(),
attributes: HashMap::new(),
content: NodeContent::None,
}
}
pub fn desc(&self) -> &str {
self.desc.deref()
}
pub fn take_attribute(&mut self, key: &'static str) -> Result<NodeContent> {
self.attributes.remove(&key.cow()).ok_or_else(|| ErrorKind::NodeAttributeMissing(key).into())
}
pub fn get_attribute<'a>(&'a self, key: &'static str) -> Result<&'a NodeContent> {
self.attributes.get(&key.cow()).ok_or_else(|| ErrorKind::NodeAttributeMissing(key).into())
}
pub fn set_attribute<K: IntoCow>(&mut self, key: K, value: NodeContent) {
self.attributes.insert(key.cow(), value);
}
pub fn deserialize(data: &[u8]) -> Result<Node> {
Node::deserialize_stream(&mut Cursor::new(data)).chain_err(|| "Node has invalid binary format")
}
fn deserialize_stream(stream: &mut Read) -> Result<Node> {
let list_size = read_list_size(stream.read_u8()?, stream).chain_err(|| "Couldn't read attribute count")?;
let desc = read_node_content(stream.read_u8()?, stream).chain_err(|| "Couldn't read description")?.into_cow();
let mut attributes = HashMap::new();
for _ in 0..((list_size - 1) >> 1) {
let attribute_name = read_node_content(stream.read_u8()?, stream).chain_err(|| format!("Couldn't read attribute name, node decription: {}", desc))?.into_cow();
let attribute_content = read_node_content(stream.read_u8()?, stream).chain_err(|| format!("Couldn't read attribute :{}, node decription: {}", attribute_name, desc))?;
attributes.insert(attribute_name, attribute_content);
}
let content = if list_size % 2 == 1 {
NodeContent::None
} else {
let tag = stream.read_u8()?;
match tag {
BINARY_8 => {
let mut buffer = vec![0u8; stream.read_u8()? as usize];
stream.read_exact(&mut buffer)?;
NodeContent::Binary(buffer)
}
BINARY_20 => {
let len: usize = ((stream.read_u8()? as usize & 0x0F) << 16) | (stream.read_u8()? as usize) << 8 | stream.read_u8()? as usize;
let mut buffer = vec![0u8; len];
stream.read_exact(&mut buffer)?;
NodeContent::Binary(buffer)
}
BINARY_32 => {
let mut buffer = vec![0u8; stream.read_u32::<BigEndian>()? as usize];
stream.read_exact(&mut buffer)?;
NodeContent::Binary(buffer)
}
_ => read_node_content(tag, stream).chain_err(|| format!("Couldn't read node content (list), node decription: {}", desc))?
}
};
Ok(Node { desc, attributes, content })
}
pub fn serialize(self) -> Vec<u8> {
let mut cursor = Cursor::new(Vec::new());
self.serialize_stream(&mut cursor).unwrap();
cursor.into_inner()
}
fn serialize_stream(self, stream: &mut Write) -> Result<()> {
let list_size = match self.content {
NodeContent::None => 1,
_ => 2
} + self.attributes.len() * 2;
write_list_size(list_size as u16, stream)?;
write_node_content(NodeContent::String(self.desc), stream)?;
for attribute in self.attributes {
write_node_content(NodeContent::String(attribute.0), stream)?;
write_node_content(attribute.1, stream)?;
}
match self.content {
NodeContent::None => {}
_ => { write_node_content(self.content, stream)?; }
}
Ok(())
}
}
impl Jid {
fn from_node_pair(id: String, surfix: &str) -> Result<Jid> {
Ok(Jid {
id,
is_group: match surfix {
"c.us" => false,
"g.us" => true,
"s.whatsapp.net" => false,
"broadcast" => false, //Todo
_ => bail! {"invalid jid surfix {}", surfix}
},
})
}
fn into_node_pair(self) -> (String, &'static str) {
(self.id, if self.is_group {
"g.us"
} else {
"c.us"
})
}
}
pub trait IntoCow {
fn cow(self) -> Cow<'static, str>;
}
impl IntoCow for &'static str {
fn cow(self) -> Cow<'static, str> {
Cow::Borrowed(self)
}
}
impl IntoCow for String {
fn cow(self) -> Cow<'static, str> {
Cow::Owned(self)
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::Jid;
use std::str::FromStr;
#[test]
fn test_ser_de() {
let mut attributes = HashMap::new();
attributes.insert("jid".cow(), NodeContent::Jid(Jid::from_str("12123123-493244232342@g.us").unwrap()));
attributes.insert("type".cow(), NodeContent::Token("delete"));
let node = Node::new("action", HashMap::new(), NodeContent::List(vec![Node::new("chat", attributes, NodeContent::None)]));
let node_ser_de = Node::deserialize(&node.clone().serialize()).unwrap();
assert_eq!(node_ser_de, node);
}
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/json_protocol.rs | src/json_protocol.rs | use std::str::FromStr;
use json::JsonValue;
use base64;
use super::{Jid, PresenceStatus, GroupMetadata, GroupParticipantsChange, MediaType};
use message::MessageAckLevel;
use errors::*;
#[derive(Debug)]
pub enum ServerMessage<'a> {
ConnectionAck { user_jid: Jid, client_token: &'a str, server_token: &'a str, secret: Option<&'a str> },
ChallengeRequest(Vec<u8>),
Disconnect(Option<&'a str>),
PresenceChange { jid: Jid, status: PresenceStatus, time: Option<i64> },
MessageAck { message_id: &'a str, level: MessageAckLevel, sender: Jid, receiver: Jid, participant: Option<Jid>, time: i64 },
MessageAcks { message_ids: Vec<&'a str>, level: MessageAckLevel, sender: Jid, receiver: Jid, participant: Option<Jid>, time: i64 },
GroupIntroduce { newly_created: bool, inducer: Jid, meta: GroupMetadata },
GroupParticipantsChange { group: Jid, change: GroupParticipantsChange, inducer: Option<Jid>, participants: Vec<Jid> },
GroupSubjectChange { group: Jid, subject: String, subject_time: i64, subject_owner: Jid },
PictureChange { jid: Jid, removed: bool },
StatusChange(Jid, String)
}
impl<'a> ServerMessage<'a> {
#[inline]
pub fn deserialize(json: &'a JsonValue) -> Result<ServerMessage<'a>> {
let opcode = json[0].as_str().ok_or("server message without opcode")?;
let payload = &json[1];
Ok(match opcode {
"Conn" => {
ServerMessage::ConnectionAck {
user_jid: payload.get_str("wid").and_then(|jid| Jid::from_str(jid))?,
server_token: payload.get_str("serverToken")?,
client_token: payload.get_str("clientToken")?,
secret: payload["secret"].as_str()
}
}
"Cmd" => {
let cmd_type = payload.get_str("type")?;
match cmd_type {
"challenge" => {
ServerMessage::ChallengeRequest(base64::decode(&payload.get_str("challenge")?)?)
}
"disconnect" => {
ServerMessage::Disconnect(payload["kind"].as_str())
}
"picture" => {
ServerMessage::PictureChange { jid: Jid::from_str(payload.get_str("jid")?)?, removed: payload["tag"] == "removed" }
}
_ => bail! { "invalid or unsupported 'Cmd' subcommand type {}", cmd_type}
}
}
"Chat" => {
let chat = Jid::from_str(payload.get_str("id")?)?;
let data = &payload["data"];
let cmd_type = data[0].as_str().ok_or("chat command without subcommand")?;
let inducer = data[1].as_str().and_then(|jid| Jid::from_str(jid).ok());
match cmd_type {
typ @ "introduce" | typ @ "create" => {
let group_metadata_json = &data[2];
let admins_json = &group_metadata_json["admins"];
let regulars_json = &group_metadata_json["regulars"];
let mut participants = Vec::with_capacity(admins_json.len() + regulars_json.len());
for participant in admins_json.members() {
participants.push((Jid::from_str(participant.as_str().ok_or("not a string")?)?, true));
}
for participant in regulars_json.members() {
participants.push((Jid::from_str(participant.as_str().ok_or("not a string")?)?, false));
}
ServerMessage::GroupIntroduce {
inducer: inducer.ok_or("missing inducer")?,
newly_created: typ == "create",
meta: GroupMetadata {
id: chat,
owner: None,
creation_time: group_metadata_json.get_i64("creation")?,
subject: group_metadata_json.get_str("subject")?.to_string(),
subject_owner: Jid::from_str(group_metadata_json.get_str("s_o")?)?,
subject_time: group_metadata_json.get_i64("s_t")?,
participants
}
}
}
"add" | "remove" | "promote" | "demote" => {
let participants_json = &data[2]["participants"];
let mut participants = Vec::with_capacity(participants_json.len());
for participant in participants_json.members() {
participants.push(Jid::from_str(participant.as_str().ok_or("not a string")?)?)
}
ServerMessage::GroupParticipantsChange {
inducer,
group: chat,
participants,
change: GroupParticipantsChange::from_json(cmd_type).unwrap()
}
}
"subject" => {
let subject_json = &data[2];
ServerMessage::GroupSubjectChange {
subject_owner: inducer.ok_or("missing inducer")?,
group: chat,
subject: subject_json.get_str("subject")?.to_string(),
subject_time: subject_json.get_i64("s_t")?
}
}
_ => bail! { "invalid or unsupported 'Chat' subcommand type {}", cmd_type}
}
}
"Msg" | "MsgInfo" => {
let cmd_type = payload.get_str("cmd")?;
match cmd_type {
"ack" => ServerMessage::MessageAck {
message_id: payload.get_str("id")?,
sender: Jid::from_str(payload.get_str("from")?)?,
receiver: Jid::from_str(payload.get_str("to")?)?,
participant: payload["participant"].as_str().and_then(|jid| Jid::from_str(jid).ok()),
time: payload.get_i64("t")?,
level: MessageAckLevel::from_json(payload.get_u8("ack")?)?
},
"acks" => ServerMessage::MessageAcks {
message_ids: payload["id"].members().map(|id| id.as_str().unwrap()).collect(),
sender: Jid::from_str(payload.get_str("from")?)?,
receiver: Jid::from_str(payload.get_str("to")?)?,
participant: payload["participant"].as_str().and_then(|jid| Jid::from_str(jid).ok()),
time: payload.get_i64("t")?,
level: MessageAckLevel::from_json(payload.get_u8("ack")?)?
},
_ => bail! { "invalid or unsupported 'Msg' or 'MsgInfo' subcommand type {}", cmd_type}
}
}
"Presence" => {
ServerMessage::PresenceChange {
jid: Jid::from_str(payload.get_str("id")?)?,
status: PresenceStatus::from_json(payload.get_str("type")?)?,
time: payload["t"].as_i64()
}
}
"Status" => {
ServerMessage::StatusChange(Jid::from_str(payload.get_str("id")?)?, payload.get_str("status")?.to_string())
}
_ => bail! { "invalid or unsupported opcode {}", opcode}
})
}
}
impl MessageAckLevel {
fn from_json(value: u8) -> Result<MessageAckLevel> {
Ok(match value {
0 => MessageAckLevel::PendingSend,
1 => MessageAckLevel::Send,
2 => MessageAckLevel::Received,
3 => MessageAckLevel::Read,
4 => MessageAckLevel::Played,
_ => bail! {"Invalid message ack level {}", value}
})
}
}
impl PresenceStatus {
fn from_json(value: &str) -> Result<PresenceStatus> {
Ok(match value {
"unavailable" => PresenceStatus::Unavailable,
"available" => PresenceStatus::Available,
"composing" => PresenceStatus::Typing,
"recording" => PresenceStatus::Recording,
_ => bail! {"Invalid presence status {}", value}
})
}
}
impl GroupMetadata {
fn from_json(value: &JsonValue) -> Result<GroupMetadata> {
let participants_json = &value["participants"];
let mut participants = Vec::with_capacity(participants_json.len());
for participant in participants_json.members() {
participants.push((Jid::from_str(participant.get_str("id")?)?, participant.get_bool("isAdmin")?));
}
Ok(GroupMetadata {
id: Jid::from_str(value.get_str("id")?)?,
creation_time: value.get_i64("creation")?,
owner: Some(Jid::from_str(value.get_str("owner")?)?),
participants,
subject: value.get_str("subject")?.to_string(),
subject_time: value.get_i64("subjectTime")?,
subject_owner: Jid::from_str(value.get_str("subjectOwner")?)?
})
}
}
impl GroupParticipantsChange {
fn from_json(value: &str) -> Result<GroupParticipantsChange> {
Ok(match value {
"add" => GroupParticipantsChange::Add,
"remove" => GroupParticipantsChange::Remove,
"promote" => GroupParticipantsChange::Promote,
"demote" => GroupParticipantsChange::Demote,
_ => bail! {"invalid group command {}", value}
})
}
}
pub fn parse_response_status(response: &JsonValue) -> Result<()> {
response["status"].as_u16().map_or(Ok(()), |status_code| if status_code == 200 {
Ok(())
} else {
bail! {"received status code {}", status_code}
})
}
pub fn build_init_request(client_id: &str) -> JsonValue {
array!["admin", "init", array![0, 3, 416], array!["ww-rs", "ww-rs"], client_id, true]
}
pub fn parse_init_response<'a>(response: &'a JsonValue) -> Result<&'a str> {
parse_response_status(response)?;
response.get_str("ref")
}
pub fn build_takeover_request(client_token: &str, server_token: &str, client_id: &str) -> JsonValue {
array!["admin", "login", client_token, server_token, client_id, "takeover"]
}
pub fn build_challenge_response(server_token: &str, client_id: &str, signature: &[u8]) -> JsonValue {
array!["admin","challenge", base64::encode(&signature), server_token, client_id]
}
pub fn build_presence_subscribe(jid: &Jid) -> JsonValue {
array!["action", "presence", "subscribe", jid.to_string()]
}
pub fn build_file_upload_request(hash: &[u8], media_type: MediaType) -> JsonValue {
array!["action", "encr_upload", match media_type {
MediaType::Image => "image",
MediaType::Video => "video",
MediaType::Audio => "audio",
MediaType::Document => "document",
}, base64::encode(hash)]
}
pub fn parse_file_upload_response<'a>(response: &'a JsonValue) -> Result<&'a str> {
parse_response_status(response)?;
response.get_str("url")
}
pub fn build_profile_picture_request(jid: &Jid) -> JsonValue {
array!["query", "ProfilePicThumb", jid.to_string()]
}
pub fn parse_profile_picture_response(response: &JsonValue) -> Option<&str> {
response["eurl"].as_str()
}
pub fn build_profile_status_request(jid: &Jid) -> JsonValue {
array!["query", "Status", jid.to_string()]
}
pub fn parse_profile_status_response(response: &JsonValue) -> Option<&str> {
response["status"].as_str()
}
pub fn build_group_metadata_request(jid: &Jid) -> JsonValue {
array!["query", "GroupMetadata", jid.to_string()]
}
pub fn parse_group_metadata_response(response: &JsonValue) -> Result<GroupMetadata> {
parse_response_status(response)?;
GroupMetadata::from_json(response)
}
pub trait JsonNonNull {
fn get_str(&self, field: &'static str) -> Result<&str>;
fn get_i64<'a>(&'a self, field: &'static str) -> Result<i64>;
fn get_u8<'a>(&'a self, field: &'static str) -> Result<u8>;
fn get_bool<'a>(&'a self, field: &'static str) -> Result<bool>;
}
impl JsonNonNull for JsonValue {
fn get_str<'a>(&'a self, field: &'static str) -> Result<&'a str> {
self[field].as_str().ok_or_else(|| ErrorKind::JsonFieldMissing(field).into())
}
fn get_i64<'a>(&'a self, field: &'static str) -> Result<i64> {
self[field].as_i64().ok_or_else(|| ErrorKind::JsonFieldMissing(field).into())
}
fn get_u8<'a>(&'a self, field: &'static str) -> Result<u8> {
self[field].as_u8().ok_or_else(|| ErrorKind::JsonFieldMissing(field).into())
}
fn get_bool<'a>(&'a self, field: &'static str) -> Result<bool> {
self[field].as_bool().ok_or_else(|| ErrorKind::JsonFieldMissing(field).into())
}
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/crypto.rs | src/crypto.rs | extern crate crypto;
use ring;
use ring::{agreement, rand, hkdf, hmac, digest};
use ring::rand::{SystemRandom, SecureRandom};
use self::crypto::{aes, blockmodes};
use self::crypto::buffer::{RefWriteBuffer, RefReadBuffer, WriteBuffer};
use untrusted;
use MediaType;
use errors::*;
/// Generates an ephemeral X25519 keypair for the login handshake.
///
/// Returns the private key together with its serialized public-key bytes,
/// ready to be sent to the server.
pub(crate) fn generate_keypair() -> (agreement::EphemeralPrivateKey, Vec<u8>) {
    let rng = rand::SystemRandom::new();
    let my_private_key =
        agreement::EphemeralPrivateKey::generate(&agreement::X25519, &rng).unwrap();
    // Serialize the public half into a buffer of exactly the required length.
    let mut my_public_key = vec![0u8; my_private_key.public_key_len()];
    my_private_key.compute_public_key(&mut my_public_key).unwrap();
    (my_private_key, my_public_key)
}
/// Derives the session encryption and MAC keys from the server-provided
/// `secret` blob and our ephemeral X25519 private key.
///
/// Layout of `secret` as used below: `[0..32]` peer public key, `[32..64]`
/// HMAC over the remaining parts, `[64..144]` encrypted key material.
///
/// Returns `(enc_key, mac_key)`; fails when the HMAC check fails.
pub(crate) fn calculate_secret_keys(secret: &[u8], private_key: agreement::EphemeralPrivateKey) -> Result<([u8; 32], [u8; 32])> {
    let peer_public_key_alg = &agreement::X25519;
    let public_key = untrusted::Input::from(&secret[..32]);
    // ECDH agreement with the peer's public key yields the shared secret.
    let secret_key = agreement::agree_ephemeral(private_key, peer_public_key_alg,
                                                public_key, ring::error::Unspecified,
                                                |key_material| {
                                                    Ok(Vec::from(key_material))
                                                }).unwrap();
    // HKDF-expand (zero salt) the shared secret to 80 bytes of key material.
    let mut secret_key_expanded = [0u8; 80];
    hkdf::extract_and_expand(&hmac::SigningKey::new(&digest::SHA256, &[0u8; 32]), &secret_key, &[], &mut secret_key_expanded);
    // Verify the server's HMAC over (peer key || encrypted part) before using anything.
    let signature = [&secret[..32], &secret[64..]].concat();
    hmac::verify(&hmac::VerificationKey::new(&digest::SHA256, &secret_key_expanded[32..64]), &signature, &secret[32..64]).chain_err(|| "Invalid mac")?;
    // Decrypt the 80-byte encrypted section; only the first 64 bytes are kept.
    let mut buffer = [0u8; 64];
    aes_decrypt(&secret_key_expanded[..32], &secret_key_expanded[64..], &secret[64..144], &mut buffer);
    // Decrypted buffer holds the 32-byte enc key followed by the 32-byte mac key.
    let mut enc = [0; 32];
    let mut mac = [0; 32];
    enc.copy_from_slice(&buffer[..32]);
    mac.copy_from_slice(&buffer[32..]);
    Ok((enc, mac))
}
pub fn verify_and_decrypt_message(enc: &[u8], mac: &[u8], message_encrypted: &[u8]) -> Result<Vec<u8>> {
hmac::verify(&hmac::VerificationKey::new(&digest::SHA256, &mac),
&message_encrypted[32..], &message_encrypted[..32]).chain_err(|| "Invalid mac")?;
let mut message = vec![0u8; message_encrypted.len() - 48];
let size_without_padding = aes_decrypt(enc, &message_encrypted[32..48], &message_encrypted[48..], &mut message);
message.truncate(size_without_padding);
Ok(message)
}
/// Encrypts `message` with AES-256-CBC under `enc` and prepends
/// `HMAC-SHA256(iv || ciphertext)` computed with `mac`.
///
/// Output layout: `[0..32]` MAC, `[32..48]` random IV, `[48..]` ciphertext.
pub(crate) fn sign_and_encrypt_message(enc: &[u8], mac: &[u8], message: &[u8]) -> Vec<u8> {
    // 32 bytes MAC + 16 bytes IV + ciphertext; the extra 32 bytes are slack
    // for the PKCS padding and trimmed by the truncate below.
    let mut message_encrypted = vec![0u8; 32 + 16 + message.len() + 32];
    let mut iv = vec![0u8; 16];
    SystemRandom::new().fill(&mut iv).unwrap();
    let size_with_padding = aes_encrypt(enc, &iv, &message, &mut message_encrypted[48..]);
    message_encrypted.truncate(32 + 16 + size_with_padding);
    message_encrypted[32..48].clone_from_slice(&iv);
    // The MAC covers IV + ciphertext and fills the first 32 bytes.
    let signature = hmac::sign(&hmac::SigningKey::new(&digest::SHA256, &mac),
                               &message_encrypted[32..]);
    message_encrypted[0..32].clone_from_slice(signature.as_ref());
    message_encrypted
}
/// Computes the HMAC-SHA256 of a server login challenge with the session mac key.
pub(crate) fn sign_challenge(mac: &[u8], challenge: &[u8]) -> hmac::Signature {
    let signing_key = hmac::SigningKey::new(&digest::SHA256, mac);
    hmac::sign(&signing_key, challenge)
}
/// Expands a media key into 112 bytes of key material via HKDF-SHA256
/// (zero salt), using a WhatsApp-defined info string per media type.
///
/// Layout as consumed by the callers below: `[0..16]` IV, `[16..48]` cipher
/// key, `[48..80]` mac key; the remainder is unused here.
fn derive_media_keys(key: &[u8], media_type: MediaType) -> [u8; 112] {
    let mut media_key_expanded = [0u8; 112];
    hkdf::extract_and_expand(&hmac::SigningKey::new(&digest::SHA256, &[0u8; 32]), key, match media_type {
        MediaType::Image => b"WhatsApp Image Keys",
        MediaType::Video => b"WhatsApp Video Keys",
        MediaType::Audio => b"WhatsApp Audio Keys",
        MediaType::Document => b"WhatsApp Document Keys",
    }, &mut media_key_expanded);
    media_key_expanded
}
/// Returns the SHA-256 digest of `file` as an owned byte vector.
pub fn sha256(file: &[u8]) -> Vec<u8> {
    digest::digest(&digest::SHA256, file).as_ref().to_vec()
}
/// Encrypts a media `file` under a fresh random 32-byte media key.
///
/// Output: AES-256-CBC ciphertext followed by the first 10 bytes of
/// `HMAC-SHA256(iv || ciphertext)`. Returns `(encrypted_file, media_key)`;
/// the key is what `decrypt_media_message` needs.
pub fn encrypt_media_message(media_type: MediaType, file: &[u8]) -> (Vec<u8>, Vec<u8>) {
    let mut media_key = vec![0u8; 32];
    SystemRandom::new().fill(&mut media_key).unwrap();
    let media_key_expanded = derive_media_keys(&media_key, media_type);
    // Ciphertext + slack for PKCS padding; trimmed by the truncate below.
    let mut file_encrypted = vec![0u8; 10 + file.len() + 32];
    // Borrow the key material directly instead of copying it into a Vec.
    let cipher_key = &media_key_expanded[16..48];
    let iv = &media_key_expanded[0..16];
    let size_with_padding = aes_encrypt(cipher_key, iv, file, &mut file_encrypted);
    file_encrypted.truncate(size_with_padding);
    // MAC covers IV + ciphertext; only a 10-byte truncated tag goes on the wire.
    let hmac_data = [iv, &file_encrypted].concat();
    let signature = hmac::sign(&hmac::SigningKey::new(&digest::SHA256, &media_key_expanded[48..80]),
                               &hmac_data);
    file_encrypted.extend_from_slice(&signature.as_ref()[0..10]);
    (file_encrypted, media_key)
}
/// Verifies and decrypts a media file produced by `encrypt_media_message`.
///
/// Input layout: ciphertext followed by a 10-byte truncated
/// `HMAC-SHA256(iv || ciphertext)` tag.
///
/// # Errors
/// Fails when the input is shorter than the 10-byte tag (previously this
/// panicked on slice underflow) or when the tag does not match.
pub fn decrypt_media_message(key: &[u8], media_type: MediaType, file_encrypted: &[u8]) -> Result<Vec<u8>> {
    if file_encrypted.len() < 10 {
        bail!("Encrypted file too short: {} bytes", file_encrypted.len());
    }
    let media_key_expanded = derive_media_keys(key, media_type);
    let cipher_key = &media_key_expanded[16..48];
    let iv = &media_key_expanded[0..16];
    let size = file_encrypted.len();
    let hmac_data = [iv, &file_encrypted[..size - 10]].concat();
    let signature = hmac::sign(&hmac::SigningKey::new(&digest::SHA256, &media_key_expanded[48..80]),
                               &hmac_data);
    // Constant-time comparison of the truncated tag: fold the XOR of every
    // byte pair so the comparison time does not leak the match prefix length.
    let expected = &signature.as_ref()[..10];
    let actual = &file_encrypted[size - 10..];
    let diff = expected.iter().zip(actual.iter()).fold(0u8, |acc, (a, b)| acc | (a ^ b));
    if diff != 0 {
        bail! {"Invalid mac"}
    }
    let mut file = vec![0u8; size - 10];
    let size_without_padding = aes_decrypt(cipher_key, iv, &file_encrypted[..size - 10], &mut file);
    file.truncate(size_without_padding);
    Ok(file)
}
/// AES-256-CBC encryption of `input` into `output` with PKCS padding.
///
/// `output` must be large enough for the padded ciphertext (input length
/// rounded up to the next 16-byte block). Returns the number of bytes written.
pub(crate) fn aes_encrypt(key: &[u8], iv: &[u8], input: &[u8], output: &mut [u8]) -> usize {
    let mut aes_encrypt = aes::cbc_encryptor(aes::KeySize::KeySize256, key, iv, blockmodes::PkcsPadding);
    let mut read_buffer = RefReadBuffer::new(input);
    let mut write_buffer = RefWriteBuffer::new(output);
    aes_encrypt.encrypt(&mut read_buffer, &mut write_buffer, true).unwrap();
    write_buffer.position()
}
/// AES-256-CBC decryption of `input` into `output`.
///
/// The PkcsPadding mode strips the padding, so the returned length is the
/// unpadded plaintext size (callers truncate their buffers to it).
pub(crate) fn aes_decrypt(key: &[u8], iv: &[u8], input: &[u8], output: &mut [u8]) -> usize {
    let mut aes_decrypt = aes::cbc_decryptor(aes::KeySize::KeySize256, key, iv, blockmodes::PkcsPadding);
    let mut read_buffer = RefReadBuffer::new(input);
    let mut write_buffer = RefWriteBuffer::new(output);
    aes_decrypt.decrypt(&mut read_buffer, &mut write_buffer, true).unwrap();
    write_buffer.position()
}
#[cfg(test)]
mod tests {
    use super::*;
    use base64;
    use node_wire::Node;
    use std::io::stdin;
    /// Interactive debugging helper (hence `#[ignore]`): paste base64 frames
    /// captured from the browser on stdin and decrypt them with keys filled
    /// into the two `base64::decode("")` calls below.
    #[test]
    #[ignore]
    fn decrypt_node_from_browser() {
        let enc = base64::decode("").unwrap();
        let mac = base64::decode("").unwrap();
        loop {
            let mut line = String::new();
            stdin().read_line(&mut line).unwrap();
            // Drop the trailing newline before decoding.
            let len = line.len();
            line.truncate(len - 1);
            let msg = base64::decode(&line).unwrap();
            // Skip the "<tag>," frame prefix (plus two more bytes) to reach the payload.
            let pos = msg.iter().position(|x| x == &b',').unwrap() + 3;
            let dec_msg = verify_and_decrypt_message(&enc, &mac, &msg[pos..]).unwrap();
            let node = Node::deserialize(&dec_msg).unwrap();
            println!("{:?}", node);
        }
    }
    /// Round-trip: sign_and_encrypt_message then verify_and_decrypt_message
    /// with random keys must reproduce the plaintext.
    #[test]
    fn test_encrypt_decrypt_message() {
        let mut enc = vec![0u8; 32];
        SystemRandom::new().fill(&mut enc).unwrap();
        let mut mac = vec![0u8; 32];
        SystemRandom::new().fill(&mut mac).unwrap();
        let mut msg = vec![0u8; 30];
        SystemRandom::new().fill(&mut msg).unwrap();
        let enc_msg = sign_and_encrypt_message(&enc, &mac, &msg);
        let dec_msg = verify_and_decrypt_message(&enc, &mac, &enc_msg).unwrap();
        assert_eq!(msg, dec_msg);
    }
    /// Round-trip for the media path using the key returned by encryption.
    #[test]
    fn test_encrypt_decrypt_media() {
        let mut msg = vec![0u8; 300];
        SystemRandom::new().fill(&mut msg).unwrap();
        let media_type = MediaType::Image;
        let (enc_msg, key) = encrypt_media_message(media_type, &msg);
        let dec_msg = decrypt_media_message(&key, media_type, &enc_msg).unwrap();
        assert_eq!(msg, dec_msg);
    }
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/src/message.rs | src/message.rs | use std::time::Duration;
use std::str::FromStr;
use protobuf;
use chrono::NaiveDateTime;
use protobuf::Message;
use ring::rand::{SystemRandom, SecureRandom};
use super::message_wire;
use super::Jid;
use errors::*;
/// Client-generated message identifier (uppercase hex string on the wire).
#[derive(Debug, Clone, PartialOrd, PartialEq)]
pub struct MessageId(pub String);
impl MessageId {
    /// Generates a fresh client-side message id: the fixed tag bytes
    /// `3E B0` followed by 10 random bytes, hex-encoded.
    pub fn generate() -> MessageId {
        let mut message_id_binary = vec![0u8; 12];
        message_id_binary[0] = 0x3E;
        message_id_binary[1] = 0xB0;
        SystemRandom::new().fill(&mut message_id_binary[2..]).unwrap();
        // `{:02X}` zero-pads every byte to two hex digits; the previous `{:X}`
        // dropped the leading zero of bytes < 0x10 and produced ids of
        // varying length.
        MessageId(message_id_binary.iter().map(|b| format!("{:02X}", b)).collect::<Vec<_>>().concat())
    }
}
/// The remote end of a received message.
#[derive(Debug, Clone)]
pub enum Peer {
    /// Direct (1:1) chat partner.
    Individual(Jid),
    /// Group chat plus the member who sent the message.
    Group { group: Jid, participant: Jid },
}
/// Target granularity of a message acknowledgement.
#[derive(Debug, Clone)]
pub enum PeerAck {
    Individual(Jid),
    /// A single member of a group.
    GroupIndividual { group: Jid, participant: Jid },
    /// Every member of a group.
    GroupAll(Jid),
}
/// Whether a message was sent by this client or received from a peer.
#[derive(Debug)]
pub enum Direction {
    /// Outgoing; the jid is the destination chat.
    Sending(Jid),
    /// Incoming; identifies who sent it.
    Receiving(Peer),
}
impl Direction {
    /// Builds a `Direction` from a protobuf `MessageKey`.
    ///
    /// `fromMe` selects sending vs. receiving; a present `participant` field
    /// marks a group message. Fails when a jid cannot be parsed.
    fn parse(mut key: message_wire::MessageKey) -> Result<Direction> {
        let remote_jid = Jid::from_str(&key.take_remoteJid())?;
        Ok(if key.get_fromMe() {
            Direction::Sending(remote_jid)
        } else {
            Direction::Receiving(if key.has_participant() {
                Peer::Group { group: remote_jid, participant: Jid::from_str(&key.take_participant())? }
            } else {
                Peer::Individual(remote_jid)
            })
        })
    }
}
/// Delivery state of a message; the numeric values mirror the wire protocol.
#[derive(Debug, Copy, Clone)]
pub enum MessageAckLevel {
    PendingSend = 0,
    Send = 1,
    Received = 2,
    Read = 3,
    Played = 4,
}
/// Which side of the conversation an ack refers to.
#[derive(Debug)]
pub enum MessageAckSide {
    /// Ack concerning a message we received (peer is the sender).
    Here(Peer),
    /// Ack concerning a message we sent (peer is the recipient).
    There(PeerAck),
}
/// A delivery/read acknowledgement for a single message.
#[derive(Debug)]
pub struct MessageAck {
    pub level: MessageAckLevel,
    /// Server acks carry a timestamp; app acks do not (see the constructors).
    pub time: Option<i64>,
    pub id: MessageId,
    pub side: MessageAckSide,
}
impl MessageAck {
    /// Builds an ack from a server-side notification.
    ///
    /// If we are the sender (`own_jid == sender`) the ack refers to the other
    /// side (`There`), otherwise to a message we received (`Here`). A present
    /// `participant` marks a group conversation in both cases.
    pub fn from_server_message(message_id: &str, level: MessageAckLevel, sender: Jid, receiver: Jid, participant: Option<Jid>, time: i64, own_jid: &Jid) -> MessageAck {
        MessageAck {
            level,
            time: Some(time),
            id: MessageId(message_id.to_string()),
            side: if own_jid == &sender {
                MessageAckSide::There(if let Some(participant) = participant {
                    PeerAck::GroupIndividual { group: receiver, participant }
                } else {
                    PeerAck::Individual(receiver)
                })
            } else {
                MessageAckSide::Here(if let Some(participant) = participant {
                    Peer::Group { group: sender, participant }
                } else {
                    Peer::Individual(sender)
                })
            },
        }
    }
    /// Builds an ack from an app-side message; carries no timestamp.
    ///
    /// `owner` marks a message we sent: then group acks address all members
    /// (`GroupAll`). Otherwise `participant` distinguishes group from 1:1.
    pub fn from_app_message(message_id: MessageId, level: MessageAckLevel, jid: Jid, participant: Option<Jid>, owner: bool) -> MessageAck {
        MessageAck {
            level,
            time: None,
            id: message_id,
            side: if owner {
                MessageAckSide::There(if jid.is_group {
                    PeerAck::GroupAll(jid)
                } else {
                    PeerAck::Individual(jid)
                })
            } else {
                MessageAckSide::Here(if let Some(participant) = participant {
                    Peer::Group { group: jid, participant }
                } else {
                    Peer::Individual(jid)
                })
            },
        }
    }
}
/// Metadata required to download and decrypt an uploaded media file.
#[derive(Debug)]
pub struct FileInfo {
    pub url: String,
    pub mime: String,
    /// SHA-256 of the plaintext file.
    pub sha256: Vec<u8>,
    /// SHA-256 of the encrypted payload.
    pub enc_sha256: Vec<u8>,
    pub size: usize,
    /// Media key the content is encrypted with.
    pub key: Vec<u8>,
}
/// Payload of a chat message.
#[derive(Debug)]
pub enum ChatMessageContent {
    Text(String),
    /// Image file, (height, width), JPEG thumbnail bytes.
    Image(FileInfo, (u32, u32), Vec<u8>),
    /// Audio file and its duration.
    Audio(FileInfo, Duration),
    /// Document file and its file name.
    Document(FileInfo, String),
}
impl ChatMessageContent {
    /// Converts a protobuf `Message` into typed content.
    ///
    /// NOTE(review): message types other than conversation/image/audio/document
    /// currently fall back to a literal `Text("TODO")` placeholder.
    fn from_proto(mut message: message_wire::Message) -> Result<ChatMessageContent> {
        Ok(if message.has_conversation() {
            ChatMessageContent::Text(message.take_conversation())
        } else if message.has_imageMessage() {
            let mut image_message = message.take_imageMessage();
            ChatMessageContent::Image(FileInfo {
                url: image_message.take_url(),
                mime: image_message.take_mimetype(),
                sha256: image_message.take_fileSha256(),
                enc_sha256: image_message.take_fileEncSha256(),
                size: image_message.get_fileLength() as usize,
                key: image_message.take_mediaKey(),
            }, (image_message.get_height(), image_message.get_width()), image_message.take_jpegThumbnail())
        } else if message.has_audioMessage() {
            let mut audio_message = message.take_audioMessage();
            ChatMessageContent::Audio(FileInfo {
                url: audio_message.take_url(),
                mime: audio_message.take_mimetype(),
                sha256: audio_message.take_fileSha256(),
                enc_sha256: audio_message.take_fileEncSha256(),
                size: audio_message.get_fileLength() as usize,
                key: audio_message.take_mediaKey(),
            }, Duration::new(u64::from(audio_message.get_seconds()), 0))
        } else if message.has_documentMessage() {
            let mut document_message = message.take_documentMessage();
            ChatMessageContent::Document(FileInfo {
                url: document_message.take_url(),
                mime: document_message.take_mimetype(),
                sha256: document_message.take_fileSha256(),
                enc_sha256: document_message.take_fileEncSha256(),
                size: document_message.get_fileLength() as usize,
                key: document_message.take_mediaKey(),
            }, document_message.take_fileName())
        } else {
            ChatMessageContent::Text("TODO".to_string())
        })
    }
    /// Converts typed content back into a protobuf `Message`.
    ///
    /// NOTE(review): only Text/Image/Document are implemented; sending an
    /// `Audio` variant hits the `unimplemented!()` arm and panics.
    pub fn into_proto(self) -> message_wire::Message {
        let mut message = message_wire::Message::new();
        match self {
            ChatMessageContent::Text(text) => message.set_conversation(text),
            ChatMessageContent::Image(info, size, thumbnail) => {
                let mut image_message = message_wire::ImageMessage::new();
                image_message.set_url(info.url);
                image_message.set_mimetype(info.mime);
                image_message.set_fileEncSha256(info.enc_sha256);
                image_message.set_fileSha256(info.sha256);
                image_message.set_fileLength(info.size as u64);
                image_message.set_mediaKey(info.key);
                // `size` is (height, width) — see the enum documentation.
                image_message.set_height(size.0);
                image_message.set_width(size.1);
                image_message.set_jpegThumbnail(thumbnail);
                message.set_imageMessage(image_message);
            }
            ChatMessageContent::Document(info, filename) => {
                let mut document_message = message_wire::DocumentMessage::new();
                document_message.set_url(info.url);
                document_message.set_mimetype(info.mime);
                document_message.set_fileEncSha256(info.enc_sha256);
                document_message.set_fileSha256(info.sha256);
                document_message.set_fileLength(info.size as u64);
                document_message.set_mediaKey(info.key);
                document_message.set_fileName(filename);
                message.set_documentMessage(document_message);
            }
            _ => unimplemented!()
        }
        message
    }
}
/// A single chat message: direction, timestamp, id and typed payload.
#[derive(Debug)]
pub struct ChatMessage {
    pub direction: Direction,
    pub time: NaiveDateTime,
    pub id: MessageId,
    pub content: ChatMessageContent,
}
impl ChatMessage {
    /// Parses a serialized `WebMessageInfo` protobuf into a `ChatMessage`.
    pub fn from_proto_binary(content: &[u8]) -> Result<ChatMessage> {
        let webmessage = protobuf::parse_from_bytes::<message_wire::WebMessageInfo>(content).chain_err(|| "Invalid Protobuf chatmessage")?;
        ChatMessage::from_proto(webmessage)
    }
    /// Converts a decoded `WebMessageInfo` into a `ChatMessage`.
    pub fn from_proto(mut webmessage: message_wire::WebMessageInfo) -> Result<ChatMessage> {
        debug!("Processing WebMessageInfo: {:?}", &webmessage);
        // The key carries both the message id and the direction information.
        let mut key = webmessage.take_key();
        Ok(ChatMessage {
            id: MessageId(key.take_id()),
            direction: Direction::parse(key)?,
            time: NaiveDateTime::from_timestamp(webmessage.get_messageTimestamp() as i64, 0),
            content: ChatMessageContent::from_proto(webmessage.take_message())?,
        })
    }
    /// Serializes this message as `WebMessageInfo` protobuf bytes.
    pub fn into_proto_binary(self) -> Vec<u8> {
        let webmessage = self.into_proto();
        webmessage.write_to_bytes().unwrap()
    }
    /// Builds a `WebMessageInfo` for sending; status is set to PENDING.
    ///
    /// NOTE(review): only outgoing messages are supported; converting a
    /// `Direction::Receiving` message panics via `unimplemented!()`.
    pub fn into_proto(self) -> message_wire::WebMessageInfo {
        let mut webmessage = message_wire::WebMessageInfo::new();
        let mut key = message_wire::MessageKey::new();
        key.set_id(self.id.0);
        match self.direction {
            Direction::Sending(jid) => {
                key.set_remoteJid(jid.to_message_jid());
                key.set_fromMe(true);
            }
            Direction::Receiving(_) => unimplemented!()
        }
        webmessage.set_key(key);
        webmessage.set_messageTimestamp(self.time.timestamp() as u64);
        webmessage.set_message(self.content.into_proto());
        webmessage.set_status(message_wire::WebMessageInfo_STATUS::PENDING);
        debug!("Building WebMessageInfo: {:?}", &webmessage);
        webmessage
    }
}
impl Jid {
    /// Serializes this jid in the form used inside protobuf messages:
    /// `<id>@g.us` for groups, `<id>@s.whatsapp.net` for individuals.
    pub fn to_message_jid(&self) -> String {
        self.id.to_string() + if self.is_group { "@g.us" } else { "@s.whatsapp.net" }
    }
} | rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/examples/presence.rs | examples/presence.rs | extern crate simple_logger;
#[macro_use]
extern crate log;
extern crate qrcode;
extern crate image;
extern crate bincode;
extern crate whatsappweb;
extern crate reqwest;
extern crate base64;
use std::fs::{File, OpenOptions};
use image::Luma;
use whatsappweb::connection;
use whatsappweb::connection::{DisconnectReason, PersistentSession, WhatsappWebHandler, WhatsappWebConnection, UserData, State};
use whatsappweb::message::ChatMessage;
use whatsappweb::Jid;
const SESSION_FILENAME: &str = "session.bin";
/// Example handler that subscribes to one contact's presence and logs changes.
struct Handler {
    subscribed_jid: Jid
}
impl WhatsappWebHandler for Handler {
    fn on_state_changed(&self, connection: &WhatsappWebConnection<Handler>, state: State) {
        info!("new state: {:?}", state);
        // Presence subscriptions can only be registered once connected.
        if state == State::Connected {
            connection.subscribe_presence(&self.subscribed_jid);
        }
    }
    // Persist the session so the next run can log in without scanning a QR code.
    fn on_persistent_session_data_changed(&self, persistent_session: PersistentSession) {
        bincode::serialize_into(OpenOptions::new().create(true).write(true).open(SESSION_FILENAME).unwrap(), &persistent_session).unwrap();
    }
    fn on_user_data_changed(&self, _: &WhatsappWebConnection<Handler>, user_data: UserData) {
        // Only presence changes for the subscribed contact are of interest.
        if let UserData::PresenceChange(jid, status, _) = user_data {
            if jid == self.subscribed_jid {
                info!("{} is now {:?}", jid.phonenumber().unwrap(), status);
            }
        }
    }
    fn on_disconnect(&self, _: DisconnectReason) {
        info!("disconnected");
    }
    fn on_message(&self, _: &WhatsappWebConnection<Handler>, _: bool, _: Box<ChatMessage>) {}
}
/// Logs in — restoring a stored session when one exists, otherwise writing a
/// login QR code to `login_qr.png` — and waits for the connection to finish.
fn main() {
    simple_logger::init_with_level(log::Level::Info).unwrap();
    let handler = Handler {subscribed_jid: Jid::from_phone_number("+49123456789".to_string()).unwrap()};
    match File::open(SESSION_FILENAME) {
        Ok(file) => {
            let (_, join_handle) = connection::with_persistent_session(bincode::deserialize_from(file).unwrap(), handler);
            join_handle.join().unwrap();
        }
        Err(_) => {
            let (_, join_handle) = connection::new(|qr| { qr.render::<Luma<u8>>().module_dimensions(10, 10).build().save("login_qr.png").unwrap(); }, handler);
            join_handle.join().unwrap();
        }
    }
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/examples/media.rs | examples/media.rs | extern crate simple_logger;
#[macro_use]
extern crate log;
extern crate qrcode;
extern crate image;
extern crate bincode;
extern crate whatsappweb;
extern crate reqwest;
extern crate base64;
use std::fs::{File, OpenOptions, remove_file};
use std::io::Read;
use std::sync::Arc;
use image::Luma;
use whatsappweb::connection;
use whatsappweb::connection::{DisconnectReason, PersistentSession, WhatsappWebHandler, WhatsappWebConnection, UserData, State};
use whatsappweb::message::{ChatMessage, ChatMessageContent};
use whatsappweb::media;
use whatsappweb::{Jid, MediaType};
const SESSION_FILENAME: &str = "session.bin";
/// Example handler: once connected, uploads a local image and sends it.
struct Handler {}
impl WhatsappWebHandler for Handler {
    fn on_state_changed(&self, connection: &WhatsappWebConnection<Handler>, state: State) {
        info!("new state: {:?}", state);
        if state == State::Connected {
            let mut file = Vec::new();
            File::open("path/to/image.jpg").unwrap().read_to_end(&mut file).unwrap();
            let connection0 = connection.clone();
            let (thumbnail, size) = media::generate_thumbnail_and_get_size(&file);
            let thumbnail = Arc::new(thumbnail);
            // Upload is asynchronous; the callback fires with the file info
            // once the server accepted the upload, and sends the message then.
            media::upload_file(&file, MediaType::Image, &connection, Box::new(move |file_info| {
                let jid = Jid::from_phone_number("+49123456789".to_string()).unwrap();
                connection0.send_message(ChatMessageContent::Image(file_info.unwrap(), size, thumbnail.to_vec()), jid);
            }));
        }
    }
    // Persist the session so the next run can log in without scanning a QR code.
    fn on_persistent_session_data_changed(&self, persistent_session: PersistentSession) {
        bincode::serialize_into(OpenOptions::new().create(true).write(true).open(SESSION_FILENAME).unwrap(), &persistent_session).unwrap();
    }
    fn on_user_data_changed(&self, _: &WhatsappWebConnection<Handler>, _: UserData) {}
    fn on_disconnect(&self, reason: DisconnectReason) {
        info!("disconnected");
        // `if let` replaces the single-arm `match reason { ... _ => {} }`.
        if let DisconnectReason::Removed = reason {
            // The stored session is invalid once the device was removed.
            remove_file(SESSION_FILENAME).unwrap();
        }
    }
    fn on_message(&self, _: &WhatsappWebConnection<Handler>, _: bool, _: Box<ChatMessage>) {}
}
/// Logs in — restoring a stored session when one exists, otherwise writing a
/// login QR code to `login_qr.png` — and waits for the connection to finish.
fn main() {
    simple_logger::init_with_level(log::Level::Debug).unwrap();
    let handler = Handler {};
    match File::open(SESSION_FILENAME) {
        Ok(file) => {
            let (_, join_handle) = connection::with_persistent_session(bincode::deserialize_from(file).unwrap(), handler);
            join_handle.join().unwrap();
        }
        Err(_) => {
            let (_, join_handle) = connection::new(|qr| { qr.render::<Luma<u8>>().module_dimensions(10, 10).build().save("login_qr.png").unwrap(); }, handler);
            join_handle.join().unwrap();
        }
    }
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
wiomoc/whatsappweb-rs | https://github.com/wiomoc/whatsappweb-rs/blob/dacf1ef4d9879c3ca2346620ebd94ed449bab654/examples/echo.rs | examples/echo.rs | extern crate simple_logger;
#[macro_use]
extern crate log;
extern crate qrcode;
extern crate image;
extern crate bincode;
extern crate whatsappweb;
extern crate reqwest;
extern crate base64;
use std::fs::{File, OpenOptions, remove_file};
use std::io::{Read, Write, Cursor};
use std::sync::{RwLock, Arc};
use std::str::FromStr;
use image::Luma;
use whatsappweb::connection::*;
use whatsappweb::{Jid, Contact, PresenceStatus, GroupParticipantsChange, ChatAction, MediaType};
use whatsappweb::message::{MessageAck, MessageAckSide, MessageAckLevel, Direction, Peer, ChatMessageContent, ChatMessage};
use whatsappweb::crypto;
use whatsappweb::media;
const SESSION_FILENAME: &str = "session.bin";
/// Example handler that echoes text messages from one whitelisted contact.
struct Handler {}
impl WhatsappWebHandler for Handler {
    fn on_state_changed(&self, _connection: &WhatsappWebConnection<Handler>, state: State) {
        info!("new state: {:?}", state);
    }
    // Persist the session so the next run can log in without scanning a QR code.
    fn on_persistent_session_data_changed(&self, persistent_session: PersistentSession) {
        bincode::serialize_into(OpenOptions::new().create(true).write(true).open(SESSION_FILENAME).unwrap(), &persistent_session).unwrap();
    }
    fn on_user_data_changed(&self, _connection: &WhatsappWebConnection<Handler>, user_data: UserData) {
        info!("userdata changed: {:?}", user_data);
    }
    fn on_disconnect(&self, reason: whatsappweb::connection::DisconnectReason) {
        info!("disconnected");
        // `if let` replaces the single-arm `match reason { ... _ => {} }`.
        if let whatsappweb::connection::DisconnectReason::Removed = reason {
            // The stored session is invalid once the device was removed.
            remove_file(SESSION_FILENAME).unwrap();
        }
    }
    fn on_message(&self, connection: &WhatsappWebConnection<Handler>, message_new: bool, message: Box<ChatMessage>) {
        // Only react to freshly received messages, not the history replay.
        if !message_new {
            return;
        }
        let message = *message;
        let accepted_jid = Jid::from_str("491234567@c.us").unwrap();
        let peer = match message.direction {
            Direction::Receiving(peer) => peer,
            _ => return
        };
        // Only direct chats from the whitelisted jid are echoed.
        match &peer {
            Peer::Individual(jid) if jid == &accepted_jid => {}
            _ => return
        }
        connection.send_message_read(message.id.clone(), peer.clone());
        // Echo text content back; other payload types are ignored.
        if let ChatMessageContent::Text(text) = message.content {
            connection.send_message(ChatMessageContent::Text(text), accepted_jid);
        }
    }
}
/// Logs in — restoring a stored session when one exists, otherwise writing a
/// login QR code to `login_qr.png` — and waits for the connection to finish.
fn main() {
    let handler = Handler {};
    match File::open(SESSION_FILENAME) {
        Ok(file) => {
            let (_, join_handle) = whatsappweb::connection::with_persistent_session(bincode::deserialize_from(file).unwrap(), handler);
            join_handle.join().unwrap();
        }
        Err(_) => {
            let (_, join_handle) = whatsappweb::connection::new(|qr| { qr.render::<Luma<u8>>().module_dimensions(10, 10).build().save("login_qr.png").unwrap(); }, handler);
            join_handle.join().unwrap();
        }
    }
}
| rust | MIT | dacf1ef4d9879c3ca2346620ebd94ed449bab654 | 2026-01-04T20:21:21.071998Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/markup.rs | src/markup.rs | use crate::ocr::MarkupBox;
use colorsys::{Hsl, Rgb};
use image::{DynamicImage, GenericImage};
use imageproc::rect::Rect;
/// Writes `color` at `(x, y)` when the coordinate is inside the image;
/// out-of-bounds writes are silently skipped. Always returns `Ok(())`.
fn safe_put_pixel(
    origin_image: &mut DynamicImage,
    x: u32,
    y: u32,
    color: image::Rgba<u8>,
) -> anyhow::Result<()> {
    let in_bounds = x < origin_image.width() && y < origin_image.height();
    if in_bounds {
        origin_image.put_pixel(x, y, color);
    }
    Ok(())
}
/// Draws recognition results onto screenshots: dims everything outside the
/// recognized regions and outlines each region with a yellow border.
pub struct ImageMarkupDecorator {}
impl ImageMarkupDecorator {
    pub fn new() -> Self {
        ImageMarkupDecorator {}
    }
    /// Returns a copy of `origin_image` with non-markup pixels darkened and
    /// desaturated, and a border drawn around every markup box.
    pub fn markup_recognition(
        &self,
        origin_image: &DynamicImage,
        markups: &Vec<MarkupBox>,
    ) -> anyhow::Result<DynamicImage> {
        let mut result = origin_image.clone();
        // darken the area not in the markups
        imageproc::map::map_pixels_mut(&mut result, |x, y, it| {
            if is_pixel_in_boxes(x, y, markups) {
                it
            } else {
                // Convert to HSL, drop lightness to 20% and remove saturation,
                // keeping the original alpha channel.
                let rgb = Rgb::from((it[0], it[1], it[2]));
                let mut hsl: Hsl = rgb.as_ref().into();
                hsl.set_lightness(hsl.lightness() * 0.2);
                hsl.set_saturation(0.0);
                let rgb: Rgb = hsl.into();
                image::Rgba([rgb.red() as u8, rgb.green() as u8, rgb.blue() as u8, it[3]])
            }
        });
        // Semi-transparent yellow, 4px border around each recognized region.
        for markup in markups {
            self.draw_box(&mut result, markup, image::Rgba([255, 255, 0, 128]), 4)?;
        }
        Ok(result)
    }
    /// Draws a rectangular border of width `border` around `markup` by filling
    /// four rectangles just outside the box edges.
    fn draw_box(
        &self,
        origin_image: &mut DynamicImage,
        markup: &MarkupBox,
        color: image::Rgba<u8>,
        border: i32,
    ) -> anyhow::Result<()> {
        // top
        let rect_top = Rect::at(
            markup.left as i32 - border as i32,
            markup.top as i32 - border as i32,
        )
        .of_size(markup.width + border as u32 * 2, border as u32);
        imageproc::drawing::draw_filled_rect_mut(origin_image, rect_top, color);
        // bottom
        let rect_bottom = Rect::at(
            markup.left as i32 - border as i32,
            markup.top as i32 + markup.height as i32,
        )
        .of_size(markup.width + border as u32 * 2, border as u32);
        imageproc::drawing::draw_filled_rect_mut(origin_image, rect_bottom, color);
        // left
        let rect_left = Rect::at(
            markup.left as i32 - border as i32,
            markup.top as i32 - border as i32,
        )
        .of_size(border as u32, markup.height + border as u32 * 2);
        imageproc::drawing::draw_filled_rect_mut(origin_image, rect_left, color);
        // right
        let rect_right = Rect::at(
            markup.left as i32 + markup.width as i32,
            markup.top as i32 - border as i32,
        )
        .of_size(border as u32, markup.height + border as u32 * 2);
        imageproc::drawing::draw_filled_rect_mut(origin_image, rect_right, color);
        Ok(())
    }
}
/// Returns `true` when pixel `(x, y)` lies inside `box_` (left/top edges
/// inclusive, right/bottom edges exclusive).
fn is_pixel_in_box(x: u32, y: u32, box_: &MarkupBox) -> bool {
    let horizontal = x >= box_.left && x < box_.left + box_.width;
    horizontal && y >= box_.top && y < box_.top + box_.height
}
/// Returns `true` when `(x, y)` lies inside at least one of `boxes`.
///
/// Takes `&[MarkupBox]` instead of `&Vec<MarkupBox>` (clippy `ptr_arg`);
/// existing `&Vec` call sites keep working through deref coercion. The manual
/// loop is replaced by the equivalent, short-circuiting `Iterator::any`.
fn is_pixel_in_boxes(x: u32, y: u32, boxes: &[MarkupBox]) -> bool {
    boxes.iter().any(|box_| is_pixel_in_box(x, y, box_))
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/analysis.rs | src/analysis.rs | use std::sync::Arc;
use anyhow::Result;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use crate::{
image_archive::{ImageArchiver},
ocr::{CharacterRecognizer, RecognizeItem},
repository::{EntityImage, EntityText, Repository},
screenshot::Screenshot,
};
/// Orchestrates the screenshot pipeline: archive the image, OCR it, persist
/// the recognized text, and serve full-text searches.
pub struct Analysis {
    ocr: Arc<dyn CharacterRecognizer + Send + Sync>,
    repo: Arc<dyn Repository + Send + Sync>,
    archiver: Arc<dyn ImageArchiver + Send + Sync>,
}
impl Analysis {
    pub fn new(
        ocr: Arc<dyn CharacterRecognizer + Send + Sync>,
        repo: Arc<dyn Repository + Send + Sync>,
        archiver: Arc<dyn ImageArchiver + Send + Sync>,
    ) -> Self {
        Self {
            ocr,
            repo,
            archiver,
        }
    }
    /// Archives the screenshot, stores its image row, runs OCR and persists
    /// the recognized word-level texts linked to that image.
    pub async fn record_screenshot(&self, screenshot: &Screenshot) -> Result<()> {
        let archive = self.archiver.archive(screenshot).await?;
        // id 0 is a placeholder; the repository assigns the real id on save.
        let entity_image = EntityImage::new(0, archive.archive_type, archive.archive_detail, screenshot.metadata.captured_at_epoch);
        let entity_image = self.repo.save_image(&entity_image).await?;
        let ocr_result: Vec<RecognizeItem> = self.ocr.recognize(&screenshot.image).await?;
        let entity_texts: Vec<EntityText> = ocr_result
            .iter()
            // Level 5 filters for the finest recognition granularity;
            // presumably tesseract's word level — TODO confirm.
            .filter(|it| it.level == 5)
            .filter_map(|it: &RecognizeItem| -> Option<EntityText> { it.try_into().ok() })
            .map(|mut it| {
                it.image_id = entity_image.id;
                it
            })
            .collect();
        self.repo.save_texts(&entity_texts).await?;
        Ok(())
    }
    /// Full-text search over stored texts, grouped into one result per image.
    pub async fn search(&self, text: &str) -> Result<Vec<SearchResult>> {
        let texts = self.repo.full_text_search(text).await?;
        // NOTE(review): itertools `group_by` only groups *consecutive* rows;
        // this assumes `full_text_search` returns rows ordered by image_id —
        // otherwise the same image can appear in several SearchResults.
        let result: Vec<SearchResult> = texts
            .into_iter()
            .group_by(|it| it.image_id)
            .into_iter()
            .map(|(image_id, group)| SearchResult::new(image_id, group.collect()))
            .collect();
        Ok(result)
    }
}
/// All text matches belonging to a single archived screenshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SearchResult {
    pub image_id: u32,
    pub texts: Vec<EntityText>,
}
impl SearchResult {
    pub fn new(image_id: u32, texts: Vec<EntityText>) -> Self {
        Self { image_id, texts }
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/ocr.rs | src/ocr.rs | use anyhow::Ok;
use async_trait::async_trait;
/// A single OCR result: the recognized text, its bounding box and the
/// hierarchy level reported by the recognizer.
#[derive(Debug, Clone)]
pub struct RecognizeItem {
    pub text: String,
    pub markup: MarkupBox,
    pub level: u32,
}
impl RecognizeItem {
    pub fn new(text: String, markup: MarkupBox, level: u32) -> Self {
        RecognizeItem { text, markup, level }
    }
}
/// Axis-aligned bounding box in image pixel coordinates.
#[derive(Debug, Clone, Copy)]
pub struct MarkupBox {
    pub left: u32,
    pub top: u32,
    pub width: u32,
    pub height: u32,
}
impl MarkupBox {
    /// Creates a box from unsigned pixel coordinates.
    pub fn new(left: u32, top: u32, width: u32, height: u32) -> Self {
        Self {
            left,
            top,
            width,
            height,
        }
    }
    /// Creates a box from possibly-signed coordinates (raw recognizer output),
    /// clamping negative values to 0. The previous bare `as u32` casts wrapped
    /// negatives to huge values (-1 became 4294967295), producing nonsense boxes.
    pub fn new_i32(left: i32, top: i32, width: i32, height: i32) -> Self {
        Self {
            left: left.max(0) as u32,
            top: top.max(0) as u32,
            width: width.max(0) as u32,
            height: height.max(0) as u32,
        }
    }
}
/// Abstraction over an OCR engine.
#[async_trait]
pub trait CharacterRecognizer {
    /// Runs OCR over `image` and returns every recognized item.
    async fn recognize(&self, image: &image::DynamicImage) -> anyhow::Result<Vec<RecognizeItem>>;
}
/// `CharacterRecognizer` implementation backed by tesseract.
pub struct TesseractOCR {}
impl TesseractOCR {
    pub fn new() -> Self {
        Self {}
    }
}
#[async_trait]
impl CharacterRecognizer for TesseractOCR {
    /// Runs tesseract with default arguments and maps its TSV output rows
    /// into `RecognizeItem`s (text, bounding box, hierarchy level).
    async fn recognize(&self, image: &image::DynamicImage) -> anyhow::Result<Vec<RecognizeItem>> {
        let default_args = rusty_tesseract::Args::default();
        let ri = rusty_tesseract::Image::from_dynamic_image(image)?;
        let output = rusty_tesseract::image_to_data(&ri, &default_args)?;
        let result: Vec<RecognizeItem> = output
            .data
            .iter()
            .map(|x| {
                let text = x.text.clone();
                // Coordinates arrive as i32; new_i32 converts them to u32.
                let markup = MarkupBox::new_i32(x.left, x.top, x.width, x.height);
                RecognizeItem::new(text, markup, x.level as u32)
            })
            .collect();
        Ok(result)
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/screenshot.rs | src/screenshot.rs | use std::sync::{Arc, Mutex};
use anyhow::anyhow;
use async_trait::async_trait;
use image::DynamicImage;
use screenshots::{Image, Screen};
#[async_trait]
pub trait Capturer {
    /// Capture the contents of all the screens, returning a vector of images.
    async fn capture(&self) -> anyhow::Result<Vec<Screenshot>>;
}
/// Default `Capturer` backed by the `screenshots` crate.
pub struct DefaultCapturer {}
impl DefaultCapturer {
    pub fn new() -> Self {
        DefaultCapturer {}
    }
}
/// Capture context for one screenshot.
#[derive(Debug)]
pub struct Metadata {
    /// Display id as reported by the `screenshots` crate.
    pub screen_id: u32,
    /// Unix timestamp (seconds) of the capture batch.
    pub captured_at_epoch: u64,
}
/// A captured screen image together with its metadata.
#[derive(Debug)]
pub struct Screenshot {
    pub image: DynamicImage,
    pub metadata: Metadata,
}
#[async_trait]
impl Capturer for DefaultCapturer {
    /// Captures every attached screen concurrently on the blocking thread
    /// pool and returns one `Screenshot` per screen.
    ///
    /// Each blocking task now returns its `Screenshot` directly and the
    /// results are collected from the join handles, replacing the previous
    /// shared `Arc<Mutex<Vec<_>>>` accumulator. This removes the lock and
    /// makes the output order deterministic (screen order instead of task
    /// completion order).
    async fn capture(&self) -> anyhow::Result<Vec<Screenshot>> {
        // One timestamp for the whole batch so all screens share a capture time.
        let now_epoch = chrono::Utc::now().timestamp() as u64;
        let screens = Screen::all()?;
        // capture all screens concurrently
        let mut tasks = Vec::with_capacity(screens.len());
        for screen in screens {
            let t = tokio::task::spawn_blocking(move || {
                let capture = screen.capture().unwrap();
                let image = screen_image_2_image_image(capture).unwrap();
                Screenshot {
                    image,
                    metadata: Metadata {
                        screen_id: screen.display_info.id,
                        captured_at_epoch: now_epoch,
                    },
                }
            });
            tasks.push(t);
        }
        // Join all tasks, collecting their screenshots in spawn order.
        let mut result = Vec::with_capacity(tasks.len());
        for task in tasks {
            result.push(task.await?);
        }
        Ok(result)
    }
}
/// Converts a `screenshots::Image` into an `image::DynamicImage` by copying
/// its RGBA buffer.
fn screen_image_2_image_image(screen_image: Image) -> anyhow::Result<DynamicImage> {
    let buffer = screen_image.rgba().to_owned();
    let image: image::RgbaImage = image::RgbaImage::from_raw(
        screen_image.width() as u32,
        screen_image.height() as u32,
        buffer,
    )
    // `from_raw` yields None when the buffer length does not match the dimensions.
    .ok_or(anyhow!("load screen image to image::RgbaImage"))?;
    let result = DynamicImage::ImageRgba8(image);
    Ok(result)
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/main.rs | src/main.rs |
use anyhow::Ok;
use anyhow::Result;
use axum::extract::MatchedPath;
use axum::http::Request;
use axum::routing::get;
use axum::{Extension, Router};
use crate::screenshot::Capturer;
use tokio::task::JoinHandle;
use core::panic;
use markup::ImageMarkupDecorator;
use sqlx_sqlite::SqlitePoolOptions;
use std::sync::Arc;
use std::time::Duration;
use tokio::signal;
use tokio_util::sync::CancellationToken;
use tower_http::trace::TraceLayer;
use tracing::info;
use tracing::info_span;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
mod analysis;
mod http;
mod image_archive;
mod markup;
mod ocr;
mod repository;
mod screenshot;
#[tokio::main]
async fn main() -> Result<()> {
    // Resolve `<platform data dir>/dejavu/images` and make sure it exists.
    let image_dir = format!(
        "{}/{}/{}",
        dirs::data_dir()
            .expect("fetch data dir")
            .to_str()
            .expect("data dir path to string"),
        "dejavu",
        "images"
    );
    tokio::fs::create_dir_all(image_dir.clone()).await?;
    // Open the SQLite database next to the images; `mode=rwc` creates the
    // file on first run.
    let pool = SqlitePoolOptions::new()
        .connect(
            format!(
                "{}/{}/{}",
                dirs::data_dir()
                    .expect("fetch data dir")
                    .to_str()
                    .expect("data dir path to string"),
                "dejavu",
                "dejavu.db?mode=rwc"
            )
            .as_str(),
        )
        .await?;
    let repo = repository::sqlite::SqliteRepository::new(pool);
    repo.initialize().await?;
    // Shared components: repository, OCR engine, image archiver and the
    // analysis facade that ties them together.
    let repo_arc = Arc::new(repo);
    let ocr_arc = Arc::new(ocr::TesseractOCR::new());
    let archiver_arc = Arc::new(image_archive::fs::FileSystemImageArchiver::new(image_dir));
    let analysis_arc: Arc<analysis::Analysis> = {
        let repo_arc = repo_arc.clone();
        let archiver_arc = archiver_arc.clone();
        Arc::new(analysis::Analysis::new(ocr_arc, repo_arc, archiver_arc))
    };
    // Cancellation token shared by the capture loop and the HTTP server for
    // coordinated shutdown.
    let token = CancellationToken::new();
    let cloned_token = token.clone();
    // Background loop: every 2 seconds capture all screens and analyze them.
    let capture_task = {
        let analysis_arc = analysis_arc.clone();
        tokio::task::spawn(async move {
            let capturer = screenshot::DefaultCapturer::new();
            let mut capture_interval = tokio::time::interval(Duration::from_secs(2));
            loop {
                if cloned_token.is_cancelled() {
                    break;
                }
                tokio::select! {
                    _ = cloned_token.cancelled() => {
                        info!("shutting down capture task");
                        break;
                    },
                    _ = capture_interval.tick()=>{
                        let captures = capturer.capture().await.unwrap();
                        let mut tasks : Vec<JoinHandle<()>> = Vec::new();
                        // Record each captured screen concurrently.
                        for item in captures {
                            let analysis = analysis_arc.clone();
                            let task = tokio::task::spawn(async move {
                                let result = analysis.record_screenshot(&item).await;
                                if let Err(e) = result {
                                    info!("failed to record screenshot: {}", e);
                                }
                            });
                            tasks.push(task);
                        }
                        // Wait for the whole batch before the next tick.
                        for task in tasks {
                            task.await.unwrap();
                        }
                    },
                }
            }
        })
    };
    // NOTE(review): the subscriber is initialized *after* the capture task is
    // spawned, so log events emitted before this point are dropped — consider
    // moving this to the top of `main`.
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| {
                // axum logs rejections from built-in extractors with the `axum::rejection`
                // target, at `TRACE` level. `axum::rejection=trace` enables showing those events
                // "example_tracing_aka_logging=debug,tower_http=debug,axum::rejection=trace".into()
                "info".into()
            }),
        )
        .with(tracing_subscriber::fmt::layer())
        .init();
    // HTTP service layer, shared with the axum handlers via `Extension`.
    let service_arc = {
        let analysis_arc = analysis_arc.clone();
        Arc::new(http::service::Service::new(
            analysis_arc,
            Arc::new(ImageMarkupDecorator::new()),
            repo_arc.clone(),
            archiver_arc.clone(),
        ))
    };
    let api_router = Router::new()
        .route("/search", get(http::search))
        .route("/image", get(http::fetch_image_with_markup));
    // API under /api; everything else falls back to the embedded frontend.
    let router = Router::new()
        .nest("/api", api_router)
        .fallback(http::frontend::static_handler)
        .layer(Extension(service_arc.clone()))
        .layer(
            TraceLayer::new_for_http().make_span_with(|request: &Request<_>| {
                // Log the matched route's path (with placeholders not filled in).
                // Use request.uri() or OriginalUri if you want the real path.
                let matched_path = request
                    .extensions()
                    .get::<MatchedPath>()
                    .map(MatchedPath::as_str);
                info_span!(
                    "http_request",
                    method = ?request.method(),
                    matched_path,
                    some_other_field = tracing::field::Empty,
                )
            }),
        );
    // Serve the web UI / API until the cancellation token fires.
    let cloned_token = token.clone();
    tokio::task::spawn(async move {
        axum::Server::bind(&"0.0.0.0:12333".parse().unwrap())
            .serve(router.into_make_service())
            .with_graceful_shutdown(async {
                cloned_token.cancelled().await;
            })
            .await
            .unwrap();
    });
    // Ctrl-C triggers a coordinated shutdown of both background tasks.
    let shutdown_guard = tokio::spawn(async move {
        signal::ctrl_c().await.unwrap();
        info!("Ctrl-C received, shutting down");
        token.cancel();
    });
    shutdown_guard.await.unwrap();
    capture_task.await.unwrap();
    Ok(())
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/repository/sqlite.rs | src/repository/sqlite.rs | use super::{EntityImage, EntityText, Repository};
use anyhow::Result;
use async_trait::async_trait;
use futures::TryStreamExt;
use sqlx::Row;
/// `Repository` implementation backed by a SQLite connection pool.
pub struct SqliteRepository {
    pool: sqlx::Pool<sqlx_sqlite::Sqlite>,
}
impl SqliteRepository {
    /// Wrap an already-connected SQLite pool.
    pub fn new(pool: sqlx::Pool<sqlx_sqlite::Sqlite>) -> Self {
        Self { pool }
    }
    /// Create the schema if it does not exist yet: an `images` table, a
    /// `texts` table, and an FTS5 virtual table mirroring the text column
    /// for full-text search.  Safe to call on every startup.
    pub async fn initialize(&self) -> Result<()> {
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS images (
            id INTEGER PRIMARY KEY,
            archive_type TEXT NOT NULL,
            archive_info TEXT NOT NULL,
            captured_at_epoch INTEGER NOT NULL
        )",
        )
        .execute(&self.pool)
        .await?;
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS texts (
            id INTEGER PRIMARY KEY,
            image_id INTEGER NOT NULL,
            text TEXT NOT NULL,
            left INTEGER NOT NULL,
            top INTEGER NOT NULL,
            width INTEGER NOT NULL,
            height INTEGER NOT NULL
        )",
        )
        .execute(&self.pool)
        .await?;
        // `text_id` is stored but not indexed by FTS; it links back to `texts`.
        sqlx::query(
            "CREATE VIRTUAL TABLE IF NOT EXISTS text_fts USING fts5(text, text_id UNINDEXED)",
        )
        .execute(&self.pool)
        .await?;
        Ok(())
    }
}
#[async_trait]
impl Repository for SqliteRepository {
    /// Insert an image row and return the entity with its assigned id.
    async fn save_image(&self, entity: &EntityImage) -> Result<EntityImage> {
        let query_result = sqlx::query(
            "INSERT INTO images (archive_type, archive_info, captured_at_epoch) VALUES (?, ?, ?)",
        )
        .bind(&entity.archive_type)
        .bind(&entity.archive_info)
        // SQLite has no unsigned 64-bit column type; store as i64.
        .bind(entity.captured_at_epoch as i64)
        .execute(&self.pool)
        .await?;
        let id = query_result.last_insert_rowid() as u32;
        Ok(EntityImage {
            id,
            archive_type: entity.archive_type.clone(),
            archive_info: entity.archive_info.clone(),
            captured_at_epoch: entity.captured_at_epoch,
        })
    }
    /// Fetch a single image row by id; errors if it does not exist.
    async fn get_image_by_id(&self, id: u32) -> Result<EntityImage> {
        let query =
            sqlx::query("SELECT archive_type, archive_info, captured_at_epoch FROM images WHERE id = ?").bind(id);
        let row = query.fetch_one(&self.pool).await?;
        let archive_type: String = row.get(0);
        let archive_info: String = row.get(1);
        let captured_at_epoch: i64 = row.get(2);
        Ok(EntityImage {
            id,
            archive_type,
            archive_info,
            captured_at_epoch: captured_at_epoch.try_into()?,
        })
    }
    /// Insert one text row and mirror it into the FTS index.
    async fn save_text(&self, entity: &EntityText) -> Result<EntityText> {
        let query = sqlx::query(
            "INSERT INTO texts (image_id, text, left, top, width, height) VALUES (?, ?, ?, ?, ?, ?)",
        );
        let query_result = query
            .bind(entity.image_id)
            .bind(&entity.text)
            .bind(entity.left)
            .bind(entity.top)
            .bind(entity.width)
            .bind(entity.height)
            .execute(&self.pool)
            .await?;
        let id = query_result.last_insert_rowid() as u32;
        // Mirror into the FTS virtual table so full_text_search can find it.
        let query = sqlx::query("INSERT INTO text_fts (text, text_id) VALUES (?, ?)");
        query
            .bind(&entity.text)
            .bind(id)
            .execute(&self.pool)
            .await?;
        Ok(EntityText {
            id,
            image_id: entity.image_id,
            text: entity.text.clone(),
            left: entity.left,
            top: entity.top,
            width: entity.width,
            height: entity.height,
        })
    }
    /// Bulk-insert texts and mirror them into the FTS index.
    ///
    /// Uses parameter binding (`push_bind`) instead of hand-rolled SQL string
    /// quoting, which both prevents SQL injection and keeps the `text` field
    /// of the returned entities (and the FTS copy) identical to the caller's
    /// input — the previous implementation returned/indexed text wrapped in
    /// extra single quotes.
    async fn save_texts(&self, entities: &[EntityText]) -> Result<Vec<EntityText>> {
        // `push_values` with an empty iterator would build invalid SQL.
        if entities.is_empty() {
            return Ok(Vec::new());
        }
        let mut builder =
            sqlx::QueryBuilder::new("INSERT INTO texts (image_id, text, left, top, width, height)");
        builder.push_values(entities, |mut b, it| {
            b.push_bind(it.image_id)
                .push_bind(it.text.clone())
                .push_bind(it.left)
                .push_bind(it.top)
                .push_bind(it.width)
                .push_bind(it.height);
        });
        let execute_result = builder.build().execute(&self.pool).await?;
        let rows_affected = execute_result.rows_affected();
        let last_insert_rowid = execute_result.last_insert_rowid();
        // SQLite reports the rowid of the *last* inserted row; rowids of a
        // single multi-row INSERT are consecutive, so derive the first id.
        // NOTE(review): assumes no concurrent insert interleaves — confirm.
        let id_start = 1 + last_insert_rowid as u32 - rows_affected as u32;
        let result: Vec<EntityText> = entities
            .iter()
            .enumerate()
            .map(|(i, it)| EntityText {
                id: id_start + i as u32,
                image_id: it.image_id,
                text: it.text.clone(),
                left: it.left,
                top: it.top,
                width: it.width,
                height: it.height,
            })
            .collect();
        // Mirror the batch into the FTS virtual table, again with binds.
        let mut builder = sqlx::QueryBuilder::new("INSERT INTO text_fts (text, text_id)");
        builder.push_values(&result, |mut b, it: &EntityText| {
            b.push_bind(it.text.clone()).push_bind(it.id);
        });
        builder.build().execute(&self.pool).await?;
        Ok(result)
    }
    /// Fetch a single text row by id; errors if it does not exist.
    async fn get_text_by_id(&self, id: u32) -> Result<EntityText> {
        let query =
            sqlx::query("SELECT image_id, text, left, top, width, height FROM texts WHERE id = ?")
                .bind(id);
        let row = query.fetch_one(&self.pool).await?;
        let image_id: u32 = row.get(0);
        let text: String = row.get(1);
        let left: u32 = row.get(2);
        let top: u32 = row.get(3);
        let width: u32 = row.get(4);
        let height: u32 = row.get(5);
        Ok(EntityText {
            id,
            image_id,
            text,
            left,
            top,
            width,
            height,
        })
    }
    /// Full-text search via FTS5 `MATCH`; hydrates each hit from `texts`.
    async fn full_text_search(&self, text: &str) -> Result<Vec<EntityText>> {
        let query = sqlx::query("SELECT text_id FROM text_fts WHERE text_fts MATCH ?1").bind(text);
        let mut rows = query.fetch(&self.pool);
        let mut result = vec![];
        while let Some(row) = rows.try_next().await? {
            let text_id: u32 = row.get(0);
            let entity = self.get_text_by_id(text_id).await?;
            result.push(entity);
        }
        Ok(result)
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/repository/mod.rs | src/repository/mod.rs | use async_trait::async_trait;
use serde::{Deserialize, Serialize};
pub mod in_memory;
pub mod sqlite;
/// A stored screenshot image: which archiver holds its bytes and when it
/// was captured.
#[derive(Debug, Clone)]
pub struct EntityImage {
    pub id: u32,
    // Archiver backend name, e.g. "file_system" or "in_memory".
    pub archive_type: String,
    // Backend-specific locator (file name, UUID, ...).
    pub archive_info: String,
    pub captured_at_epoch: u64,
}
impl EntityImage {
    /// Plain field-by-field constructor.
    pub fn new(
        id: u32,
        archive_type: String,
        archive_info: String,
        captured_at_epoch: u64,
    ) -> Self {
        Self {
            id,
            archive_type,
            archive_info,
            captured_at_epoch,
        }
    }
}
/// A piece of text recognized inside an image, with its bounding box.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityText {
    pub id: u32,
    // The image this text was recognized in.
    pub image_id: u32,
    pub text: String,
    // Bounding box in pixels, relative to the image's top-left corner.
    pub left: u32,
    pub top: u32,
    pub width: u32,
    pub height: u32,
}
impl EntityText {
    /// Plain field-by-field constructor.
    pub fn new(
        id: u32,
        image_id: u32,
        text: String,
        left: u32,
        top: u32,
        width: u32,
        height: u32,
    ) -> Self {
        Self {
            id,
            image_id,
            text,
            left,
            top,
            width,
            height,
        }
    }
}
impl TryFrom<&crate::ocr::RecognizeItem> for EntityText {
    type Error = anyhow::Error;
    /// Build a not-yet-persisted entity (id and image_id are 0) from an OCR
    /// recognition result.
    ///
    /// Only the text `String` needs cloning; the markup coordinates are
    /// copied directly instead of cloning the whole item as before.
    fn try_from(value: &crate::ocr::RecognizeItem) -> anyhow::Result<Self> {
        Ok(Self::new(
            0,
            0,
            value.text.clone(),
            value.markup.left,
            value.markup.top,
            value.markup.width,
            value.markup.height,
        ))
    }
}
/// Persistence boundary for screenshot images and their recognized texts.
#[async_trait]
pub trait Repository {
    /// Store an image row; returns the entity with its assigned id.
    async fn save_image(&self, entity: &EntityImage) -> anyhow::Result<EntityImage>;
    /// Fetch an image by id; errors if it does not exist.
    async fn get_image_by_id(&self, id: u32) -> anyhow::Result<EntityImage>;
    /// Store one recognized text region; returns it with its assigned id.
    async fn save_text(&self, entity: &EntityText) -> anyhow::Result<EntityText>;
    /// Bulk variant of `save_text`.
    async fn save_texts(&self, entities: &[EntityText]) -> anyhow::Result<Vec<EntityText>>;
    /// Fetch a text region by id; errors if it does not exist.
    async fn get_text_by_id(&self, id: u32) -> anyhow::Result<EntityText>;
    /// Find all text entities matching the full-text query `text`.
    async fn full_text_search(&self, text: &str) -> anyhow::Result<Vec<EntityText>>;
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/repository/in_memory.rs | src/repository/in_memory.rs | #[cfg(feature = "in-memory")]
use {async_trait::async_trait, tokio::sync::Mutex, super::{EntityImage, EntityText, Repository}};
/// Test/demo repository keeping everything in `Vec`s behind async mutexes.
#[cfg(feature = "in-memory")]
pub struct InMemoryRepository {
    images: Mutex<Vec<EntityImage>>,
    texts: Mutex<Vec<EntityText>>,
}
#[cfg(feature = "in-memory")]
impl InMemoryRepository {
    /// Create an empty repository.
    pub fn new() -> Self {
        Self {
            images: Mutex::new(vec![]),
            texts: Mutex::new(vec![]),
        }
    }
}
// implement Repository trait for InMemoryRepository
#[cfg(feature = "in-memory")]
#[async_trait]
impl Repository for InMemoryRepository {
    /// Assign the next id, store the entity, and return the stored copy.
    async fn save_image(&self, entity: &EntityImage) -> anyhow::Result<EntityImage> {
        let mut entity = entity.clone();
        let mut guard = self.images.lock().await;
        entity.id = guard.len() as u32;
        // BUG FIX: previously the entity was never pushed into `images`,
        // so `get_image_by_id` could not find anything that was "saved".
        guard.push(entity.clone());
        Ok(entity)
    }
    async fn get_image_by_id(&self, id: u32) -> anyhow::Result<EntityImage> {
        let guard = self.images.lock().await;
        let entity = guard
            .iter()
            .find(|it| it.id == id)
            .ok_or(anyhow::anyhow!("not found"))?;
        Ok(entity.clone())
    }
    /// Assign the next id, store the entity, and return the stored copy.
    async fn save_text(&self, entity: &EntityText) -> anyhow::Result<EntityText> {
        let mut entity = entity.clone();
        let mut guard = self.texts.lock().await;
        entity.id = guard.len() as u32;
        guard.push(entity.clone());
        Ok(entity)
    }
    async fn save_texts(&self, entities: &[EntityText]) -> anyhow::Result<Vec<EntityText>> {
        // Hold the lock once for the whole batch (the original re-locked per
        // entity); ids stay consecutive even with concurrent callers.
        let mut guard = self.texts.lock().await;
        let mut result = Vec::with_capacity(entities.len());
        for entity in entities {
            let mut entity = entity.clone();
            entity.id = guard.len() as u32;
            guard.push(entity.clone());
            result.push(entity);
        }
        Ok(result)
    }
    async fn get_text_by_id(&self, id: u32) -> anyhow::Result<EntityText> {
        let entity = self
            .texts
            .lock()
            .await
            .iter()
            .find(|it| it.id == id)
            .cloned()
            .ok_or(anyhow::anyhow!("not found"))?;
        Ok(entity)
    }
    /// it's not a real full text search, just a simple filter for demo
    async fn full_text_search(&self, text: &str) -> anyhow::Result<Vec<EntityText>> {
        let entities = self
            .texts
            .lock()
            .await
            .iter()
            .filter(|it| it.text.contains(text))
            .cloned()
            .collect();
        Ok(entities)
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/image_archive/fs.rs | src/image_archive/fs.rs | use async_trait::async_trait;
use std::io::Cursor;
use crate::screenshot::Screenshot;
use super::{ImageArchive, ImageArchiver};
/// Archives screenshots as JPEG files inside a directory on the local disk.
pub struct FileSystemImageArchiver {
    storage_path: String,
}

impl FileSystemImageArchiver {
    /// Create an archiver that writes into `storage_path`.
    pub fn new(storage_path: String) -> Self {
        FileSystemImageArchiver { storage_path }
    }
}
#[async_trait]
impl ImageArchiver for FileSystemImageArchiver {
    /// Load a previously archived image back from disk.
    async fn load(&self, image_archive: &ImageArchive) -> anyhow::Result<image::DynamicImage> {
        let path = format!("{}/{}", self.storage_path, image_archive.archive_detail);
        let image = image::open(path)?;
        Ok(image)
    }
    /// Encode the screenshot as JPEG (quality 90) and write it to disk.
    ///
    /// The file name combines a local timestamp with the screen id,
    /// e.g. `2023-01-02-15-04-05-1.jpg`.
    async fn archive(&self, screenshot: &Screenshot) -> anyhow::Result<ImageArchive> {
        // filename format YYYY-MM-DD-HH-MM-SS
        let timestamp = chrono::Local::now().format("%Y-%m-%d-%H-%M-%S").to_string();
        // `format!` already yields an owned String — the previous `.clone()`
        // and trailing `.to_string()` were redundant.
        let filename = format!("{}-{}.jpg", timestamp, screenshot.metadata.screen_id);
        let path = format!("{}/{}", self.storage_path, filename);
        // encode the image as JPG in memory, then write it in one shot
        let mut buffer = Cursor::new(Vec::new());
        screenshot
            .image
            .write_to(&mut buffer, image::ImageOutputFormat::Jpeg(90))?;
        tokio::fs::write(path, buffer.into_inner()).await?;
        Ok(ImageArchive {
            archive_type: "file_system".to_string(),
            archive_detail: filename,
        })
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/image_archive/mod.rs | src/image_archive/mod.rs | use async_trait::async_trait;
use crate::screenshot::Screenshot;
pub mod fs;
pub mod in_memory;
/// A reference to an archived image: which backend stored it and the
/// backend-specific detail (file name, UUID, ...) needed to find it again.
pub struct ImageArchive {
    pub archive_type: String,
    pub archive_detail: String,
}

impl ImageArchive {
    /// Build an archive reference from its backend name and locator.
    pub fn new(archive_type: String, archive_detail: String) -> Self {
        Self {
            archive_type,
            archive_detail,
        }
    }
}
/// Storage backend for screenshot images.
#[async_trait]
pub trait ImageArchiver {
    /// Load a previously archived image identified by `image_archive`.
    async fn load(&self, image_archive: &ImageArchive) -> anyhow::Result<image::DynamicImage>;
    /// Persist `screenshot` and return a reference for later retrieval.
    async fn archive(&self, screenshot: &Screenshot) -> anyhow::Result<ImageArchive>;
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/image_archive/in_memory.rs | src/image_archive/in_memory.rs | #[cfg(feature = "in-memory")]
use {
super::{ImageArchive, ImageArchiver},
crate::screenshot::Screenshot,
async_trait::async_trait,
std::collections::HashMap,
tokio::sync::Mutex,
uuid::Uuid,
};
/// Test/demo archiver keeping images in a UUID-keyed map in memory.
#[cfg(feature = "in-memory")]
pub struct InMemoryImageArchiver {
    // storage: HashMap<UUID, image::RgbaImage>,
    pub storage: Mutex<HashMap<String, image::DynamicImage>>,
}
#[cfg(feature = "in-memory")]
impl InMemoryImageArchiver {
    /// Create an empty archiver.
    pub fn new() -> Self {
        Self {
            storage: Mutex::new(HashMap::new()),
        }
    }
}
#[cfg(feature = "in-memory")]
#[async_trait]
impl ImageArchiver for InMemoryImageArchiver {
    /// Fetch a stored image by its UUID key; errors if the key is unknown.
    async fn load(&self, image_archive: &ImageArchive) -> anyhow::Result<image::DynamicImage> {
        self.storage
            .lock()
            .await
            .get(&image_archive.archive_detail)
            .cloned()
            .ok_or_else(|| anyhow::anyhow!("image not found"))
    }

    /// Store a clone of the screenshot's image under a fresh UUID.
    async fn archive(&self, screenshot: &Screenshot) -> anyhow::Result<ImageArchive> {
        let uuid = Uuid::new_v4().to_string();
        self.storage
            .lock()
            .await
            .insert(uuid.clone(), screenshot.image.clone());
        Ok(ImageArchive {
            archive_type: "in_memory".to_string(),
            archive_detail: uuid,
        })
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/http/error.rs | src/http/error.rs | use axum::{
http::StatusCode,
response::{IntoResponse, Response},
};
/// Wrapper that lets axum handlers bubble up any `anyhow`-compatible error.
pub struct HttpError(anyhow::Error);
impl IntoResponse for HttpError {
    // Every error renders as a plain-text 500 with the error message.
    fn into_response(self) -> Response {
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Something went wrong: {}", self.0),
        )
            .into_response()
    }
}
// Blanket conversion so `?` works on any error convertible to anyhow::Error.
impl<E> From<E> for HttpError
where
    E: Into<anyhow::Error>,
{
    fn from(err: E) -> Self {
        Self(err.into())
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/http/service.rs | src/http/service.rs | use std::sync::Arc;
use image::DynamicImage;
use crate::{
analysis::{Analysis, SearchResult},
http::error::HttpError,
image_archive::{ImageArchive, ImageArchiver},
markup::ImageMarkupDecorator,
ocr::MarkupBox,
repository::Repository,
};
/// Adhoc service layer for web server
pub struct Service {
    // Search / OCR pipeline facade.
    analysis: Arc<Analysis>,
    // Draws markup boxes onto images.
    markup_decorator: Arc<ImageMarkupDecorator>,
    // Persistence for image/text entities.
    repo: Arc<dyn Repository + Send + Sync>,
    // Loads archived image bytes back from storage.
    image_archiver: Arc<dyn ImageArchiver + Send + Sync>,
}
impl Service {
    /// Wire the service together from its (shared) collaborators.
    pub fn new(
        analysis: Arc<Analysis>,
        markup_decorator: Arc<ImageMarkupDecorator>,
        repo: Arc<dyn Repository + Send + Sync>,
        image_archiver: Arc<dyn ImageArchiver + Send + Sync>,
    ) -> Self {
        Self {
            analysis,
            markup_decorator,
            repo,
            image_archiver,
        }
    }
    /// Full-text search over recognized screenshot text.
    pub async fn search(&self, text: &str) -> Result<Vec<SearchResult>, HttpError> {
        let result = self.analysis.search(text).await?;
        Ok(result)
    }
    /// Load the archived image `image_id` and draw a box around every text
    /// region listed in `text_ids`.
    pub async fn fetch_image_with_markup(
        &self,
        image_id: u32,
        text_ids: &Vec<u32>,
    ) -> Result<DynamicImage, HttpError> {
        let entity_image = self.repo.get_image_by_id(image_id).await?;
        let image_archive = ImageArchive::new(entity_image.archive_type, entity_image.archive_info);
        let loaded = self.image_archiver.load(&image_archive).await?;
        // Collect the bounding box of every requested text region.
        let mut markups = Vec::new();
        for text_id in text_ids {
            let entity_text = self.repo.get_text_by_id(*text_id).await?;
            let markup_box = MarkupBox::new(
                entity_text.left,
                entity_text.top,
                entity_text.width,
                entity_text.height,
            );
            markups.push(markup_box);
        }
        let marked = self
            .markup_decorator
            .markup_recognition(&loaded, &markups)?;
        Ok(marked)
    }
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/http/mod.rs | src/http/mod.rs | use self::{error::HttpError, service::Service};
use crate::analysis::SearchResult;
use axum::{extract::Query, http::header, response::IntoResponse, Extension, Json};
use image::{ImageOutputFormat};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
pub mod error;
pub mod service;
pub mod frontend;
/// Query parameters for `GET /api/search`.
#[derive(Deserialize, Serialize)]
pub struct SearchQuery {
    // Full-text query string.
    text: String,
}
/// Handler for `GET /api/search?text=...` — full-text search over
/// recognized screenshot text.
pub async fn search(
    Extension(service): Extension<Arc<Service>>,
    Query(query): Query<SearchQuery>,
) -> Result<Json<Vec<SearchResult>>, HttpError> {
    // `Extension` already hands us an owned `Arc`; the previous extra
    // `.clone()` before the call was redundant.
    let result = service.search(&query.text).await?;
    Ok(Json(result))
}
/// Query parameters for `GET /api/image`.
#[derive(Deserialize, Serialize)]
pub struct ImageWithMarkupQuery {
    image_id: u32,
    /// comma separated list of text ids
    text_ids: String,
}
/// Handler for `GET /api/image` — return the archived image `image_id` as a
/// JPEG with the requested text regions outlined.
pub async fn fetch_image_with_markup(
    Extension(service): Extension<Arc<Service>>,
    Query(query): Query<ImageWithMarkupQuery>,
) -> Result<impl IntoResponse, HttpError> {
    // Parse the comma separated id list. A malformed id now becomes a proper
    // error response via `?` instead of panicking the handler task (the
    // previous `.unwrap()` crashed on any non-numeric user input).
    let text_ids = query
        .text_ids
        .split(',')
        .map(|id| id.trim().parse::<u32>())
        .collect::<Result<Vec<u32>, _>>()?;
    let marked = service
        .fetch_image_with_markup(query.image_id, &text_ids)
        .await?;
    // Re-encode the marked-up image as JPEG for the HTTP response; encoding
    // errors are propagated instead of unwrapped.
    use std::io::Cursor;
    let mut buffer = Cursor::new(Vec::new());
    marked.write_to(&mut buffer, ImageOutputFormat::Jpeg(90))?;
    let bytes: Vec<u8> = buffer.into_inner();
    Ok((
        axum::response::AppendHeaders([(header::CONTENT_TYPE, "image/jpeg")]),
        bytes,
    ))
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
STRRL/dejavu | https://github.com/STRRL/dejavu/blob/7c1d1870e0fc4a5bca9863b97d95584814dcb2ea/src/http/frontend.rs | src/http/frontend.rs | use axum::{
http::{header, StatusCode, Uri},
response::{Html, IntoResponse, Response},
};
use rust_embed::RustEmbed;
#[derive(RustEmbed)]
#[folder = "webui/out/"]
struct Assets;
static INDEX_HTML: &str = "index.html";
/// Serve the embedded single-page frontend.
///
/// Routing rules:
/// - `/` or `/index.html`            -> the index page
/// - an embedded asset path         -> the asset with a guessed MIME type
/// - unknown path with an extension -> 404 (missing asset)
/// - unknown path, no extension     -> index page (SPA client-side routing)
pub async fn static_handler(uri: Uri) -> impl IntoResponse {
    let path = uri.path().trim_start_matches('/');
    if path.is_empty() || path == INDEX_HTML {
        return index_html().await;
    }
    match Assets::get(path) {
        Some(content) => {
            let mime = mime_guess::from_path(path).first_or_octet_stream();
            ([(header::CONTENT_TYPE, mime.as_ref())], content.data).into_response()
        }
        None => {
            // Paths containing a dot are asset lookups; missing asset -> 404.
            if path.contains('.') {
                return not_found().await;
            }
            // Otherwise assume a client-side route and serve the SPA shell.
            index_html().await
        }
    }
}
/// Serve the embedded `index.html`, or 404 if it was not bundled.
async fn index_html() -> Response {
    if let Some(content) = Assets::get(INDEX_HTML) {
        Html(content.data).into_response()
    } else {
        not_found().await
    }
}
/// Plain 404 response used when an embedded asset cannot be found.
async fn not_found() -> Response {
    (StatusCode::NOT_FOUND, "404").into_response()
}
| rust | MIT | 7c1d1870e0fc4a5bca9863b97d95584814dcb2ea | 2026-01-04T20:21:29.042566Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/park.rs | src/park.rs | use crate::os::{Os, Target};
/// Trait for implementing the parking / unparking threads.
pub trait Park: Default + Send + Sync + 'static {
    /// The park routine; should put the processor or thread to sleep in order
    /// to save CPU cycles and power, until the hardware tells it to wake up.
    fn park(&self);
    /// Wake the processor or thread.
    fn unpark(&self);
}
/// Default [`Park`] implementation that delegates to the current target's
/// (`Os`) parking primitive via its associated context.
#[derive(Debug, Default)]
pub struct DefaultPark(<Os as Target>::ParkCx);
impl Park for DefaultPark {
    // Park the current thread.
    #[inline(always)]
    fn park(&self) {
        Os.park(&self.0);
    }
    // Unpark the parked thread
    #[inline(always)]
    fn unpark(&self) {
        Os.unpark(&self.0);
    }
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/lib.rs | src/lib.rs | //! **Minimal asynchronous runtime for Rust**
//!
//! # Optional Features
//! Only the _`std`_ feature is enabled by default
//!
//! - Disable _`std`_ to use pasts without the standard library.
//! - Enable _`web`_ to use pasts within the javascript DOM.
//!
//! # Getting Started
//!
//! Add the following to your **`./Cargo.toml`**:
//! ```toml
//! [dependencies]
//! pasts = "1.0.0"
//!
//! ## This example uses async_main for convenience, but it is *not* required to
//! ## use pasts.
//! async_main = { version = "0.4.0", features = ["pasts"] }
//!
//! ## This example uses async-std for a sleep future, but async-std is *not*
//! ## required to use pasts.
//! async-std = "1.13.2"
//!
//! ## Also not required for pasts, but allows for portability with WebAssembly
//! ## in the browser.
//! [features]
//! web = ["async_main/web", "pasts/web"]
//! ```
#![no_std]
#![forbid(unsafe_code, missing_docs)]
#![deny(
rustdoc::broken_intra_doc_links,
rustdoc::private_intra_doc_links,
rustdoc::missing_crate_level_docs,
rustdoc::private_doc_tests,
rustdoc::invalid_codeblock_attributes,
rustdoc::invalid_html_tags,
rustdoc::invalid_rust_codeblocks,
rustdoc::bare_urls,
rustdoc::unescaped_backticks,
rustdoc::redundant_explicit_links
)]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
#![allow(clippy::needless_doctest_main)]
#![doc(
html_logo_url = "https://ardaku.github.io/mm/logo.svg",
html_favicon_url = "https://ardaku.github.io/mm/icon.svg",
html_root_url = "https://docs.rs/pasts"
)]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
mod executor;
mod future;
mod os;
mod park;
mod pool;
pub use self::{
executor::Executor,
future::{BoxFuture, LocalBoxFuture},
park::Park,
pool::Pool,
};
/// Indicates whether a value is available or if the current task has been
/// scheduled to receive a wakeup instead.
pub type Poll<T = ()> = core::task::Poll<T>;
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/future.rs | src/future.rs | use alloc::boxed::Box;
use core::pin::Pin;
/// An owned dynamically typed [`Future`] for use in cases where you can’t
/// statically type your result or need to add some indirection.
pub type BoxFuture<'a, T = ()> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
/// [`BoxFuture`] without the [`Send`] requirement.
pub type LocalBoxFuture<'a, T = ()> = Pin<Box<dyn Future<Output = T> + 'a>>;
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/os.rs | src/os.rs | // Daku
#[cfg_attr(all(target_arch = "wasm32", daku), path = "os/daku.rs")]
// Std
#[cfg_attr(
all(
feature = "std",
not(all(target_arch = "wasm32", any(daku, feature = "web")))
),
path = "os/std.rs"
)]
// Web
#[cfg_attr(all(target_arch = "wasm32", feature = "web"), path = "os/web.rs")]
mod target;
use alloc::{boxed::Box, sync::Arc, task::Wake, vec::Vec};
use core::{fmt::Debug, pin::Pin, task::Context};
use crate::{LocalBoxFuture, Park, Poll, Pool};
/// Implement `Target for Os` to add platform support for a target.
pub(crate) struct Os;
/// Target platform support
pub(crate) trait Target: Sized {
    /// Platform-specific context used by `park`/`unpark`.
    type ParkCx: Debug + Default;
    /// Stop doing anything on the CPU or thread until unparked.
    #[inline(always)]
    fn park(self, _park_cx: &Self::ParkCx) {
        // Default implementation doesn't actually park for maximum portability.
        //
        // Hint at spin loop to possibly save CPU time with a short sleep.
        core::hint::spin_loop();
    }
    /// Resume execution on the parked CPU or thread.
    #[inline(always)]
    fn unpark(self, _park_cx: &Self::ParkCx) {}
    /// Spawn a local future.
    #[inline(always)]
    fn spawn<P: Pool>(self, pool: &P, f: impl Future<Output = ()> + 'static) {
        self.spawn_boxed(pool, Box::pin(f));
    }
    /// Spawn a local future.
    #[inline(always)]
    fn spawn_boxed<P: Pool>(self, pool: &P, f: LocalBoxFuture<'static>) {
        pool.push(f);
    }
    /// Block on a local future (if the platform allows it, otherwise spawn).
    #[inline(always)]
    fn block_on<P: Pool>(
        self,
        pool: &P,
        f: impl Future<Output = ()> + 'static,
    ) {
        // Adapter: waking any task unparks the executor thread.
        struct Unpark<P: Park>(P);
        impl<P: Park> Wake for Unpark<P> {
            #[inline(always)]
            fn wake(self: Arc<Self>) {
                self.wake_by_ref();
            }
            #[inline(always)]
            fn wake_by_ref(self: &Arc<Self>) {
                self.0.unpark();
            }
        }
        // Box and pin main task
        let f: LocalBoxFuture<'_> = Box::pin(f);
        // Set up the notify
        let tasks = &mut Vec::new();
        // Set up the park, waker, and context
        let unpark = Arc::new(Unpark(<P as Pool>::Park::default()));
        let waker = unpark.clone().into();
        let cx = &mut Context::from_waker(&waker);
        // Which task's turn it is (for basic fairness)
        let mut index = 0;
        // Spawn main task
        tasks.push(f);
        // Run the set of futures to completion.
        while !tasks.is_empty() {
            // Wrap index
            index %= tasks.len();
            // Poll the entire set of futures on wake.  `i` must be the
            // *absolute* position in `tasks` because it is later passed to
            // `swap_remove`.
            let poll = 'poll: {
                // BUG FIX: this pass previously used `.skip(index).enumerate()`,
                // which yields an index relative to the skipped prefix; with
                // `index > 0` the wrong task would be removed below.
                // `.enumerate().skip(index)` keeps the index absolute.
                for (i, this) in tasks.iter_mut().enumerate().skip(index) {
                    if let Poll::Ready(()) = Pin::new(this).poll(cx) {
                        break 'poll Poll::Ready(i);
                    }
                }
                // Wrap around: tasks before `index` (indices already absolute).
                for (i, this) in tasks.iter_mut().take(index).enumerate() {
                    if let Poll::Ready(()) = Pin::new(this).poll(cx) {
                        break 'poll Poll::Ready(i);
                    }
                }
                // Take turns which task polls first
                index += 1;
                break 'poll Poll::Pending;
            };
            // If no tasks have completed, then park
            let Poll::Ready(task_index) = poll else {
                // Initiate execution of any spawned tasks - if no new tasks,
                // park
                if !pool.drain(tasks) {
                    unpark.0.park();
                }
                continue;
            };
            // Task has completed, drop it
            drop(tasks.swap_remove(task_index));
            // Drain any spawned tasks into the pool
            pool.drain(tasks);
        }
    }
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/executor.rs | src/executor.rs | use alloc::sync::Arc;
use core::fmt;
use crate::{
LocalBoxFuture,
os::{Os, Target},
pool::{DefaultPool, Pool},
};
/// Pasts' executor.
///
/// # Run a Future
///
/// It's relatively simple to block on a future, and run it to completion:
///
/// ```rust
#[doc = include_str!("../examples/executor.rs")]
/// ```
///
/// # Spawn a Future
///
/// You may spawn tasks on an `Executor`. Only once all tasks have completed,
/// can [`block_on()`](Executor::block_on()) return.
/// ```rust,no_run
#[doc = include_str!("../examples/spawn.rs")]
/// ```
///
/// # Recursive `block_on()`
///
/// One cool feature about the pasts executor is that you can run it from within
/// the context of another:
/// ```rust
#[doc = include_str!("../examples/recursive.rs")]
/// ```
///
/// Or even resume the executor from within it's own context:
/// ```rust
#[doc = include_str!("../examples/resume.rs")]
/// ```
pub struct Executor<P: Pool = DefaultPool>(Arc<P>);
impl Default for Executor {
    // The default executor uses the single-threaded `DefaultPool`.
    fn default() -> Self {
        Self::new(DefaultPool::default())
    }
}
impl<P: Pool> Clone for Executor<P> {
    // Clones share the same underlying task pool (shallow `Arc` clone).
    fn clone(&self) -> Self {
        Self(Arc::clone(&self.0))
    }
}
impl<P: Pool + fmt::Debug> fmt::Debug for Executor<P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Executor").field(&self.0).finish()
    }
}
impl<P: Pool> Executor<P> {
    /// Create a new executor that can only spawn tasks from the current thread.
    ///
    /// Custom executors can be built by implementing [`Pool`].
    #[inline(always)]
    pub fn new(pool: P) -> Self {
        Self(Arc::new(pool))
    }
    /// Block on a future and return its result.
    ///
    /// # Platform-Specific Behavior
    ///
    /// When building with feature _`web`_, spawns task and returns
    /// immediately instead of blocking.
    #[inline(always)]
    pub fn block_on(self, f: impl Future<Output = ()> + 'static) {
        Os.block_on(&*self.0, f);
    }
}
impl<P: Pool> Executor<P> {
    /// Spawn a [`LocalBoxFuture`] on this executor.
    ///
    /// Execution of the [`LocalBoxFuture`] will halt after the first poll that
    /// returns [`Ready`](Poll::Ready).
    #[inline(always)]
    pub fn spawn_future(&self, f: LocalBoxFuture<'static>) {
        Os.spawn_boxed(&*self.0, f);
    }
    /// Box and spawn a future on this executor.
    #[inline(always)]
    pub fn spawn_boxed(&self, f: impl Future<Output = ()> + 'static) {
        Os.spawn(&*self.0, f);
    }
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/pool.rs | src/pool.rs | use alloc::vec::Vec;
use core::{cell::Cell, fmt};
use crate::{LocalBoxFuture, Park, park::DefaultPark};
/// Storage for a task pool.
///
/// # Implementing `Pool` For A Custom Executor
///
/// This example shows how to create a custom single-threaded executor using
/// [`Executor::new()`](crate::Executor::new).
///
/// ```rust
#[doc = include_str!("../examples/pool.rs")]
/// ```
pub trait Pool {
    /// Type that handles the sleeping / waking of the executor.
    type Park: Park;
    /// Push a task into the thread pool queue.
    fn push(&self, task: LocalBoxFuture<'static>);
    /// Drain tasks from the thread pool queue to a `Vec`. Should return true
    /// if it drained at least one task.
    fn drain(&self, tasks: &mut Vec<LocalBoxFuture<'static>>) -> bool;
}
/// Single-threaded task pool backed by a `Cell`-guarded `Vec`.
#[derive(Default)]
pub struct DefaultPool {
    // `Cell` provides interior mutability without locking; this pool is
    // only touched from one thread.
    spawning_queue: Cell<Vec<LocalBoxFuture<'static>>>,
}
impl fmt::Debug for DefaultPool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `Cell` cannot be borrowed; temporarily take the queue to inspect
        // its length, then put it back.
        let queue = self.spawning_queue.take();
        f.debug_struct("DefaultPool")
            .field("spawning_queue.len()", &queue.len())
            .finish()?;
        self.spawning_queue.set(queue);
        Ok(())
    }
}
impl Pool for DefaultPool {
type Park = DefaultPark;
// Push onto queue of tasks to spawn.
#[inline(always)]
fn push(&self, task: LocalBoxFuture<'static>) {
let mut queue = self.spawning_queue.take();
queue.push(task);
self.spawning_queue.set(queue);
}
// Drain from queue of tasks to spawn.
#[inline(always)]
fn drain(&self, tasks: &mut Vec<LocalBoxFuture<'static>>) -> bool {
let mut queue = self.spawning_queue.take();
let mut drained = queue.drain(..).peekable();
let has_drained = drained.peek().is_some();
tasks.extend(drained);
self.spawning_queue.set(queue);
has_drained
}
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/os/daku.rs | src/os/daku.rs | use super::{Os, Target};
#[derive(Debug, Default)]
pub(crate) struct ParkCx;
impl Target for Os {
type ParkCx = ParkCx;
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/os/target.rs | src/os/target.rs | //! Unknown target, fake implementation.
//!
//! This can be used as a template when adding new target support.
use super::{Os, Target};
/// Default parking implementation doesn't need any state
#[derive(Debug, Default)]
pub(crate) struct ParkCx;
impl Target for Os {
type ParkCx = ParkCx;
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/os/web.rs | src/os/web.rs | use super::{Os, Target};
use crate::{LocalBoxFuture, Pool};
#[derive(Debug, Default)]
pub(crate) struct ParkCx;
impl Target for Os {
type ParkCx = ParkCx;
#[inline(always)]
fn park(self, _park_cx: &Self::ParkCx) {
// Spin loop hints aren't useful on the web since nothing blocks, so do
// nothing instead.
}
/// Spawn a local future.
#[inline(always)]
fn spawn<P: Pool>(self, _pool: &P, f: impl Future<Output = ()> + 'static) {
wasm_bindgen_futures::spawn_local(f);
}
/// Spawn a local future.
#[inline(always)]
fn spawn_boxed<P: Pool>(self, pool: &P, f: LocalBoxFuture<'static>) {
self.spawn(pool, f);
}
#[inline(always)]
fn block_on<P: Pool>(
self,
pool: &P,
f: impl Future<Output = ()> + 'static,
) {
self.spawn(pool, f);
}
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/src/os/std.rs | src/os/std.rs | use std::{
sync::atomic::{AtomicBool, Ordering},
thread::{self, Thread},
};
use super::{Os, Target};
#[derive(Debug)]
pub(crate) struct ParkCx {
is_parked: AtomicBool,
thread: Thread,
}
impl Default for ParkCx {
fn default() -> Self {
Self {
is_parked: AtomicBool::new(true),
thread: thread::current(),
}
}
}
impl Target for Os {
type ParkCx = ParkCx;
fn park(self, park_cx: &Self::ParkCx) {
// Loop until old `is_parked` value is `false`, this function will exit
// immediately on the first call in case `unpark()` was called after the
// decision to park.
while park_cx.is_parked.swap(true, Ordering::Relaxed) {
// Park the thread until either the OS gives a spurious wake up, or
// `unpark` is called.
thread::park();
}
}
fn unpark(self, park_cx: &Self::ParkCx) {
// Unpark the thread, but only if the thread is set to parked.
if park_cx.is_parked.swap(false, Ordering::Relaxed) {
park_cx.thread.unpark();
}
}
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/tests/spawn.rs | tests/spawn.rs | use pasts::Executor;
use whisk::Channel;
#[test]
fn spawn_inside_block_on() {
let executor = Executor::default();
let channel = Channel::new();
let sender = channel.clone();
executor.clone().block_on(async move {
executor.spawn_boxed(async move {
sender.send(0xDEADBEEFu32).await;
});
});
Executor::default().block_on(async move {
assert_eq!(0xDEADBEEFu32, channel.recv().await);
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/docs/src/main.rs | docs/src/main.rs | use std::fs;
fn main() -> std::io::Result<()> {
fs::remove_dir_all("./gen-docs/")?;
fs::create_dir_all("./gen-docs/")?;
fs::copy("./examples/counter/build.rs", "./gen-docs/build.rs")?;
fs::copy("./examples/counter/src/main.rs", "./gen-docs/counter.rs")?;
fs::copy("./examples/spawn.rs", "./gen-docs/spawn.rs")?;
fs::copy("./examples/slices.rs", "./gen-docs/slices.rs")?;
fs::copy("./examples/tasks.rs", "./gen-docs/tasks.rs")?;
Ok(())
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/spawn.rs | examples/spawn.rs | use core::time::Duration;
use pasts::Executor;
async fn sleep(seconds: f64) {
async_std::task::sleep(Duration::from_secs_f64(seconds)).await;
}
fn main() {
let executor = Executor::default();
// Spawn before blocking puts the task on a queue.
executor.spawn_boxed(async {
sleep(3.0).await;
println!("3 seconds");
});
// Calling `block_on()` starting executing queued tasks.
executor.clone().block_on(async move {
// Spawn tasks (without being queued)
executor.spawn_boxed(async {
sleep(1.0).await;
println!("1 second");
});
executor.spawn_boxed(async {
sleep(2.0).await;
println!("2 seconds");
});
// Finish this task before spawned tasks will complete.
sleep(0.5).await;
println!("½ second");
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/timer.rs | examples/timer.rs | use core::time::Duration;
use async_std::task;
fn main() {
pasts::Executor::default().block_on(async {
println!("Waiting 2 seconds…");
task::sleep(Duration::new(2, 0)).await;
println!("Waited 2 seconds.");
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/executor.rs | examples/executor.rs | use pasts::Executor;
fn main() {
Executor::default().block_on(async {
println!("Hello from a future!");
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/recursive.rs | examples/recursive.rs | use pasts::Executor;
fn main() {
Executor::default().block_on(async {
Executor::default().block_on(async {
println!("Hello from the future running on the inner executor!");
});
println!("Hello from the future running on the outer executor!");
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/resume.rs | examples/resume.rs | use pasts::Executor;
fn main() {
let executor = Executor::default();
executor.clone().block_on(async move {
println!("Hello from a future!");
executor.block_on(async {
println!("Resuming execution from within the executor context!");
});
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/pool.rs | examples/pool.rs | use std::{
cell::Cell,
thread::{self, Thread},
};
use pasts::{Executor, LocalBoxFuture, Park, Pool};
#[derive(Default)]
struct SingleThreadedPool {
spawning_queue: Cell<Vec<LocalBoxFuture<'static>>>,
}
impl Pool for SingleThreadedPool {
type Park = ThreadPark;
fn push(&self, task: LocalBoxFuture<'static>) {
let mut queue = self.spawning_queue.take();
queue.push(task);
self.spawning_queue.set(queue);
}
fn drain(&self, tasks: &mut Vec<LocalBoxFuture<'static>>) -> bool {
let mut queue = self.spawning_queue.take();
let mut drained = queue.drain(..).peekable();
let has_drained = drained.peek().is_some();
tasks.extend(drained);
self.spawning_queue.set(queue);
has_drained
}
}
struct ThreadPark(Thread);
impl Default for ThreadPark {
fn default() -> Self {
Self(thread::current())
}
}
impl Park for ThreadPark {
fn park(&self) {
thread::park();
}
fn unpark(&self) {
self.0.unpark();
}
}
fn main() {
// Create a custom executor.
let executor = Executor::new(SingleThreadedPool::default());
// Block on a future
executor.block_on(async {
println!("Hi from inside a future!");
});
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
ardaku/pasts | https://github.com/ardaku/pasts/blob/948494403c9a96d7d8ac59ac838cbc4fbb390b9f/examples/no-std/src/main.rs | examples/no-std/src/main.rs | //! Use pasts on no-std, specifically targeting x86_64-unknown-linux-gnu (may
//! work on others). Requires nightly for `eh_personality` lang item (tested
//! with `rustc 1.72.0-nightly (871b59520 2023-05-31)`).
//!
//! ```shell
//!
//! cargo +nightly run
//! ```
#![no_std]
#![no_main]
#![feature(lang_items)]
extern crate alloc;
#[global_allocator]
static _GLOBAL_ALLOCATOR: rlsf::SmallGlobalTlsf = rlsf::SmallGlobalTlsf::new();
#[lang = "eh_personality"]
extern "C" fn _eh_personality() {}
#[no_mangle]
extern "C" fn _Unwind_Resume() {}
fn print(string: &str) {
use core::ffi::{c_int, c_void};
#[link(name = "c")]
extern "C" {
fn write(fd: c_int, buf: *const c_void, count: usize) -> isize;
}
unsafe { write(0, string.as_ptr().cast(), string.len()) };
}
#[panic_handler]
fn yeet(panic: &::core::panic::PanicInfo<'_>) -> ! {
print(&panic.to_string());
print("\n");
loop {}
}
//// End no-std specific boilerplate ////
use alloc::string::ToString;
use async_main::{async_main, LocalSpawner};
use pasts::{prelude::*, Loop};
struct State {
// Spawned tasks
tasks: [LocalBoxNotify<'static, &'static str>; 2],
}
impl State {
fn task_done(&mut self, (_id, text): (usize, &str)) -> Poll {
print(text);
print("\n");
Pending
}
}
async fn task_one() -> &'static str {
print("Task 1...\n");
"Hello"
}
async fn task_two() -> &'static str {
print("Task 2...\n");
"World"
}
#[async_main]
async fn main(_spawner: LocalSpawner) {
// create two tasks to spawn
let task_one = Box::pin(task_one().fuse());
let task_two = Box::pin(task_two().fuse());
// == Allocations end ==
// create array of tasks to spawn
let state = &mut State {
tasks: [task_one, task_two],
};
Loop::new(state)
.on(|s| s.tasks.as_mut_slice(), State::task_done)
.await;
}
mod main {
#[no_mangle]
extern "C" fn main() -> ! {
super::main();
loop {}
}
}
| rust | Apache-2.0 | 948494403c9a96d7d8ac59ac838cbc4fbb390b9f | 2026-01-04T20:21:29.518692Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/lib.rs | crates/wsrx/src/lib.rs | //! WebSocket Reflector X
//!
//! A simple crate that proxies pure TCP connections to WebSocket connections
//! and vice versa.
pub mod proxy;
#[cfg(feature = "client")]
pub mod utils;
#[cfg(feature = "client")]
pub mod tunnel;
pub use proxy::{Error, Message, WrappedWsStream, proxy};
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/utils.rs | crates/wsrx/src/utils.rs | use std::net::ToSocketAddrs;
use axum::http::StatusCode;
use tokio::net::TcpListener;
#[cfg(feature = "log")]
use tracing::error;
/// Creates a TCP listener on the specified local address.
///
/// @param local The local address to bind the TCP listener to.
///
/// @returns A `Result` containing the `TcpListener` if successful,
/// or an error tuple
pub async fn create_tcp_listener(local: &str) -> Result<TcpListener, (StatusCode, String)> {
let mut tcp_addr_obj = local.to_socket_addrs().map_err(|err| {
#[cfg(feature = "log")]
error!("Failed to parse from address: {err}");
(
StatusCode::BAD_REQUEST,
"failed to parse from address".to_owned(),
)
})?;
let tcp_addr_obj = tcp_addr_obj.next().ok_or_else(|| {
#[cfg(feature = "log")]
error!("Failed to get socket addr");
(
StatusCode::BAD_REQUEST,
"failed to get socket addr".to_owned(),
)
})?;
TcpListener::bind(tcp_addr_obj).await.map_err(|err| {
#[cfg(feature = "log")]
error!("Failed to bind tcp address {tcp_addr_obj:?}: {err}");
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("failed to bind tcp address {tcp_addr_obj:?}: {err}"),
)
})
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/main.rs | crates/wsrx/src/main.rs | use std::process;
use clap::Parser;
use rustls::crypto;
use tracing::{error, info, warn};
#[cfg(feature = "client")]
mod cli;
/// wsrx is a controlled WS-TCP tunnel for CTF platforms like ret2shell, GZCTF and CDSCTF etc..
#[derive(Parser)]
#[command(name = "wsrx", bin_name = "wsrx", version, about)]
enum WsrxCli {
#[clap(alias("d"))]
/// Launch wsrx daemon.
Daemon {
#[clap(long)]
/// The admin and ws http address to listen on.
host: Option<String>,
#[clap(short, long)]
/// The admin and ws http port to listen on.
port: Option<u16>,
#[clap(short, long)]
secret: Option<String>,
/// Log in json format.
#[clap(short, long)]
log_json: Option<bool>,
/// The heartbeat interval in seconds.
/// If not set, the daemon will not automatically exit when heartbeat
/// timeout.
#[clap(long)]
heartbeat: Option<u64>,
},
#[clap(alias("c"))]
/// Launch wsrx client.
Connect {
/// The address to connect to.
address: String,
#[clap(long)]
/// The admin and ws http address to listen on.
host: Option<String>,
#[clap(short, long)]
/// The admin and ws http port to listen on.
port: Option<u16>,
/// Log in json format.
#[clap(short, long)]
log_json: Option<bool>,
},
#[clap(alias("s"))]
/// Launch wsrx server.
Serve {
#[clap(long)]
/// The admin and ws http address to listen on.
host: Option<String>,
#[clap(short, long)]
/// The admin and ws http port to listen on.
port: Option<u16>,
#[clap(short, long)]
secret: Option<String>,
/// Log in json format.
#[clap(short, long)]
log_json: Option<bool>,
},
}
#[tokio::main]
async fn main() {
let cli = WsrxCli::parse();
match crypto::aws_lc_rs::default_provider().install_default() {
Ok(_) => info!("using `AWS Libcrypto` as default crypto backend."),
Err(err) => {
error!("`AWS Libcrypto` is not available: {:?}", err);
warn!("try to use `ring` as default crypto backend.");
crypto::ring::default_provider()
.install_default()
.inspect_err(|err| {
error!("`ring` is not available: {:?}", err);
error!("All crypto backend are not available, exiting...");
process::exit(1);
})
.ok();
info!("using `ring` as default crypto backend.");
}
}
#[cfg(feature = "client")]
match cli {
WsrxCli::Daemon {
host,
port,
secret,
log_json,
heartbeat,
} => cli::daemon::launch(host, port, secret, log_json, heartbeat).await,
WsrxCli::Connect {
address,
host,
port,
log_json,
} => cli::connect::launch(address, host, port, log_json).await,
WsrxCli::Serve {
host,
port,
secret,
log_json,
} => cli::serve::launch(host, port, secret, log_json).await,
}
#[cfg(not(feature = "client"))]
error!("wsrx client is not enabled.");
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/proxy.rs | crates/wsrx/src/proxy.rs | //! The main proxy module for WebSocket Reflector X.
use std::{
pin::Pin,
task::{Context, Poll},
};
#[cfg(feature = "server")]
use axum::extract::ws::{Message as AxMessage, WebSocket};
use futures_util::{StreamExt, sink::Sink, stream::Stream};
use thiserror::Error;
use tokio::net::TcpStream;
#[cfg(feature = "client")]
use tokio_tungstenite::{
MaybeTlsStream, WebSocketStream,
tungstenite::{Error as TgError, Message as TgMessage},
};
use tokio_util::{
bytes::{BufMut, Bytes, BytesMut},
codec::{Decoder, Encoder, Framed},
sync::CancellationToken,
};
/// An error type for WebSocket Reflector X.
#[derive(Error, Debug)]
pub enum Error {
/// An IO error.
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
/// A WebSocket error from tungstenite.
#[cfg(feature = "client")]
#[error("WebSocket error: {0}")]
WebSocket(#[from] TgError),
/// A WebSocket error from axum.
#[cfg(feature = "server")]
#[error("Axum error: {0}")]
Axum(#[from] axum::Error),
}
/// A enum for different type of WebSocket message.
///
/// Just Binary message will be tunneled, other type of websocket message will
/// just be discarded.
pub enum Message {
Binary(Vec<u8>),
Others,
}
/// A enum for different type of WebSocket message.
#[cfg(feature = "client")]
impl From<TgMessage> for Message {
/// Converts a `TgMessage` to a `Message`.
fn from(msg: TgMessage) -> Self {
match msg {
TgMessage::Binary(data) => Message::Binary(data.into()),
TgMessage::Text(data) => Message::Binary(data.as_bytes().to_vec()),
_ => Message::Others,
}
}
}
/// Converts a `Message` to a `TgMessage`.
#[cfg(feature = "server")]
impl From<AxMessage> for Message {
/// Converts a `AxMessage` to a `Message`.
fn from(msg: AxMessage) -> Self {
match msg {
AxMessage::Binary(data) => Message::Binary(data.into()),
AxMessage::Text(data) => Message::Binary(data.as_bytes().to_vec()),
_ => Message::Others,
}
}
}
/// A enum for different type of WebSocket stream.
///
/// honestly, this is a bit of a hack, but it works.
/// The WebSocketStream in axum is derived from tungstenite, but axum does not
/// expose the tungstenite stream.
pub enum WsStream {
/// Tungstenite WebSocket stream.
#[cfg(feature = "client")]
Tungstenite(Box<WebSocketStream<MaybeTlsStream<TcpStream>>>),
/// Axum WebSocket stream.
#[cfg(feature = "server")]
AxumWebsocket(Box<WebSocket>),
}
/// A wrapper around WebSocket stream.
pub struct WrappedWsStream {
/// The WebSocket stream.
stream: WsStream,
}
#[cfg(feature = "client")]
impl From<WebSocketStream<MaybeTlsStream<TcpStream>>> for WrappedWsStream {
/// Creates a new `WrappedWsStream` from tungstenite's WebSocket stream.
fn from(stream: WebSocketStream<MaybeTlsStream<TcpStream>>) -> Self {
WrappedWsStream {
stream: WsStream::Tungstenite(Box::new(stream)),
}
}
}
#[cfg(feature = "server")]
impl From<WebSocket> for WrappedWsStream {
/// Creates a new `WrappedWsStream` from axum's WebSocket stream.
fn from(stream: WebSocket) -> Self {
WrappedWsStream {
stream: WsStream::AxumWebsocket(Box::new(stream)),
}
}
}
/// A wrapper around WebSocket stream that implements `Stream` trait.
impl Stream for WrappedWsStream {
type Item = Result<Message, Error>;
/// Polls the next message from the WebSocket stream.
fn poll_next(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
match &mut self.stream {
#[cfg(feature = "client")]
WsStream::Tungstenite(stream) => {
match futures_util::ready!(Pin::new(stream).poll_next(_cx)) {
Some(Ok(msg)) => Poll::Ready(Some(Ok(msg.into()))),
Some(Err(e)) => Poll::Ready(Some(Err(e.into()))),
None => Poll::Ready(None),
}
}
#[cfg(feature = "server")]
WsStream::AxumWebsocket(stream) => {
match futures_util::ready!(Pin::new(stream).poll_next(_cx)) {
Some(Ok(msg)) => Poll::Ready(Some(Ok(msg.into()))),
Some(Err(e)) => Poll::Ready(Some(Err(e.into()))),
None => Poll::Ready(None),
}
}
#[allow(unreachable_patterns)]
_ => Poll::Ready(None),
}
}
}
/// A wrapper around WebSocket stream that implements `Sink` trait.
impl Sink<Message> for WrappedWsStream {
type Error = Error;
/// Polls the WebSocket stream if it is ready to send a message.
fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match &mut self.get_mut().stream {
#[cfg(feature = "client")]
WsStream::Tungstenite(stream) => Pin::new(stream).poll_ready(_cx).map_err(|e| e.into()),
#[cfg(feature = "server")]
WsStream::AxumWebsocket(stream) => {
Pin::new(stream).poll_ready(_cx).map_err(|e| e.into())
}
#[allow(unreachable_patterns)]
_ => Poll::Ready(Ok(())),
}
}
/// Sends a message to the WebSocket stream.
fn start_send(self: Pin<&mut Self>, _item: Message) -> Result<(), Self::Error> {
match &mut self.get_mut().stream {
#[cfg(feature = "client")]
WsStream::Tungstenite(stream) => match _item {
Message::Binary(data) => Pin::new(stream)
.start_send(TgMessage::Binary(data.into()))
.map_err(|e| e.into()),
Message::Others => Ok(()),
},
#[cfg(feature = "server")]
WsStream::AxumWebsocket(stream) => match _item {
Message::Binary(data) => Pin::new(stream)
.start_send(AxMessage::Binary(data.into()))
.map_err(|e| e.into()),
Message::Others => Ok(()),
},
#[allow(unreachable_patterns)]
_ => Ok(()),
}
}
/// Polls the WebSocket stream to flush the message.
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match &mut self.get_mut().stream {
#[cfg(feature = "client")]
WsStream::Tungstenite(stream) => Pin::new(stream).poll_flush(_cx).map_err(|e| e.into()),
#[cfg(feature = "server")]
WsStream::AxumWebsocket(stream) => {
Pin::new(stream).poll_flush(_cx).map_err(|e| e.into())
}
#[allow(unreachable_patterns)]
_ => Poll::Ready(Ok(())),
}
}
/// Polls the WebSocket stream to close the connection.
fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match &mut self.get_mut().stream {
#[cfg(feature = "client")]
WsStream::Tungstenite(stream) => Pin::new(stream).poll_close(_cx).map_err(|e| e.into()),
#[cfg(feature = "server")]
WsStream::AxumWebsocket(stream) => {
Pin::new(stream).poll_close(_cx).map_err(|e| e.into())
}
#[allow(unreachable_patterns)]
_ => Poll::Ready(Ok(())),
}
}
}
/// Proxies a WebSocket stream with a TCP stream.
///
/// * `ws` - The WebSocket stream, either axum's stream or tungstenite stream
/// are supported.
/// * `tcp` - The TCP stream.
/// * `token` - The cancellation token to cancel the proxying.
pub async fn proxy(
ws: WrappedWsStream, tcp: TcpStream, token: CancellationToken,
) -> Result<(), Error> {
let framed_tcp_stream = Framed::new(tcp, MessageCodec::new());
proxy_stream(ws, framed_tcp_stream, token).await
}
/// Proxies two streams.
///
/// * `s1` - The first stream.
/// * `s2` - The second stream.
/// * `token` - The cancellation token to cancel the proxying.
pub async fn proxy_stream<S, T>(s1: S, s2: T, token: CancellationToken) -> Result<(), Error>
where
S: Sink<Message, Error = Error> + Stream<Item = Result<Message, Error>> + Unpin,
T: Sink<Message, Error = Error> + Stream<Item = Result<Message, Error>> + Unpin,
{
let (s1sink, s1stream) = s1.split();
let (s2sink, s2stream) = s2.split();
let f1 = s1stream.forward(s2sink);
let f2 = s2stream.forward(s1sink);
tokio::select! {
res = f1 => res,
res = f2 => res,
_ = token.cancelled() => Ok(())
}
}
/// A codec for WebSocket messages.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Default)]
pub struct MessageCodec;
impl MessageCodec {
/// Creates a new `MessageCodec` for shipping around raw bytes.
pub fn new() -> MessageCodec {
Self
}
}
/// A codec for WebSocket messages.
impl Decoder for MessageCodec {
type Item = Message;
type Error = Error;
/// Decodes a WebSocket message from the buffer.
fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Message>, Error> {
if !buf.is_empty() {
let len = buf.len();
Ok(Some(Message::Binary(buf.split_to(len).to_vec())))
} else {
Ok(None)
}
}
}
/// A codec for WebSocket messages.
impl Encoder<Message> for MessageCodec {
type Error = Error;
/// Encodes a WebSocket message to the buffer.
fn encode(&mut self, data: Message, buf: &mut BytesMut) -> Result<(), Error> {
match data {
Message::Binary(data) => {
buf.reserve(data.len());
buf.put(Bytes::from(data));
Ok(())
}
Message::Others => Ok(()),
}
}
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/tunnel.rs | crates/wsrx/src/tunnel.rs | use std::sync::Arc;
use serde::{Deserialize, Serialize};
use tokio::{net::TcpListener, task::JoinHandle};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use super::proxy;
/// Configuration for a tunnel, contains the local and remote addresses.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TunnelConfig {
#[serde(alias = "from")]
pub local: String,
#[serde(alias = "to")]
pub remote: String,
}
/// A tunnel that proxies TCP connections to a remote WebSocket server.
///
/// This struct is responsible for creating a TCP listener and accepting
/// incoming connections. It will then establish a WebSocket connection to the
/// remote server and proxy the data between the TCP connection and the
/// WebSocket connection.
#[derive(Debug)]
pub struct Tunnel {
config: TunnelConfig,
token: CancellationToken,
handle: JoinHandle<()>,
}
impl Serialize for Tunnel {
#[inline(always)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.config.serialize(serializer)
}
}
impl Tunnel {
/// Creates a new `Tunnel` instance.
pub fn new(remote: impl AsRef<str>, listener: TcpListener) -> Self {
let local = listener
.local_addr()
.expect("failed to bind port")
.to_string();
info!("CREATE tcp server: {} <-wsrx-> {}", local, remote.as_ref());
let token = CancellationToken::new();
let config = TunnelConfig {
local,
remote: remote.as_ref().to_string(),
};
let loop_config = Arc::new(config.clone());
let loop_token = token.clone();
let handle = tokio::spawn(async move {
loop {
let Ok((tcp, _)) = listener.accept().await else {
error!("Failed to accept tcp connection, exiting.");
loop_token.cancel();
return;
};
let peer_addr = tcp.peer_addr().unwrap();
if loop_token.is_cancelled() {
info!(
"STOP tcp server: {} <-wsrx-> {}: Task cancelled",
loop_config.local, loop_config.remote
);
return;
}
info!("LINK {} <-wsrx-> {}", loop_config.remote, peer_addr);
let proxy_config = loop_config.clone();
let proxy_token = loop_token.clone();
tokio::spawn(async move {
use tokio_tungstenite::connect_async;
let ws = match connect_async(proxy_config.remote.as_str()).await {
Ok((ws, _)) => ws,
Err(e) => {
error!("Failed to connect to {}: {}", proxy_config.remote, e);
return;
}
};
match proxy(ws.into(), tcp, proxy_token).await {
Ok(_) => {}
Err(e) => {
error!("Failed to proxy: {e}");
}
}
});
}
});
Self {
config,
token,
handle,
}
}
}
/// Implements the `Drop` trait for the `Tunnel` struct.
///
/// This will cancel the cancellation token and abort the task when the
/// `Tunnel` instance is dropped.
impl Drop for Tunnel {
fn drop(&mut self) {
info!(
"REMOVE tcp server: {} <-wsrx-> {}",
self.config.local, self.config.remote
);
self.token.cancel();
self.handle.abort();
}
}
impl std::ops::Deref for Tunnel {
type Target = TunnelConfig;
fn deref(&self) -> &Self::Target {
&self.config
}
}
impl std::ops::DerefMut for Tunnel {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.config
}
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/cli/serve.rs | crates/wsrx/src/cli/serve.rs | use std::{collections::HashMap, sync::Arc, time::Duration};
use axum::{
body::Body,
extract::{FromRef, Path, Request as ExtractRequest, State, WebSocketUpgrade},
http::{Request, StatusCode, header::CONTENT_TYPE},
middleware::Next,
response::{IntoResponse, Response},
routing::get,
};
use serde::Deserialize;
use tokio::{
net::{TcpListener, TcpStream},
sync::RwLock,
};
use tokio_util::sync::CancellationToken;
use tower_http::trace::TraceLayer;
use tracing::{Span, error, info};
use wsrx::proxy;
use crate::cli::logger::init_logger;
/// Launch the server with the given host, port, and secret.
pub async fn launch(
host: Option<String>, port: Option<u16>, secret: Option<String>, log_json: Option<bool>,
) {
let log_json = log_json.unwrap_or(false);
init_logger(log_json);
let router = build_router(secret);
let listener = TcpListener::bind(&format!(
"{}:{}",
host.unwrap_or(String::from("127.0.0.1")),
port.unwrap_or(0)
))
.await
.expect("failed to bind port");
info!(
"wsrx server is listening on {}",
listener.local_addr().expect("failed to bind port")
);
info!(
"you can access manage api at http://{}/pool",
listener.local_addr().expect("failed to bind port")
);
axum::serve(listener, router)
.await
.expect("failed to launch server");
}
type ConnectionMap = Arc<RwLock<HashMap<String, String>>>;
/// The global state of the server.
#[derive(Clone, FromRef)]
pub struct GlobalState {
pub secret: Option<String>,
pub connections: ConnectionMap,
}
/// Build the router with the given secret.
fn build_router(secret: Option<String>) -> axum::Router {
let state = GlobalState {
secret,
connections: Default::default(),
};
axum::Router::new()
.route(
"/pool",
get(get_tunnels).post(launch_tunnel).delete(close_tunnel),
)
.layer(axum::middleware::from_fn_with_state(
state.clone(),
|State(secret): State<Option<String>>, req: ExtractRequest, next: Next| async move {
if let Some(secret) = secret {
if let Some(auth) = req.headers().get("authorization")
&& auth.to_str().map_err(|_| StatusCode::UNAUTHORIZED)? == secret
{
return Ok(next.run(req).await);
}
return Err(StatusCode::UNAUTHORIZED);
}
Ok(next.run(req).await)
},
))
.route("/traffic/{*key}", get(process_traffic).options(ping))
.layer(
TraceLayer::new_for_http()
.make_span_with(|request: &Request<Body>| {
tracing::info_span!(
"http",
method = %request.method(),
uri = %request.uri().path(),
)
})
.on_request(())
.on_response(|response: &Response, latency: Duration, _span: &Span| {
info!("[{}] in {}ms", response.status(), latency.as_millis());
}),
)
.with_state::<()>(state)
}
/// The request body for launching a tunnel.
#[derive(Deserialize)]
struct TunnelRequest {
pub from: String,
pub to: String,
}
/// Launch a tunnel from the given address to the given address.
async fn launch_tunnel(
State(connections): State<ConnectionMap>, axum::Json(req): axum::Json<TunnelRequest>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
let mut pool = connections.write().await;
pool.insert(req.from, req.to);
Ok(StatusCode::CREATED)
}
/// Get the list of tunnels.
async fn get_tunnels(State(connections): State<ConnectionMap>) -> impl IntoResponse {
let pool = connections.read().await;
let resp = serde_json::to_string::<HashMap<String, String>>(&pool).map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("failed to serialize pool: {e}"),
)
});
axum::response::Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, "application/json")
.body(resp.unwrap())
.unwrap()
}
/// The request body for closing a tunnel.
#[derive(Deserialize)]
struct CloseTunnelRequest {
pub key: String,
}
/// Close a tunnel with the given key.
async fn close_tunnel(
State(connections): State<ConnectionMap>, axum::Json(req): axum::Json<CloseTunnelRequest>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
let mut pool = connections.write().await;
if pool.remove(&req.key).is_some() {
Ok(StatusCode::OK)
} else {
Err((StatusCode::NOT_FOUND, "not found"))
}
}
/// Process the traffic between the WebSocket and TCP connection.
async fn process_traffic(
State(connections): State<ConnectionMap>, Path(key): Path<String>, ws: WebSocketUpgrade,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
let pool = connections.read().await;
if let Some(conn) = pool.get(&key) {
let tcp_addr = conn.to_owned();
Ok(ws.on_upgrade(move |socket| async move {
let tcp = TcpStream::connect(&tcp_addr).await;
if tcp.is_err() {
error!("failed to connect to tcp server: {}", tcp.unwrap_err());
return;
}
let tcp = tcp.unwrap();
proxy(socket.into(), tcp, CancellationToken::new())
.await
.ok();
}))
} else {
Err((StatusCode::NOT_FOUND, "not found"))
}
}
/// Liveness probe for a tunnel: `200 OK` when a tunnel exists for `key`,
/// `404 Not Found` otherwise.
async fn ping(
    State(connections): State<ConnectionMap>, Path(key): Path<String>,
) -> impl IntoResponse {
    if connections.read().await.contains_key(&key) {
        StatusCode::OK
    } else {
        StatusCode::NOT_FOUND
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/cli/logger.rs | crates/wsrx/src/cli/logger.rs | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Initialize the logger.
///
/// Honours `RUST_LOG`; falls back to `wsrx=info,tower_http=info`. When
/// `json` is set the fmt layer emits JSON records instead of plain text.
pub fn init_logger(json: bool) {
    // Both branches need a fresh filter (it is consumed by `with`), so
    // build it through a small closure instead of repeating the literal.
    let filter = || {
        tracing_subscriber::EnvFilter::try_from_default_env()
            .unwrap_or_else(|_| "wsrx=info,tower_http=info".into())
    };
    if json {
        tracing_subscriber::registry()
            .with(filter())
            .with(tracing_subscriber::fmt::layer().json())
            .init();
    } else {
        tracing_subscriber::registry()
            .with(filter())
            .with(tracing_subscriber::fmt::layer())
            .init();
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/cli/connect.rs | crates/wsrx/src/cli/connect.rs | use std::sync::Arc;
use tokio::net::{TcpListener, TcpStream};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
use url::Url;
use wsrx::proxy;
use crate::cli::logger::init_logger;
/// Run the client side of wsrx: listen on `host:port` (defaults to
/// `127.0.0.1:0`, i.e. an ephemeral port) and forward every accepted TCP
/// connection through the WebSocket endpoint `address`.
///
/// Returns early when `address` is not a valid `ws`/`wss` URL; otherwise
/// accepts connections until `accept` fails.
pub async fn launch(
    address: String, host: Option<String>, port: Option<u16>, log_json: Option<bool>,
) {
    let log_json = log_json.unwrap_or(false);
    init_logger(log_json);
    let port = port.unwrap_or(0);
    let host = host.unwrap_or(String::from("127.0.0.1"));
    let listener = TcpListener::bind(format!("{host}:{port}"))
        .await
        .expect("failed to bind port");
    let Ok(url) = Url::parse(&address) else {
        error!("Invalid url, please check your input.");
        return;
    };
    if url.scheme() != "ws" && url.scheme() != "wss" {
        error!("Invalid url scheme, only `ws` and `wss` are supported.");
        return;
    }
    let url = url.as_ref().to_string();
    info!(
        "Hi, I am not RX, RX is here -> {}",
        listener.local_addr().unwrap()
    );
    warn!(
        "wsrx will not report non-critical errors by default, you can set `RUST_LOG=wsrx=debug` to see more details."
    );
    let token = CancellationToken::new();
    // Shared with every per-connection task below.
    let url = Arc::new(url);
    // This loop will "run forever"
    loop {
        let Ok((tcp, _)) = listener.accept().await else {
            error!("Failed to accept tcp connection, exiting.");
            token.cancel();
            return;
        };
        if token.is_cancelled() {
            return;
        }
        let url = url.clone();
        // NOTE(review): `peer_addr()` can fail if the peer disconnected
        // right after accept; the unwrap would panic — confirm acceptable.
        let peer_addr = tcp.peer_addr().unwrap();
        info!("CREATE remote <-wsrx-> {}", peer_addr);
        let token = token.clone();
        // One detached task per inbound connection; errors are only logged.
        tokio::spawn(async move {
            match proxy_ws_addr(url.as_ref(), tcp, token).await {
                Ok(_) => {}
                Err(e) => {
                    info!("REMOVE remote <-wsrx-> {} with error", peer_addr);
                    debug!("TCP connection closed: {}", e);
                }
            }
        });
    }
}
/// Bridge one accepted TCP connection to the WebSocket endpoint `addr`,
/// then log the teardown.
///
/// NOTE(review): `peer_addr().unwrap()` can panic if the peer already
/// vanished — confirm this is acceptable here.
async fn proxy_ws_addr(
    addr: impl AsRef<str>, tcp: TcpStream, token: CancellationToken,
) -> Result<(), wsrx::Error> {
    let peer_addr = tcp.peer_addr().unwrap();
    let (ws, _) = tokio_tungstenite::connect_async(addr.as_ref()).await?;
    proxy(ws.into(), tcp, token).await?;
    info!("REMOVE remote <-wsrx-> {}", peer_addr);
    Ok(())
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/cli/mod.rs | crates/wsrx/src/cli/mod.rs | pub mod connect;
pub mod daemon;
pub mod logger;
pub mod serve;
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/wsrx/src/cli/daemon.rs | crates/wsrx/src/cli/daemon.rs | use std::{
collections::HashMap,
ops::Deref,
sync::{Arc, RwLock as SyncRwLock},
time::Duration,
};
use axum::{
Json,
body::Body,
extract::{FromRef, Request as ExtractRequest, State},
http::{HeaderMap, HeaderValue, Method, Request, StatusCode, header::CONTENT_TYPE},
middleware::Next,
response::{IntoResponse, Response},
routing::get,
};
use chrono::{DateTime, Utc};
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use tokio::{net::TcpListener, sync::RwLock};
use tower_http::{
cors::{AllowOrigin, Any, CorsLayer},
trace::TraceLayer,
};
use tracing::{Span, debug, error, info};
use wsrx::{
tunnel::{Tunnel, TunnelConfig},
utils::create_tcp_listener,
};
use crate::cli::logger::init_logger;
/// Run the wsrx daemon: an HTTP management API bound to `host:port`
/// (defaults to `127.0.0.1:0`).
///
/// `secret`, when set, is enforced as the `Authorization` header by the
/// router's middleware. `heartbeat` (seconds) arms a watchdog task that
/// exits the process when no `/heartbeat` call arrives within the
/// interval.
pub async fn launch(
    host: Option<String>, port: Option<u16>, secret: Option<String>, log_json: Option<bool>,
    heartbeat: Option<u64>,
) {
    let log_json = log_json.unwrap_or(false);
    init_logger(log_json);
    let router = build_router(secret);
    let listener = TcpListener::bind(&format!(
        "{}:{}",
        host.unwrap_or(String::from("127.0.0.1")),
        port.unwrap_or(0)
    ))
    .await
    .expect("failed to bind port");
    info!(
        "wsrx daemon is listening on {}",
        listener.local_addr().expect("failed to bind port")
    );
    info!(
        "you can access manage api at http://{}/pool",
        listener.local_addr().expect("failed to bind port")
    );
    if let Some(interval) = heartbeat {
        tokio::spawn(heartbeat_watchdog(interval));
    }
    axum::serve(listener, router)
        .await
        .expect("failed to launch server");
}
/// Live tunnels, keyed by their local listen address (`Tunnel.local`).
type ConnectionMap = Arc<RwLock<HashMap<String, Tunnel>>>;
/// Shared state handed to every handler via axum extractors.
#[derive(Clone, FromRef)]
pub struct GlobalState {
    // Optional shared secret; when set it is required in `Authorization`.
    pub secret: Option<String>,
    // The live tunnel pool.
    pub connections: ConnectionMap,
}
/// Origins allowed to use the management API (suffix-matched by the CORS layer).
static ALLOWED_ORIGINS: Lazy<Arc<SyncRwLock<Vec<String>>>> =
    Lazy::new(|| Arc::new(SyncRwLock::new(Vec::new())));
/// Origins that requested access and await approval.
static PENDING_ORIGINS: Lazy<Arc<SyncRwLock<Vec<String>>>> =
    Lazy::new(|| Arc::new(SyncRwLock::new(Vec::new())));
/// Timestamp of the last `/heartbeat` call, polled by the watchdog.
static HEARTBEAT_TIME: Lazy<Arc<SyncRwLock<DateTime<Utc>>>> =
    Lazy::new(|| Arc::new(SyncRwLock::new(Utc::now())));
/// Watchdog task: every `interval` seconds, compare now against the last
/// `/heartbeat` timestamp and exit the process once it is stale.
async fn heartbeat_watchdog(interval: u64) {
    loop {
        tokio::time::sleep(Duration::from_secs(interval)).await;
        // Copy the timestamp out so the lock guard is dropped immediately;
        // a poisoned lock just skips this round (same as the old `.ok()`).
        let last_heartbeat = match HEARTBEAT_TIME.read() {
            Ok(guard) => *guard,
            Err(_) => continue,
        };
        let idle_secs = Utc::now()
            .signed_duration_since(last_heartbeat)
            .num_seconds();
        if idle_secs > interval as i64 {
            error!("Heartbeat timeout, last active at {last_heartbeat}, exiting.");
            std::process::exit(0);
        } else {
            debug!("Heartbeat check passed, last active at {last_heartbeat}.");
        }
    }
}
/// Assemble the daemon's axum router.
///
/// Two route groups share one `GlobalState`:
/// - `/pool`, `/heartbeat`, `/access`: management API, CORS-restricted to
///   origins whose `Origin` header ends with an `ALLOWED_ORIGINS` entry.
/// - `/connect`: open to any origin so web pages can request access.
///
/// A middleware layer enforces the optional `secret` as the
/// `Authorization` header on every route; a trace layer logs each request.
fn build_router(secret: Option<String>) -> axum::Router {
    let state = GlobalState {
        secret,
        connections: Default::default(),
    };
    // Restrictive CORS: suffix match against the allow-list.
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST, Method::DELETE])
        .allow_headers(Any)
        .allow_origin(AllowOrigin::predicate(
            |origin: &HeaderValue, _request_parts: &_| {
                let allowed_origin = ALLOWED_ORIGINS.read().unwrap();
                for o in allowed_origin.iter() {
                    if origin.to_str().unwrap_or("").ends_with(o) {
                        return true;
                    }
                }
                false
            },
        ));
    // Permissive CORS for the access-request endpoint only.
    let any_origin_layer = CorsLayer::new()
        .allow_methods([Method::POST])
        .allow_headers(Any)
        .allow_origin(Any);
    axum::Router::new()
        .merge(
            axum::Router::new()
                .route(
                    "/pool",
                    get(get_tunnels).post(launch_tunnel).delete(close_tunnel),
                )
                .route("/heartbeat", get(update_heartbeat))
                .route(
                    "/access",
                    get(get_origins)
                        .post(add_allowed_origin)
                        .delete(remove_allowed_origin),
                )
                .layer(cors_layer)
                .with_state(state.clone()),
        )
        .merge(
            axum::Router::new()
                .route("/connect", get(get_cors_status).post(add_pending_origin))
                .layer(any_origin_layer)
                .with_state(state.clone()),
        )
        // Shared-secret check: when a secret is configured, every request
        // must carry it verbatim in `Authorization`.
        .layer(axum::middleware::from_fn_with_state(
            state.clone(),
            |State(secret): State<Option<String>>, req: ExtractRequest, next: Next| async move {
                if let Some(secret) = secret {
                    if let Some(auth) = req.headers().get("authorization")
                        && auth.to_str().map_err(|_| StatusCode::UNAUTHORIZED)? == secret
                    {
                        return Ok(next.run(req).await);
                    }
                    return Err(StatusCode::UNAUTHORIZED);
                }
                Ok(next.run(req).await)
            },
        ))
        .layer(
            TraceLayer::new_for_http()
                .make_span_with(|request: &Request<Body>| {
                    tracing::info_span!(
                        "http",
                        method = %request.method(),
                        uri = %request.uri().path(),
                    )
                })
                .on_request(())
                .on_failure(())
                .on_response(|response: &Response, latency: Duration, _span: &Span| {
                    debug!(
                        "API Request [{}] in {}ms",
                        response.status(),
                        latency.as_millis()
                    );
                }),
        )
        .with_state::<()>(state)
}
/// Create a new tunnel: bind a TCP listener at `req.local` and register a
/// `Tunnel` toward `req.remote`, keyed by its actual local address.
///
/// Returns the serialized `Tunnel` as JSON. The write lock is a tokio
/// `RwLock`, so holding it across the bind `.await` is allowed.
async fn launch_tunnel(
    State(connections): State<ConnectionMap>, axum::Json(req): axum::Json<TunnelConfig>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
    let mut pool = connections.write().await;
    let listener = create_tcp_listener(req.local.as_str()).await?;
    let tunnel = Tunnel::new(req.remote, listener);
    let resp = serde_json::to_string(&tunnel).map_err(|e| {
        error!("Failed to serialize tunnel: {e:?}");
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to serialize tunnel: {e:?}"),
        )
    });
    // The tunnel is inserted even when serialization failed; only the
    // response reports the error.
    pool.insert(tunnel.local.clone(), tunnel);
    resp
}
/// List all registered tunnels as a JSON object.
///
/// On a serialization failure this now answers `500` with the error text
/// instead of panicking: the original built an error tuple via `map_err`
/// but then discarded it with `unwrap()`.
async fn get_tunnels(State(connections): State<ConnectionMap>) -> impl IntoResponse {
    let pool = connections.read().await;
    match serde_json::to_string(pool.deref()) {
        Ok(body) => axum::response::Response::builder()
            .status(StatusCode::OK)
            .header(CONTENT_TYPE, "application/json")
            .body(body)
            .unwrap(),
        Err(e) => axum::response::Response::builder()
            .status(StatusCode::INTERNAL_SERVER_ERROR)
            .body(format!("failed to serialize pool: {e}"))
            .unwrap(),
    }
}
/// Request body for deleting a tunnel from the pool.
#[derive(Deserialize)]
struct CloseTunnelRequest {
    // Pool key: the tunnel's local listen address.
    pub key: String,
}
/// Remove the tunnel registered under `req.key`.
///
/// Answers `200 OK` when it existed, otherwise logs and answers `404`.
async fn close_tunnel(
    State(connections): State<ConnectionMap>, axum::Json(req): axum::Json<CloseTunnelRequest>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
    let removed = connections.write().await.remove(&req.key);
    match removed {
        Some(_) => Ok(StatusCode::OK),
        None => {
            error!("Tunnel does not exist: {}", req.key);
            Err((StatusCode::NOT_FOUND, "not found"))
        }
    }
}
/// Response body of `GET /access`: the CORS allow-list plus the waitlist.
#[derive(Serialize)]
struct OriginResponse {
    // Origins already granted management-API access.
    pub allowed: Vec<String>,
    // Origins waiting for user approval.
    pub pending: Vec<String>,
}
async fn get_origins() -> Result<impl IntoResponse, (StatusCode, String)> {
let allowed_origin = ALLOWED_ORIGINS.read().unwrap();
let pending = PENDING_ORIGINS.read().unwrap();
let resp = OriginResponse {
allowed: allowed_origin.clone(),
pending: pending.clone(),
};
Ok(Json(resp))
}
/// Approve an origin: drop it from the waitlist (if present) and append
/// it to the allow-list.
async fn add_allowed_origin(
    axum::Json(req): axum::Json<String>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
    let mut allowed_origin = ALLOWED_ORIGINS.write().map_err(|_| {
        error!("Failed to lock allowed origin");
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            "failed to lock allowed origin",
        )
    })?;
    let mut waitlist = PENDING_ORIGINS.write().map_err(|_| {
        error!("Failed to lock origin waitlist");
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            "failed to lock origin waitlist",
        )
    })?;
    // `retain` is a no-op when the origin is absent; the previous
    // `contains` guard was a redundant extra scan.
    waitlist.retain(|o| o != &req);
    allowed_origin.push(req);
    Ok(StatusCode::OK)
}
/// Revoke an origin: remove it from both the waitlist and the allow-list.
async fn remove_allowed_origin(
    axum::Json(req): axum::Json<String>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
    let mut allowed_origin = ALLOWED_ORIGINS.write().map_err(|_| {
        error!("Failed to lock allowed origin");
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            "failed to lock allowed origin",
        )
    })?;
    let mut waitlist = PENDING_ORIGINS.write().map_err(|_| {
        error!("Failed to lock origin waitlist");
        (
            StatusCode::INTERNAL_SERVER_ERROR,
            "failed to lock origin waitlist",
        )
    })?;
    // `retain` handles the absent case by itself; the previous `contains`
    // guard was a redundant extra scan.
    waitlist.retain(|o| o != &req);
    allowed_origin.retain(|o| o != &req);
    Ok(StatusCode::OK)
}
/// Report the approval state of the caller's `Origin` header.
///
/// The status code is a tri-state signal rather than success/failure:
/// `202` = allowed (or no Origin header), `201` = pending approval,
/// `403` = unknown. The `Err` arms only pick the status — both variants
/// implement `IntoResponse` — so `Err(CREATED)` is not an HTTP error.
async fn get_cors_status(headers: HeaderMap) -> impl IntoResponse {
    let allowed_origins = ALLOWED_ORIGINS
        .read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let pending_origins = PENDING_ORIGINS
        .read()
        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let origin = headers.get("origin").map(|o| o.to_str().unwrap_or(""));
    match origin {
        Some(origin) => {
            if allowed_origins.contains(&origin.to_string()) {
                Ok(StatusCode::ACCEPTED)
            } else if pending_origins.contains(&origin.to_string()) {
                Err(StatusCode::CREATED)
            } else {
                Err(StatusCode::FORBIDDEN)
            }
        }
        None => Ok(StatusCode::ACCEPTED),
    }
}
async fn add_pending_origin(
axum::Json(req): axum::Json<String>,
) -> Result<impl IntoResponse, (StatusCode, &'static str)> {
let allowed_origin = ALLOWED_ORIGINS.read().map_err(|_| {
error!("Failed to lock allowed origin");
(
StatusCode::INTERNAL_SERVER_ERROR,
"failed to lock allowed origin",
)
})?;
if allowed_origin.contains(&req) {
return Ok(StatusCode::ACCEPTED);
}
let mut waitlist = PENDING_ORIGINS.write().map_err(|_| {
(
StatusCode::INTERNAL_SERVER_ERROR,
"failed to lock origin waitlist",
)
})?;
if waitlist.contains(&req) {
return Ok(StatusCode::CREATED);
}
waitlist.push(req);
Ok(StatusCode::CREATED)
}
/// `/heartbeat` handler: record "now" as the last heartbeat time.
///
/// A poisoned lock now answers `500` instead of panicking via `unwrap()`,
/// matching the error handling of the other handlers in this file.
async fn update_heartbeat() -> impl IntoResponse {
    match HEARTBEAT_TIME.write() {
        Ok(mut last_heartbeat) => {
            *last_heartbeat = Utc::now();
            StatusCode::OK
        }
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/build.rs | crates/desktop/build.rs | use std::{env, fs, path::Path};
/// Build script: compile the Slint UI, generate version constants into
/// `$OUT_DIR/constants.rs`, and (on Windows) embed the application icon.
fn main() {
    let config = slint_build::CompilerConfiguration::new()
        .with_bundled_translations("ui/i18n")
        .embed_resources(slint_build::EmbedResourcesKind::EmbedFiles);
    slint_build::compile_with_config("ui/main.slint", config).expect("Slint build failed");
    println!("cargo::rerun-if-changed=ui/i18n");
    println!("cargo::rerun-if-env-changed=WSRX_GIT_VERSION");
    let out_dir = env::var_os("OUT_DIR").unwrap();
    let dest_path = Path::new(&out_dir).join("constants.rs");
    // Prefer the explicit override, falling back to the local git
    // description. The original read the env var twice (`is_ok` then
    // `unwrap`); a single `match` performs one lookup.
    let git_v = match env::var("WSRX_GIT_VERSION") {
        Ok(v) => v.to_uppercase(),
        Err(_) => git_version::git_version!(
            args = ["--abbrev=8", "--always", "--dirty=*", "--match=''"],
            fallback = "unknown"
        )
        .to_uppercase(),
    };
    let version = format!(
        "{}-{git_v}-{}",
        env!("CARGO_PKG_VERSION"),
        rustc_version::version().unwrap()
    );
    let full_version = format!(
        "{version}-{}-{}-{}",
        build_target::target_arch(),
        build_target::target_os(),
        build_target::target_env()
            .map(|v| v.as_str().to_string())
            .unwrap_or("general".to_string()),
    );
    fs::write(
        dest_path,
        format!("pub const WSRX_VERSION: &str = \"{version}\";\npub const WSRX_FULL_VERSION: &str = \"{full_version}\";\n"),
    )
    .unwrap();
    if cfg!(target_os = "windows") {
        let mut res = winres::WindowsResource::new();
        res.set_icon("ui/assets/logo.ico");
        res.compile().expect("Failed to set icon");
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/lib.rs | crates/desktop/src/lib.rs | pub mod bridges;
pub mod daemon;
pub mod launcher;
pub mod logging;
include!(concat!(env!("OUT_DIR"), "/constants.rs"));
pub mod ui {
slint::include_modules!();
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/main.rs | crates/desktop/src/main.rs | // Prevent console window in addition to Slint window in Windows release builds
// when, e.g., starting the app via file manager. Ignored on other platforms.
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
use std::error::Error;
use slint::ComponentHandle;
use wsrx_desktop::{launcher, logging};
/// Desktop entry point: set up logging, pick the winit platform backend,
/// build the main window and run the Slint event loop until exit.
fn main() -> Result<(), Box<dyn Error>> {
    // Initialize the logger.
    let (console_guard, file_guard) = logging::setup()?;
    // Set the platform backend to winit.
    #[cfg(not(target_os = "macos"))]
    slint::platform::set_platform(Box::new(i_slint_backend_winit::Backend::new().unwrap()))?;
    #[cfg(target_os = "macos")]
    {
        use winit::platform::macos::WindowAttributesExtMacOS;
        let mut backend = i_slint_backend_winit::Backend::new().unwrap();
        // Blend the titlebar into the window content on macOS.
        backend.window_attributes_hook = Some(Box::new(|attr| {
            attr.with_fullsize_content_view(true)
                .with_title_hidden(true)
                .with_titlebar_transparent(true)
        }));
        slint::platform::set_platform(Box::new(backend))?;
    }
    // Create the main window.
    let ui = launcher::setup()?;
    ui.run().ok();
    // Drop the logger guards after the event loop so buffered lines flush.
    drop(console_guard);
    drop(file_guard);
    drop(ui);
    Ok(())
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/logging.rs | crates/desktop/src/logging.rs | use directories::ProjectDirs;
use tracing_appender::{non_blocking, rolling};
use tracing_subscriber::{EnvFilter, fmt::Layer, layer::SubscriberExt, util::SubscriberInitExt};
/// Initialize the logger.
/// Initialize the logger.
///
/// Installs two layers: a JSON file layer writing to
/// `<data_local_dir>/logs/wsrx.log` and an ANSI console layer, both
/// non-blocking. Returns the `(console, file)` worker guards — they must
/// stay alive for the process lifetime or buffered logs are lost.
/// Also installs a panic hook that records panics via `tracing::error!`.
pub fn setup()
-> Result<(non_blocking::WorkerGuard, non_blocking::WorkerGuard), Box<dyn std::error::Error>> {
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            eprintln!("Unable to find project config directories");
            return Err("Unable to find project config directories".into());
        }
    };
    let log_dir = proj_dirs.data_local_dir().join("logs");
    std::fs::create_dir_all(&log_dir)?;
    // Single file, never rotated: wsrx.log.
    let file_appender = rolling::RollingFileAppender::builder()
        .rotation(rolling::Rotation::NEVER)
        .filename_prefix("wsrx")
        .filename_suffix("log");
    let file_appender = file_appender.build(std::path::Path::new(&log_dir).canonicalize()?)?;
    let (non_blocking_file, file_guard) = non_blocking(file_appender);
    let (non_blocking_console, console_guard) = non_blocking(std::io::stdout());
    // JSON on disk (machine-readable; parsed back by system_info's
    // log streamer), human-readable ANSI on the console.
    let file_log_layer = Layer::new()
        .with_writer(non_blocking_file)
        .with_ansi(false)
        .with_target(true)
        .with_level(true)
        .with_thread_ids(false)
        .with_thread_names(false)
        .json();
    let console_log_layer = Layer::new()
        .with_writer(non_blocking_console)
        .with_ansi(true)
        .with_target(true)
        .with_level(true)
        .with_thread_ids(false)
        .with_thread_names(false);
    let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
    tracing_subscriber::registry()
        .with(filter)
        .with(file_log_layer)
        .with(console_log_layer)
        .init();
    std::panic::set_hook(Box::new(|panic| {
        // If the panic has a source location, record it as structured fields.
        if let Some(location) = panic.location() {
            // On nightly Rust, where the `PanicInfo` type also exposes a
            // `message()` method returning just the message, we could record
            // just the message instead of the entire `fmt::Display`
            // implementation, avoiding the duplicated location
            tracing::error!(
                message = %panic,
                panic.file = location.file(),
                panic.line = location.line(),
                panic.column = location.column(),
            );
        } else {
            tracing::error!(message = %panic);
        }
    }));
    Ok((console_guard, file_guard))
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/launcher.rs | crates/desktop/src/launcher.rs | use async_compat::Compat;
use directories::ProjectDirs;
use slint::PlatformError;
use tracing::info;
use crate::{bridges, daemon, ui::MainWindow};
/// Create the main window, enforcing single-instance semantics.
///
/// When a lock file exists, the file contains the running instance's API
/// port: this process notifies that instance to raise its window (via
/// `POST /popup`) and exits. Otherwise it builds the UI, wires the
/// bridges, loads config and starts the local API daemon.
pub fn setup() -> Result<MainWindow, PlatformError> {
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            eprintln!("Unable to find project config directories");
            return Err(PlatformError::Other(
                "Unable to find project config directories".to_string(),
            ));
        }
    };
    let lock_file = proj_dirs.data_local_dir().join(".rx.is.alive");
    if lock_file.exists() {
        eprintln!("Another instance of the application is already running.");
        // Lock file content is the other instance's API port.
        let api_port = std::fs::read_to_string(&lock_file).unwrap_or_else(|_| {
            eprintln!("Failed to read lock file");
            std::fs::remove_file(&lock_file).unwrap_or_else(|_| {
                eprintln!("Failed to remove lock file");
            });
            std::process::exit(1);
        });
        eprintln!("Notify the other instance to raise...");
        // NOTE(review): this spawns an empty future — looks like a no-op;
        // confirm whether it is still needed.
        slint::spawn_local(Compat::new(async move {})).expect("Failed to spawn thread");
        let client = reqwest::blocking::Client::new();
        match client
            .post(format!("http://127.0.0.1:{api_port}/popup"))
            .header("User-Agent", format!("wsrx/{}", env!("CARGO_PKG_VERSION")))
            .send()
        {
            Ok(_) => {
                eprintln!("Notification sent.");
            }
            Err(e) => {
                // Stale lock: the other instance is gone, clean up.
                eprintln!("Failed to send notification: {e}, removing lock file.");
                std::fs::remove_file(&lock_file).unwrap_or_else(|_| {
                    eprintln!("Failed to remove lock file");
                });
            }
        }
        std::process::exit(0);
    }
    let ui = MainWindow::new()?;
    info!("WSRX initialization started...");
    info!("Setting up data bridges...");
    bridges::setup(&ui);
    bridges::settings::load_config(&ui);
    info!("Launching API server...");
    daemon::setup(&ui);
    info!("Initialization is finished.");
    info!("高性能ですから! (∠・ω< )⌒☆");
    Ok(ui)
}
/// Persist state and tear the app down, then exit the process.
///
/// Saves settings and scopes, removes the log directory and the
/// single-instance lock file, and terminates with exit code 0.
pub fn shutdown(ui: &slint::Weak<MainWindow>) {
    let window = ui.upgrade().unwrap();
    bridges::settings::save_config(&window);
    daemon::save_scopes(ui);
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            eprintln!("Unable to find project config directories");
            return;
        }
    };
    let log_dir = proj_dirs.data_local_dir().join("logs");
    std::fs::remove_dir_all(log_dir).unwrap_or_else(|_| {
        eprintln!("Failed to remove log directory");
    });
    let lock_file = proj_dirs.data_local_dir().join(".rx.is.alive");
    std::fs::remove_file(lock_file).unwrap_or_else(|_| {
        eprintln!("Failed to remove lock file");
    });
    std::process::exit(0);
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/bridges/settings.rs | crates/desktop/src/bridges/settings.rs | use directories::ProjectDirs;
use serde::{Deserialize, Serialize};
use slint::ComponentHandle;
use tracing::{debug, error, warn};
use crate::ui::{MainWindow, SettingsBridge};
/// On-disk desktop configuration (`config.toml`). Missing keys fall back
/// to the `default_*` functions via serde defaults.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WsrxDesktopConfig {
    // UI theme name, e.g. "dark".
    #[serde(default = "default_theme")]
    pub theme: String,
    // Whether closing the window keeps the app running in the tray.
    #[serde(default = "default_running_in_tray")]
    pub running_in_tray: bool,
    // Normalized UI locale, e.g. "en_US" / "zh_CN" / "zh_TW".
    #[serde(default = "default_language")]
    pub language: String,
}
impl Default for WsrxDesktopConfig {
    fn default() -> Self {
        Self {
            theme: default_theme(),
            running_in_tray: default_running_in_tray(),
            language: default_language(),
        }
    }
}
// handle "en-US" / "en" / "zh" / "zh-CN" / "zh-Hans-CN" / "zh-Hant-TW"
// into one of "en_US" / "zh_CN" / "zh_TW"
/// Normalize a system locale tag into one of the bundled translation ids.
///
/// Handles "en-US" / "en" / "zh" / "zh-CN" / "zh-Hans-CN" / "zh-Hant-TW":
/// the second tag segment (region or script) selects the region, and any
/// unsupported base language falls back to `en_US` with a warning.
fn normalize_language(locale: String) -> String {
    let mut segments = locale.split('-');
    let language = segments.next().unwrap_or("en");
    let region_hint = segments.next().map(|seg| match seg {
        "CN" | "Hans" => "CN",
        "TW" | "HK" | "Hant" => "TW",
        _ => "US",
    });
    match language {
        "en" => format!("en_{}", region_hint.unwrap_or("US")),
        "zh" => format!("zh_{}", region_hint.unwrap_or("CN")),
        _ => {
            warn!("Unsupported language: {}, defaulting to en_US", locale);
            "en_US".to_string()
        }
    }
}
/// Serde default: the normalized system locale, or `en_US` when the
/// locale cannot be determined.
fn default_language() -> String {
    sys_locale::get_locale()
        .map(normalize_language)
        .unwrap_or_else(|| {
            warn!("Failed to get system locale, defaulting to en_US");
            "en_US".to_string()
        })
}
/// Serde default for the UI theme.
fn default_theme() -> String {
    String::from("dark")
}
/// Serde default: tray mode starts disabled.
const fn default_running_in_tray() -> bool {
    false
}
/// Load `config.toml` into the settings bridge and hook up the language
/// change callback.
///
/// Any read or parse failure falls back to `WsrxDesktopConfig::default()`.
pub fn load_config(window: &MainWindow) {
    let bridge = window.global::<SettingsBridge>();
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            error!("Unable to find project config directories");
            return;
        }
    };
    let config_file = proj_dirs.config_dir().join("config.toml");
    // A missing file is logged but tolerated: parsing "" yields defaults.
    let config = match std::fs::read_to_string(&config_file) {
        Ok(config) => config,
        Err(e) => {
            error!("Failed to read config file: {}", e);
            "".to_string()
        }
    };
    let config: WsrxDesktopConfig = toml::from_str(&config).unwrap_or_default();
    debug!("Loaded config: {:?}", config);
    bridge.set_theme(config.theme.into());
    slint::select_bundled_translation(&config.language).ok();
    bridge.set_language(config.language.into());
    bridge.set_running_in_tray(config.running_in_tray);
    let window_clone = window.as_weak();
    // Live language switch: update the bridge and swap the bundled
    // translation in place.
    bridge.on_change_language(move |lang| {
        let window = window_clone.upgrade().unwrap();
        let bridge = window.global::<SettingsBridge>();
        bridge.set_language(lang.clone());
        slint::select_bundled_translation(lang.as_str()).ok();
    });
}
/// Serialize the current settings-bridge values back to `config.toml`.
///
/// Failures are logged but never abort the caller (used during shutdown).
pub fn save_config(window: &MainWindow) {
    let bridge = window.global::<SettingsBridge>();
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            error!("Unable to find project config directories");
            return;
        }
    };
    let config_file = proj_dirs.config_dir().join("config.toml");
    let config = WsrxDesktopConfig {
        theme: bridge.get_theme().into(),
        running_in_tray: bridge.get_running_in_tray(),
        language: bridge.get_language().into(),
    };
    debug!("Saving config: {:?}", config);
    // On a serialization error an empty file is written (logged above).
    let config = toml::to_string(&config).unwrap_or_else(|e| {
        error!("Failed to serialize config: {}", e);
        String::new()
    });
    if let Err(e) = std::fs::create_dir_all(proj_dirs.config_dir()) {
        error!("Failed to create config directory: {}", e);
        return;
    }
    if let Err(e) = std::fs::write(&config_file, config) {
        error!("Failed to write config file: {}", e);
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/bridges/ui_state.rs | crates/desktop/src/bridges/ui_state.rs | use slint::{ComponentHandle, Model, SharedString, VecModel};
use tracing::debug;
use crate::ui::{Instance, InstanceBridge, MainWindow, Scope, ScopeBridge, UiState};
/// Wire the `UiState` bridge: on scope change, show a "pending"
/// placeholder immediately, then resolve the real scope asynchronously
/// from the `ScopeBridge` list and refresh the scoped instance view.
pub fn setup(window: &MainWindow) {
    let bridge = window.global::<UiState>();
    let window_weak = window.as_weak();
    bridge.on_change_scope(move |scope: SharedString| {
        let window = window_weak.clone().upgrade().unwrap();
        let ui_state = window.global::<UiState>();
        // Optimistic placeholder shown while the lookup below runs.
        ui_state.set_scope(Scope {
            host: "localhost".into(),
            name: "localhost".into(),
            state: "pending".into(),
            features: "".into(),
            settings: "{}".into(),
        });
        let window_weak = window_weak.clone();
        match slint::spawn_local(async move {
            let window = window_weak.clone().upgrade().unwrap();
            let ui_state = window.global::<UiState>();
            let scope_bridge = window.global::<ScopeBridge>();
            let scopes = scope_bridge.get_scopes();
            let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
            // Scopes are matched by host; a miss keeps the placeholder.
            let found_scope = scopes.iter().find(|s| s.host == scope);
            if let Some(found_scope) = found_scope {
                ui_state.set_scope(found_scope);
                debug!("Scope found: {scope}");
            } else {
                debug!("Scope not found: {scope}");
            }
            sync_scoped_instance(window_weak);
        }) {
            Ok(_) => {}
            Err(e) => {
                debug!("Failed to change scope: {e}");
            }
        }
    });
}
/// Rebuild the `scoped_instances` model: copy over every instance whose
/// `scope_host` matches the current page.
///
/// NOTE(review): the filter key is `ui_state.get_page()`, not the scope —
/// confirm that page ids equal scope hosts here.
pub fn sync_scoped_instance(window: slint::Weak<MainWindow>) {
    let window = window.upgrade().unwrap();
    let ui_state = window.global::<UiState>();
    let instance_bridge = window.global::<InstanceBridge>();
    let instances = instance_bridge.get_instances();
    let instances = instances
        .as_any()
        .downcast_ref::<VecModel<Instance>>()
        .unwrap();
    let scoped_instances = instance_bridge.get_scoped_instances();
    let scoped_instances = scoped_instances
        .as_any()
        .downcast_ref::<VecModel<Instance>>()
        .unwrap();
    let current_scope = ui_state.get_page().to_string();
    scoped_instances.clear();
    for instance in instances.iter() {
        if instance.scope_host == current_scope {
            scoped_instances.push(instance.clone());
        }
    }
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/bridges/mod.rs | crates/desktop/src/bridges/mod.rs | pub mod settings;
pub mod system_info;
pub mod ui_state;
pub mod window_control;
use crate::ui::MainWindow;
/// Wire every UI bridge to the main window (settings loading is done
/// separately by the launcher).
pub fn setup(window: &MainWindow) {
    window_control::setup(window);
    system_info::setup(window);
    ui_state::setup(window);
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/bridges/system_info.rs | crates/desktop/src/bridges/system_info.rs | use std::rc::Rc;
use async_compat::Compat;
use chrono::{DateTime, Utc};
use directories::ProjectDirs;
use local_ip_address::list_afinet_netifas;
use serde::Deserialize;
use slint::{ComponentHandle, Model, ModelRc, SharedString, VecModel};
use sysinfo::System;
use tokio::{fs, io::AsyncBufReadExt, time::timeout};
use tracing::{debug, error, warn};
use crate::{
WSRX_FULL_VERSION,
ui::{Log, MainWindow, SystemInfoBridge},
};
/// Populate the `SystemInfoBridge`: OS tag, system summary, version,
/// network interface and log models, and the open-link / open-logs /
/// refresh callbacks. Also kicks off the update check and log streaming.
pub fn setup(window: &MainWindow) {
    let bridge = window.global::<SystemInfoBridge>();
    #[cfg(target_os = "linux")]
    bridge.set_os("linux".into());
    #[cfg(target_os = "windows")]
    bridge.set_os("windows".into());
    #[cfg(target_os = "macos")]
    bridge.set_os("macos".into());
    bridge.set_info(
        format!(
            "System : {}\nLocale : {}\nCPU : {}\nKernel : {}\nWSRX : {}",
            System::name().unwrap_or_else(|| "Unknown".into()),
            sys_locale::get_locale().unwrap_or_else(|| "Unknown".into()),
            System::cpu_arch(),
            System::kernel_long_version(),
            WSRX_FULL_VERSION,
        )
        .into(),
    );
    bridge.set_version(env!("CARGO_PKG_VERSION").into());
    // Seed the interface list; on_refresh_interfaces replaces it with the
    // real enumeration on demand.
    let network_interfaces_model: Rc<VecModel<SharedString>> =
        Rc::new(VecModel::from(vec!["127.0.0.1".into(), "0.0.0.0".into()]));
    let network_interfaces = ModelRc::from(network_interfaces_model.clone());
    bridge.set_interfaces(network_interfaces);
    let logs_model: Rc<VecModel<Log>> = Rc::new(VecModel::default());
    let logs = ModelRc::from(logs_model.clone());
    bridge.set_logs(logs);
    bridge.on_refresh_interfaces(move || {
        let model = network_interfaces_model.clone();
        refresh_network_interfaces(model);
    });
    bridge.on_open_link(move |url| {
        open::that_detached(&url).unwrap_or_else(|_| {
            tracing::error!("Failed to open link {url} in default browser.");
        });
    });
    bridge.on_open_logs(move || {
        let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
            Some(dirs) => dirs,
            None => {
                error!("Unable to find project config directories");
                return;
            }
        };
        let log_dir = proj_dirs.data_local_dir().join("logs");
        open::that_detached(&log_dir).unwrap_or_else(|_| {
            tracing::error!("Failed to open logs directory.");
        });
    });
    check_for_updates(window);
    stream_logs(&window.as_weak());
}
/// Repopulate `model` with every local IPv4 interface address, then
/// append the wildcard `0.0.0.0` entry.
pub fn refresh_network_interfaces(model: Rc<VecModel<SharedString>>) {
    let interfaces = list_afinet_netifas().unwrap_or_default();
    model.clear();
    for (_name, addr) in interfaces.into_iter().filter(|(_, a)| !a.is_ipv6()) {
        model.push(SharedString::from(addr.to_string()));
    }
    model.push("0.0.0.0".into());
}
/// Query the GitHub releases API and set `has_updates` on the bridge when
/// the latest tag differs from this build's `CARGO_PKG_VERSION`.
///
/// Runs on the Slint event loop via `spawn_local`; all failures are
/// logged and leave the flag untouched.
fn check_for_updates(window: &MainWindow) {
    let window_weak = window.as_weak();
    let _ = slint::spawn_local(Compat::new(async move {
        let window = window_weak.unwrap();
        let bridge = window.global::<SystemInfoBridge>();
        debug!("Checking for updates...");
        let client = reqwest::Client::builder()
            .user_agent(format!("wsrx/{}", env!("CARGO_PKG_VERSION")))
            .build()
            .unwrap();
        let response = match client
            .get("https://api.github.com/repos/XDSEC/WebSocketReflectorX/releases/latest")
            .send()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                error!("Failed to fetch the latest version: {}", e);
                return;
            }
        };
        if response.status().is_success() {
            let json: serde_json::Value = match response.json().await {
                Ok(json) => json,
                Err(e) => {
                    error!("Failed to parse the response: {}", e);
                    return;
                }
            };
            let version = json["tag_name"].as_str().unwrap_or("0.0.0");
            let current_version = env!("CARGO_PKG_VERSION");
            // Plain inequality: any differing tag counts as an update.
            if version != current_version {
                bridge.set_has_updates(true);
                warn!("Update available: {}", version);
            } else {
                bridge.set_has_updates(false);
                debug!("No update available.");
            }
        } else {
            error!(
                "Failed to fetch the latest version: {} {:?}",
                response.status(),
                response.text().await
            );
        }
    }));
}
/// `fields` object of a JSON log line; only the message is read here.
#[derive(Clone, Debug, Deserialize, Default)]
struct LogEntryFields {
    pub message: String,
}
/// One JSON-formatted log line as written by the file log layer.
#[derive(Clone, Debug, Deserialize, Default)]
struct LogEntry {
    pub timestamp: DateTime<Utc>,
    pub level: String,
    pub target: String,
    pub fields: LogEntryFields,
}
fn stream_logs(window: &slint::Weak<MainWindow>) {
let window = window.clone();
slint::spawn_local(Compat::new(async move {
let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
Some(dirs) => dirs,
None => {
eprintln!("Unable to find project config directories");
return;
}
};
let log_file = proj_dirs.data_local_dir().join("logs").join("wsrx.log");
let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(1));
let mut lines = fs::File::open(&log_file)
.await
.map(tokio::io::BufReader::new)
.map(tokio::io::BufReader::lines)
.unwrap();
let interval = tokio::time::Duration::from_secs(5);
loop {
timer.tick().await;
while let Ok(log) = timeout(interval, lines.next_line()).await {
let log = match log {
Ok(Some(log)) => log,
Ok(None) => break,
Err(e) => {
error!("failed to read log: {:?}", e);
break;
}
};
let log_entry = serde_json::from_str::<LogEntry>(&log).unwrap_or_else(|_| {
error!("failed to parse log: {}", log);
LogEntry::default()
});
let window = window.clone();
slint::invoke_from_event_loop(move || {
let window = window.upgrade().unwrap();
let system_info_bridge = window.global::<SystemInfoBridge>();
let local_time = log_entry.timestamp.with_timezone(&chrono::Local);
let log = Log {
timestamp: format!("{}", local_time.format("%Y-%m-%d %H:%M:%S")).into(),
level: log_entry.level.into(),
target: log_entry.target.into(),
message: log_entry.fields.message.into(),
};
let logs = system_info_bridge.get_logs();
let logs = logs.as_any().downcast_ref::<VecModel<Log>>().unwrap();
logs.push(log.to_owned());
})
.ok();
}
}
}))
.ok();
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/bridges/window_control.rs | crates/desktop/src/bridges/window_control.rs | use std::collections::HashMap;
use i_slint_backend_winit::{EventResult, WinitWindowAccessor};
use slint::ComponentHandle;
use winit::window::ResizeDirection;
use crate::{
launcher,
ui::{MainWindow, WindowControlBridge},
};
pub fn setup(window: &MainWindow) {
let mut resize_map = HashMap::new();
resize_map.insert("r".to_string(), ResizeDirection::East);
resize_map.insert("t".to_string(), ResizeDirection::North);
resize_map.insert("tr".to_string(), ResizeDirection::NorthEast);
resize_map.insert("tl".to_string(), ResizeDirection::NorthWest);
resize_map.insert("b".to_string(), ResizeDirection::South);
resize_map.insert("br".to_string(), ResizeDirection::SouthEast);
resize_map.insert("bl".to_string(), ResizeDirection::SouthWest);
resize_map.insert("l".to_string(), ResizeDirection::West);
let window_weak = window.as_weak();
window.window().on_winit_window_event(move |w, e| {
// println!("{:?}", e);
match e {
winit::event::WindowEvent::RedrawRequested => {
let window = window_weak.unwrap();
if window.get_main_window_maximized() != w.is_maximized() {
window.set_main_window_maximized(w.is_maximized());
}
if window.get_main_window_minimized() != w.is_minimized() {
window.set_main_window_minimized(w.is_minimized());
}
EventResult::Propagate
}
winit::event::WindowEvent::CloseRequested => {
launcher::shutdown(&window_weak);
EventResult::PreventDefault
}
_ => EventResult::Propagate,
}
});
let window_weak = window.as_weak();
window.on_main_window_resize(move |resize_direction_str| {
let direction = resize_map
.get(&resize_direction_str.to_lowercase())
.unwrap();
let app_clone = window_weak.unwrap();
app_clone.window().with_winit_window(|winit_window| {
let _ = winit_window.drag_resize_window(*direction);
});
});
let window_control_bridge = window.global::<WindowControlBridge>();
let window_clone_pin = window.as_weak();
window_control_bridge.on_start_drag(move || {
let window_clone = window_clone_pin.unwrap();
window_clone.window().with_winit_window(|winit_window| {
winit_window.drag_window().ok();
});
});
let window_clone_pin = window.as_weak();
window_control_bridge.on_close(move || {
// TODO: system tray implementation
launcher::shutdown(&window_clone_pin);
});
let window_clone_pin = window.as_weak();
window_control_bridge.on_maximize(move || {
let window_clone = window_clone_pin.unwrap();
window_clone.window().with_winit_window(|winit_window| {
#[cfg(target_os = "macos")]
{
use winit::window::Fullscreen;
if winit_window.fullscreen().is_some() {
winit_window.set_fullscreen(None);
} else {
winit_window.set_fullscreen(Some(Fullscreen::Borderless(
winit_window.current_monitor(),
)));
}
}
#[cfg(not(target_os = "macos"))]
{
winit_window.set_maximized(!winit_window.is_maximized());
}
});
});
let window_clone_pin = window.as_weak();
window_control_bridge.on_minimize(move || {
let window_clone = window_clone_pin.unwrap();
window_clone.window().with_winit_window(|winit_window| {
winit_window.set_minimized(!winit_window.is_minimized().unwrap_or(false));
});
});
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/daemon/api_controller.rs | crates/desktop/src/daemon/api_controller.rs | use std::time::Duration;
use axum::{
Json,
body::Body,
extract::{FromRequest, Request, State},
http::{HeaderMap, Method, StatusCode},
response::{IntoResponse, Response},
routing::{get, post},
};
use i_slint_backend_winit::WinitWindowAccessor;
use serde::{Deserialize, Serialize};
use slint::{ComponentHandle, Model, ToSharedString, VecModel};
use tower_http::{
cors::{AllowOrigin, Any, CorsLayer},
trace::TraceLayer,
};
use tracing::{Span, debug};
use wsrx::utils::create_tcp_listener;
use super::latency_worker::update_instance_latency;
use crate::{
bridges::ui_state::sync_scoped_instance,
daemon::{
latency_worker::update_instance_state,
model::{FeatureFlags, InstanceData, ProxyInstance, ScopeData, ServerState},
},
ui::{Instance, InstanceBridge, Scope, ScopeBridge},
};
pub fn router(state: ServerState) -> axum::Router {
let cors_state = state.clone();
let cors_layer = CorsLayer::new()
.allow_methods([Method::GET, Method::POST, Method::PATCH, Method::DELETE])
.allow_headers(Any)
.allow_origin(AllowOrigin::async_predicate(
|origin, _request_parts| async move {
let scopes = cors_state.scopes.read().await;
let allowed_origin = scopes
.iter()
.map(|scope| scope.host.to_string())
.collect::<Vec<_>>();
for o in allowed_origin.iter() {
if origin.to_str().unwrap_or("").ends_with(o) {
return true;
}
}
false
},
));
let any_origin_layer = CorsLayer::new()
.allow_methods([Method::POST])
.allow_headers(Any)
.allow_origin(Any);
axum::Router::new()
.merge(
axum::Router::new()
.route(
"/pool",
get(get_instances)
.post(launch_instance)
.delete(close_instance),
)
.route("/popup", post(popup_window))
.layer(cors_layer)
.with_state(state.clone()),
)
.merge(
axum::Router::new()
.route(
"/connect",
get(get_control_status)
.post(request_control)
.patch(update_website_info),
)
.route(
"/version",
get(|| async { Json(env!("CARGO_PKG_VERSION")) }),
)
.layer(any_origin_layer)
.with_state(state.clone()),
)
.layer(
TraceLayer::new_for_http()
.make_span_with(|request: &Request<Body>| {
tracing::info_span!(
"http",
method = %request.method(),
uri = %request.uri().path(),
)
})
.on_request(())
.on_failure(())
.on_response(|response: &Response, latency: Duration, _span: &Span| {
debug!(
"API Request [{}] in {}ms",
response.status(),
latency.as_millis()
);
}),
)
.with_state::<()>(state)
}
#[derive(Serialize)]
struct InstanceResponse {
label: String,
remote: String,
local: String,
#[deprecated]
from: String,
#[deprecated]
to: String,
latency: i32,
}
impl From<&ProxyInstance> for InstanceResponse {
#[allow(deprecated)]
fn from(instance: &ProxyInstance) -> Self {
InstanceResponse {
label: instance.label.clone(),
remote: instance.remote.clone(),
local: instance.local.clone(),
from: instance.local.clone(),
to: instance.remote.clone(),
latency: instance.latency,
}
}
}
async fn get_instances(
State(state): State<ServerState>, headers: HeaderMap,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default();
let instances = state.instances.read().await;
let instances: Vec<InstanceResponse> = instances
.iter()
.filter_map(|instance| {
if instance.scope_host.as_str() == scope {
Some(instance.into())
} else {
None
}
})
.collect::<Vec<_>>();
Ok(Json(instances))
}
async fn launch_instance(
State(state): State<ServerState>, headers: HeaderMap,
axum::Json(instance_data): axum::Json<InstanceData>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default()
.to_owned();
let listener = create_tcp_listener(&instance_data.local).await?;
let local = listener
.local_addr()
.expect("failed to bind port")
.to_string();
let mut instances = state.instances.write().await;
if instances.iter().any(|i| i.local.as_str() == local) {
return Err((
StatusCode::BAD_REQUEST,
format!("The local address {local} is already taken by another instance"),
));
}
if let Some(instance) = instances
.iter_mut()
.find(|i| i.remote == instance_data.remote && i.scope_host == scope)
{
// test instance config changes
if instance.label != instance_data.label {
instance.label = instance_data.label.clone();
}
return Ok(Json(instance.data.clone()));
}
let instance = ProxyInstance::new(
instance_data.label.clone(),
scope.clone(),
listener,
instance_data.remote.clone(),
);
let instance_resp: InstanceData = (&instance).into();
instances.push(instance);
drop(instances);
let state_clone = state.clone();
let instance = instance_resp.clone();
tokio::spawn(async move {
let client = reqwest::Client::new();
match update_instance_latency(&instance, &client).await {
Ok(elapsed) => update_instance_state(state_clone, &instance, elapsed).await,
Err(_) => update_instance_state(state_clone, &instance, -1).await,
};
});
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let instance_bridge = ui_handle.global::<InstanceBridge>();
let instances = instance_bridge.get_instances();
let instances = instances
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
let instance = Instance {
label: instance_data.label.as_str().into(),
remote: instance_data.remote.as_str().into(),
local: local.as_str().into(),
latency: -1,
scope_host: scope.as_str().into(),
};
instances.push(instance);
sync_scoped_instance(ui_handle.as_weak());
}) {
Ok(_) => {
debug!("Added instance to UI");
Ok(Json(instance_resp))
}
Err(e) => {
debug!("Failed to update UI: {e}");
Err((
StatusCode::INTERNAL_SERVER_ERROR,
"failed to update UI".to_owned(),
))
}
}
}
#[derive(Deserialize)]
struct CloseInstanceRequest {
#[serde(alias = "key")]
pub local: String,
}
async fn close_instance(
State(state): State<ServerState>, headers: HeaderMap,
axum::Json(req): axum::Json<CloseInstanceRequest>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default()
.to_owned();
let mut instances = state.instances.write().await;
let Some(tunnel) = instances.iter().find(|i| i.local.as_str() == req.local) else {
return Err((
StatusCode::BAD_REQUEST,
format!("Tunnel {} not found", req.local),
));
};
if tunnel.scope_host.as_str() != scope {
return Err((
StatusCode::BAD_REQUEST,
format!("Tunnel {} not found in scope {}", req.local, scope),
));
}
instances.retain(|i| i.local.as_str() != req.local);
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let instance_bridge = ui_handle.global::<InstanceBridge>();
let instances = instance_bridge.get_instances();
let instances = instances
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
let mut index = 0;
for i in instances.iter() {
if i.local == req.local {
break;
}
index += 1;
}
instances.remove(index);
sync_scoped_instance(ui_handle.as_weak());
}) {
Ok(_) => {
debug!("Removed instance from UI");
}
Err(e) => {
debug!("Failed to sync state: {e}");
return Err((
StatusCode::INTERNAL_SERVER_ERROR,
"failed to update UI".to_owned(),
));
}
}
Ok(StatusCode::OK)
}
async fn get_control_status(
State(state): State<ServerState>, headers: HeaderMap,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let req_scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default()
.to_owned();
let scopes = state.scopes.read().await;
let scope = scopes.iter().find(|s| s.host == req_scope);
if let Some(scope) = scope {
if scope.state == "pending" {
Ok(StatusCode::CREATED)
} else {
Ok(StatusCode::ACCEPTED)
}
} else {
Err((
StatusCode::FORBIDDEN,
format!("Scope {req_scope} not found"),
))
}
}
async fn request_control(
State(state): State<ServerState>, headers: HeaderMap, req: Request<Body>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let req_scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default()
.to_owned();
let json_body = axum::Json::<ScopeData>::from_request(req, &state)
.await
.ok();
let (scope_name, scope_features, scope_settings) = if let Some(Json(ScopeData {
name,
features,
settings,
..
})) = json_body
{
(name, features, settings)
} else {
(req_scope.clone(), FeatureFlags::Basic, Default::default())
};
let mut scopes = state.scopes.write().await;
if scopes.iter().any(|scope| scope.host == req_scope) {
return Ok(StatusCode::ACCEPTED);
}
let scope_name = if scope_name.is_empty() {
req_scope.clone()
} else {
scope_name.clone()
};
let scope = ScopeData {
name: scope_name.clone(),
host: req_scope.clone(),
state: "pending".to_string(),
features: scope_features,
settings: scope_settings.clone(),
};
scopes.push(scope);
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let scope_bridge = ui_handle.global::<ScopeBridge>();
let scopes = scope_bridge.get_scopes();
let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
if scopes.iter().any(|scope| scope.host == req_scope) {
return;
}
let scope = Scope {
host: req_scope.clone().into(),
name: scope_name.into(),
state: "pending".into(),
features: scope_features.to_shared_string(),
settings: serde_json::to_string(&scope_settings)
.unwrap_or("{}".to_string())
.into(),
};
scopes.push(scope);
}) {
Ok(_) => {
debug!("Added scope to UI");
Ok(StatusCode::OK)
}
Err(e) => {
debug!("Failed to update UI: {e}");
Err((
StatusCode::INTERNAL_SERVER_ERROR,
"failed to sync state".to_owned(),
))
}
}
}
async fn update_website_info(
State(state): State<ServerState>, headers: HeaderMap,
axum::Json(scope_data): axum::Json<ScopeData>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let req_scope = headers
.get("Origin")
.and_then(|h| h.to_str().ok())
.unwrap_or_default()
.to_owned();
let mut scopes = state.scopes.write().await;
if let Some(scope) = scopes.iter_mut().find(|s| s.host == req_scope) {
scope.name = scope_data.name.clone();
scope.features = scope_data.features;
let state_d = scope.state.clone();
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let scope_bridge = ui_handle.global::<ScopeBridge>();
let scopes = scope_bridge.get_scopes();
let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
let mut index = 0;
for i in scopes.iter() {
if i.host == req_scope {
break;
}
index += 1;
}
let scope = Scope {
host: req_scope.clone().into(),
name: scope_data.name.clone().into(),
state: state_d.into(),
features: scope_data.features.to_shared_string(),
settings: serde_json::to_string(&scope_data.settings)
.unwrap_or("{}".to_string())
.into(),
};
scopes.set_row_data(index, scope);
}) {
Ok(_) => {
debug!("Updated scope in UI");
}
Err(e) => {
debug!("Failed to update UI: {e}");
return Err((
StatusCode::INTERNAL_SERVER_ERROR,
"failed to update UI".to_owned(),
));
}
}
Ok(StatusCode::OK)
} else {
Err((
StatusCode::FORBIDDEN,
format!("Scope {req_scope} not found"),
))
}
}
async fn popup_window(
State(state): State<ServerState>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
ui_handle.show().ok();
ui_handle.window().with_winit_window(|winit_window| {
winit_window.set_minimized(false);
});
})
.ok();
Ok(StatusCode::OK)
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/daemon/ui_controller.rs | crates/desktop/src/daemon/ui_controller.rs | use slint::{ComponentHandle, Model, ToSharedString, VecModel};
use tracing::{debug, info, warn};
use wsrx::utils::create_tcp_listener;
use super::latency_worker::update_instance_latency;
use crate::{
bridges::ui_state::sync_scoped_instance,
daemon::{
default_label,
latency_worker::update_instance_state,
model::{ProxyInstance, ServerState},
},
ui::{Instance, InstanceBridge, MainWindow, Scope, ScopeBridge},
};
pub async fn on_instance_add(state: &ServerState, remote: &str, local: &str) {
let listener = match create_tcp_listener(local).await {
Ok(listener) => listener,
Err(_) => return,
};
let local = listener
.local_addr()
.expect("failed to bind port")
.to_string();
if state
.instances
.read()
.await
.iter()
.any(|i| i.local.as_str() == local)
{
warn!("Instance already exists: {local}");
return;
}
let remote = remote.to_string();
let scope = "default-scope".to_string();
let instance = ProxyInstance::new(default_label(), &scope, listener, &remote);
let state_clone = state.clone();
let instance_data = (&instance).into();
tokio::spawn(async move {
let client = reqwest::Client::new();
match update_instance_latency(&instance_data, &client).await {
Ok(elapsed) => update_instance_state(state_clone, &instance_data, elapsed).await,
Err(_) => update_instance_state(state_clone, &instance_data, -1).await,
};
});
let label = instance.label.clone();
state.instances.write().await.push(instance);
let state = state.clone();
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let instance_bridge = ui_handle.global::<InstanceBridge>();
let instances = instance_bridge.get_instances();
let instances = instances
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
let instance = Instance {
label: label.as_str().into(),
remote: remote.as_str().into(),
local: local.as_str().into(),
latency: -1,
scope_host: scope.as_str().into(),
};
instances.push(instance);
sync_scoped_instance(ui_handle.as_weak());
}) {
Ok(_) => {
debug!("Added instance to UI");
}
Err(e) => {
debug!("Failed to update UI: {e}");
}
}
}
pub async fn on_instance_del(state: &ServerState, local: &str) {
state
.instances
.write()
.await
.retain(|instance| instance.local.as_str() != local);
let state = state.clone();
let local = local.to_string();
match slint::invoke_from_event_loop(move || {
let ui_handle = state.ui.upgrade().unwrap();
let instance_bridge = ui_handle.global::<InstanceBridge>();
let instances = instance_bridge.get_instances();
let instances = instances
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
let mut index = 0;
for i in instances.iter() {
if i.local == local {
break;
}
index += 1;
}
instances.remove(index);
sync_scoped_instance(ui_handle.as_weak());
}) {
Ok(_) => {
debug!("Removed instance from UI");
}
Err(e) => {
debug!("Failed to sync state: {e}");
}
}
}
pub async fn on_scope_allow(state: &ServerState, ui: slint::Weak<MainWindow>, scope_host: &str) {
let mut scopes = state.scopes.write().await;
let scope_name;
let scope_features;
let scope_settings;
if let Some(scope) = scopes.iter_mut().find(|s| s.host == scope_host) {
scope.state = "allowed".to_string();
scope_name = scope.name.clone();
scope_features = scope.features;
scope_settings = scope.settings.clone();
} else {
return;
}
let scope_host = scope_host.to_string();
match slint::invoke_from_event_loop(move || {
let ui_handle = ui.upgrade().unwrap();
let scope_bridge = ui_handle.global::<ScopeBridge>();
let scopes = scope_bridge.get_scopes();
let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
let mut index = 0;
for s in scopes.iter() {
if s.host == scope_host {
break;
}
index += 1;
}
if index < scopes.row_count() {
scopes.set_row_data(
index,
Scope {
host: scope_host.into(),
name: scope_name.into(),
state: "allowed".into(),
features: scope_features.to_shared_string(),
settings: serde_json::to_string(&scope_settings)
.unwrap_or("{}".to_string())
.into(),
},
);
}
}) {
Ok(_) => {
debug!("Updated scope state to allowed");
}
Err(e) => {
debug!("Failed to update UI: {e}");
}
}
}
pub async fn on_scope_del(state: &ServerState, ui: slint::Weak<MainWindow>, scope_host: &str) {
let removed_scope = {
let mut scopes = state.scopes.write().await;
scopes
.iter()
.position(|s| s.host == scope_host)
.map(|index| scopes.remove(index))
};
match removed_scope {
Some(scope) => {
state
.instances
.write()
.await
.retain(|i| i.scope_host.as_str() != scope.host);
info!("Scope {} removed", scope.host);
}
None => return,
};
let scope_host = scope_host.to_string();
let state = state.clone();
let instances: Vec<Instance> = state
.instances
.read()
.await
.iter()
.filter(|i| i.scope_host.as_str() == scope_host)
.map(Into::into)
.collect::<Vec<_>>();
match slint::invoke_from_event_loop(move || {
let ui_handle = ui.upgrade().unwrap();
let scope_bridge = ui_handle.global::<ScopeBridge>();
let scopes = scope_bridge.get_scopes();
let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
let mut index = 0;
for s in scopes.iter() {
if s.host == scope_host {
break;
}
index += 1;
}
if index < scopes.row_count() {
scopes.remove(index);
}
let instance_bridge = ui_handle.global::<InstanceBridge>();
let instances_rc = instance_bridge.get_instances();
let instances_rc = instances_rc
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
instances_rc.clear();
for instance in instances.iter() {
instances_rc.push(instance.clone());
}
sync_scoped_instance(ui_handle.as_weak());
}) {
Ok(_) => {
debug!("Removed scope from UI");
}
Err(e) => {
debug!("Failed to update UI: {e}");
}
}
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/daemon/latency_worker.rs | crates/desktop/src/daemon/latency_worker.rs | use reqwest::Method;
use slint::{ComponentHandle, Model, VecModel};
use thiserror::Error;
use tracing::{debug, warn};
use super::{
model::{FeatureFlags, InstanceData, ServerState},
ui_controller::on_instance_del,
};
use crate::{
bridges::ui_state::sync_scoped_instance,
ui::{Instance, InstanceBridge},
};
pub async fn start(state: ServerState) {
loop {
let instances = state.instances.read().await;
let instances_pure = instances
.iter()
.map(|instance| instance.into())
.collect::<Vec<InstanceData>>();
drop(instances);
let client = reqwest::Client::new();
for instance in instances_pure {
let instance = instance.clone();
let client = client.clone();
let state = state.clone();
tokio::spawn(async move {
let result = update_instance_latency(&instance, &client).await;
if let Ok(elapsed) = result {
update_instance_state(state.clone(), &instance, elapsed).await;
} else {
update_instance_state(state.clone(), &instance, -1).await;
}
if let Err(e) = result {
pingfall(state.clone(), instance.clone(), e).await;
}
});
}
sync_scoped_instance(state.ui.clone());
// Sleep for 5 seconds
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
}
}
#[derive(Debug, Error)]
pub enum LatencyError {
#[error("Request error: {0}")]
Rewqest(#[from] reqwest::Error),
#[error("Non-success status code")]
NonSuccessStatus(u16),
}
pub async fn update_instance_latency(
instance: &InstanceData, client: &reqwest::Client,
) -> Result<i32, LatencyError> {
let req = client
.request(Method::OPTIONS, instance.remote.replace("ws", "http"))
.header("User-Agent", format!("wsrx/{}", env!("CARGO_PKG_VERSION")))
.build()?;
let start_time = std::time::Instant::now();
let resp = client.execute(req).await?;
let elapsed = if resp.status().is_success() {
// always > 0
start_time.elapsed().as_millis() as i32 / 2
} else {
debug!("Failed to ping instance: {}", resp.status());
return Err(LatencyError::NonSuccessStatus(resp.status().as_u16()));
};
Ok(elapsed)
}
pub async fn update_instance_state(state: ServerState, instance: &InstanceData, elapsed: i32) {
for proxy_instance in state.instances.write().await.iter_mut() {
if instance.remote != proxy_instance.remote {
continue;
}
proxy_instance.latency = elapsed;
let window = state.ui.clone();
let instance = instance.clone();
let _ = slint::invoke_from_event_loop(move || {
let window = window.upgrade().unwrap();
let instance_bridge = window.global::<InstanceBridge>();
let instances_rc = instance_bridge.get_instances();
let instances_rc = instances_rc
.as_any()
.downcast_ref::<VecModel<Instance>>()
.unwrap();
if let Some(index) = instances_rc
.iter()
.position(|i| i.local == instance.local.as_str())
{
instances_rc.set_row_data(
index,
Instance {
local: instance.local.as_str().into(),
remote: instance.remote.as_str().into(),
latency: elapsed,
label: instance.label.as_str().into(),
scope_host: instance.scope_host.as_str().into(),
},
);
}
});
break;
}
}
async fn pingfall(state: ServerState, instance: InstanceData, err: LatencyError) {
warn!(
"Pingfall triggered for instance {} due to error: {err:?}",
instance.local
);
let scopes = state.scopes.read().await;
let scope = scopes
.iter()
.find(|scope| scope.host == instance.scope_host.as_str());
debug!("Pingfall settings: {:?}", scope);
if let Some(scope) = scope
&& scope.features.contains(FeatureFlags::PingFall)
{
let settings = scope.settings.get("pingfall");
if let Some(settings) = settings {
let pingfall_settings: super::model::PingFallSettings =
serde_json::from_value(settings.to_owned()).unwrap_or_default();
match err {
LatencyError::NonSuccessStatus(code) => {
if pingfall_settings.status.contains(&code)
|| pingfall_settings.status.is_empty()
{
on_instance_del(&state, &instance.local).await;
}
}
LatencyError::Rewqest(_) => {
if pingfall_settings.drop_unknown {
on_instance_del(&state, &instance.local).await;
}
}
}
}
}
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/daemon/model.rs | crates/desktop/src/daemon/model.rs | use std::{collections::HashMap, fmt::Display, sync::Arc};
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::{net::TcpListener, sync::RwLock};
use wsrx::tunnel::Tunnel;
use super::default_label;
use crate::ui::{Instance, MainWindow};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstanceData {
#[serde(default = "default_label")]
pub label: String,
#[serde(alias = "to")]
pub remote: String,
#[serde(alias = "from")]
pub local: String,
#[serde(default)]
pub latency: i32,
#[serde(default)]
pub scope_host: String,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ScopeData {
pub host: String,
pub name: String,
pub state: String,
pub features: FeatureFlags,
#[serde(default)]
pub settings: HashMap<String, Value>,
}
#[derive(Clone)]
pub struct ServerState {
pub ui: slint::Weak<MainWindow>,
pub instances: Arc<RwLock<Vec<ProxyInstance>>>,
pub scopes: Arc<RwLock<Vec<ScopeData>>>,
}
bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct FeatureFlags: u32 {
const Basic = 0b00000001;
const PingFall = 0b00000010;
}
}
#[allow(dead_code)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BasicSettings {}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PingFallSettings {
pub status: Vec<u16>,
pub drop_unknown: bool,
}
const FEATURE_MAP: &[(&str, FeatureFlags)] = &[
("basic", FeatureFlags::Basic),
("pingfall", FeatureFlags::PingFall),
];
impl FeatureFlags {
pub fn as_feature_vec(&self) -> Vec<&'static str> {
let mut flags = Vec::new();
for (flag_str, flag) in FEATURE_MAP {
if self.contains(*flag) {
flags.push(*flag_str);
}
}
flags
}
}
impl Display for FeatureFlags {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.as_feature_vec().join(","))
}
}
impl Serialize for FeatureFlags {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.as_feature_vec().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for FeatureFlags {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Ok(Vec::<String>::deserialize(deserializer)?.into_iter().into())
}
}
impl<T> From<T> for FeatureFlags
where
T: Iterator<Item = String>,
{
fn from(flags: T) -> Self {
let mut feature_flags = FeatureFlags::empty();
for flag in flags {
for (flag_str, flag_value) in FEATURE_MAP {
if flag == *flag_str {
feature_flags.insert(*flag_value);
}
}
}
feature_flags
}
}
pub struct ProxyInstance {
pub data: InstanceData,
_tunnel: Tunnel,
}
impl ProxyInstance {
pub fn new(
label: impl AsRef<str>, scope_host: impl AsRef<str>, listener: TcpListener,
remote: impl AsRef<str>,
) -> Self {
let tunnel = Tunnel::new(remote.as_ref(), listener);
Self {
data: InstanceData {
label: label.as_ref().to_string(),
remote: remote.as_ref().to_string(),
local: tunnel.local.clone(),
latency: -1,
scope_host: scope_host.as_ref().to_string(),
},
_tunnel: tunnel,
}
}
}
impl From<&ProxyInstance> for InstanceData {
fn from(value: &ProxyInstance) -> Self {
value.data.clone()
}
}
impl From<&ProxyInstance> for Instance {
fn from(value: &ProxyInstance) -> Self {
Instance {
label: value.label.as_str().into(),
remote: value.remote.as_str().into(),
local: value.local.as_str().into(),
latency: value.latency,
scope_host: value.scope_host.as_str().into(),
}
}
}
impl std::ops::Deref for ProxyInstance {
type Target = InstanceData;
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl std::ops::DerefMut for ProxyInstance {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
XDSEC/WebSocketReflectorX | https://github.com/XDSEC/WebSocketReflectorX/blob/144a9fc01c1e65192b331753d26d043f147f0ecb/crates/desktop/src/daemon/mod.rs | crates/desktop/src/daemon/mod.rs | use std::{process, rc::Rc, sync::Arc};
use api_controller::router;
use directories::ProjectDirs;
use model::{ScopeData, ServerState};
use serde::{Deserialize, Serialize};
use slint::{ComponentHandle, Model, ToSharedString, VecModel};
use tokio::{net::TcpListener, sync::RwLock};
use tracing::{debug, error, info, warn};
use crate::ui::{Instance, InstanceBridge, MainWindow, Scope, ScopeBridge, SettingsBridge};
mod api_controller;
mod latency_worker;
mod model;
mod ui_controller;
#[derive(Clone, Debug, Serialize, Deserialize)]
struct ScopesConfig {
scopes: Vec<ScopeData>,
}
/// Wire the daemon into the UI: install a rustls crypto provider, load saved
/// scopes from `scopes.toml`, populate the Slint bridge models, register UI
/// callbacks, start the local HTTP API server (port 3307, falling back to a
/// random port) and the latency worker.
///
/// Returns early (with an error log) if the project config directory cannot
/// be resolved. All async work is spawned onto the Slint event loop via
/// `slint::spawn_local` wrapped in `async_compat::Compat`.
pub fn setup(ui: &MainWindow) {
    use rustls::crypto;
    // Prefer AWS-LC; fall back to `ring`, and exit if neither installs.
    match crypto::aws_lc_rs::default_provider().install_default() {
        Ok(_) => info!("Using `AWS Libcrypto` as default crypto backend."),
        Err(err) => {
            error!("`AWS Libcrypto` is not available: {:?}", err);
            warn!("Try to use `ring` as default crypto backend.");
            crypto::ring::default_provider()
                .install_default()
                .inspect_err(|err| {
                    error!("`ring` is not available: {:?}", err);
                    error!("All crypto backend are not available, exiting...");
                    process::exit(1);
                })
                .ok();
            info!("Using `ring` as default crypto backend.");
        }
    }
    // read config scope
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            error!("Unable to find project config directories");
            return;
        }
    };
    let config_file = proj_dirs.config_dir().join("scopes.toml");
    // Missing config file is treated as an empty config, not an error.
    let config = match std::fs::read_to_string(&config_file) {
        Ok(config) => config,
        Err(_) => "".to_owned(),
    };
    // Parse errors fall back to an empty scope list so the app still starts.
    let scopes: ScopesConfig = match toml::from_str(&config) {
        Ok(scopes) => scopes,
        Err(e) => {
            error!("Failed to parse config file: {}", e);
            ScopesConfig { scopes: vec![] }
        }
    };
    debug!("Loaded scopes: {:?}", scopes);
    let handle = ui.as_weak();
    // Shared daemon state handed to the API router, UI callbacks and workers.
    let state_d = ServerState {
        ui: handle.clone(),
        instances: Arc::new(RwLock::new(vec![])),
        scopes: Arc::new(RwLock::new(scopes.scopes.clone())),
    };
    // Initialize the global state
    let instances: Rc<VecModel<Instance>> = Rc::new(VecModel::default());
    let scopes_r: Rc<VecModel<Scope>> = Rc::new(VecModel::default());
    // Mirror the loaded scope data into the Slint-side model.
    for scope in scopes.scopes.iter() {
        scopes_r.push(Scope {
            host: scope.host.clone().into(),
            name: scope.name.clone().into(),
            state: scope.state.clone().into(),
            features: scope.features.to_shared_string(),
            settings: serde_json::to_string(&scope.settings)
                .unwrap_or("{}".to_string())
                .into(),
        });
    }
    let scoped_instances: Rc<VecModel<Instance>> = Rc::new(VecModel::default());
    let instances_rc = slint::ModelRc::from(instances.clone());
    let scopes_rc = slint::ModelRc::from(scopes_r.clone());
    let scoped_instances_rc = slint::ModelRc::from(scoped_instances.clone());
    let instance_bridge = ui.global::<InstanceBridge>();
    instance_bridge.set_instances(instances_rc);
    instance_bridge.set_scoped_instances(scoped_instances_rc);
    // UI callback: add a tunnel instance (remote -> local).
    let state = state_d.clone();
    instance_bridge.on_add(move |remote, local| {
        let state_cloned = state.clone();
        match slint::spawn_local(async_compat::Compat::new(async move {
            ui_controller::on_instance_add(&state_cloned, remote.as_str(), local.as_str()).await;
        })) {
            Ok(_) => {}
            Err(e) => {
                debug!("Failed to update instance bridge: {e}");
            }
        }
    });
    // UI callback: remove a tunnel instance by its local address.
    let state = state_d.clone();
    instance_bridge.on_del(move |local| {
        let state_cloned = state.clone();
        match slint::spawn_local(async_compat::Compat::new(async move {
            ui_controller::on_instance_del(&state_cloned, local.as_str()).await;
        })) {
            Ok(_) => {}
            Err(e) => {
                debug!("Failed to update instance bridge: {e}");
            }
        }
    });
    let scope_bridge = ui.global::<ScopeBridge>();
    scope_bridge.set_scopes(scopes_rc);
    // UI callback: allow a pending scope request.
    let handle_cloned = handle.clone();
    let state = state_d.clone();
    scope_bridge.on_allow(move |scope_host| {
        let state_cloned = state.clone();
        let handle_cloned = handle_cloned.clone();
        match slint::spawn_local(async_compat::Compat::new(async move {
            ui_controller::on_scope_allow(
                &state_cloned,
                handle_cloned.clone(),
                scope_host.as_str(),
            )
            .await;
        })) {
            Ok(_) => {}
            Err(e) => {
                debug!("Failed to update scope bridge: {e}");
            }
        }
    });
    // UI callback: delete a scope.
    let state_cloned = state_d.clone();
    let handle_cloned = handle.clone();
    scope_bridge.on_del(move |scope_host| {
        let state_cloned = state_cloned.clone();
        let handle_cloned = handle_cloned.clone();
        match slint::spawn_local(async_compat::Compat::new(async move {
            ui_controller::on_scope_del(&state_cloned, handle_cloned.clone(), scope_host.as_str())
                .await;
        })) {
            Ok(_) => {}
            Err(e) => {
                debug!("Failed to update scope bridge: {e}");
            }
        }
    });
    // Launch the local HTTP API server on the Slint event loop.
    let router = router(state_d.clone());
    match slint::spawn_local(async_compat::Compat::new(async move {
        let listener = match TcpListener::bind(&format!("{}:{}", "127.0.0.1", 3307)).await {
            Ok(listener) => listener,
            Err(e) => {
                warn!("Failed to bind to port 3307: {e}");
                // Fallback to a random port
                info!("Falling back to a random port...");
                // Bind to a random port
                TcpListener::bind("127.0.0.1:0")
                    .await
                    .expect("failed to bind port")
            }
        };
        let port = listener.local_addr().unwrap().port();
        // Publish the chosen port back to the UI thread.
        slint::invoke_from_event_loop(move || {
            let ui = handle.upgrade().unwrap();
            let settings_bridge = ui.global::<SettingsBridge>();
            settings_bridge.set_api_port(port as i32);
            settings_bridge.set_online(true);
        })
        .ok();
        let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
            Some(dirs) => dirs,
            None => {
                error!("Unable to find project config directories");
                return;
            }
        };
        // Lock file advertises the live API port to other wsrx components.
        let lock_file = proj_dirs.data_local_dir().join(".rx.is.alive");
        tokio::fs::write(&lock_file, port.to_string())
            .await
            .unwrap_or_else(|_| {
                error!("Failed to write lock file");
                std::process::exit(1);
            });
        info!(
            "API server is listening on [[ {} ]]",
            listener.local_addr().expect("failed to bind port")
        );
        axum::serve(listener, router)
            .await
            .expect("failed to launch server");
    })) {
        Ok(_) => {}
        Err(e) => {
            error!("Failed to start API server: {e}");
        }
    }
    // Background worker that periodically measures per-instance latency.
    let state = state_d.clone();
    match slint::spawn_local(async_compat::Compat::new(async move {
        latency_worker::start(state).await;
    })) {
        Ok(_) => {}
        Err(e) => {
            error!("Failed to start latency worker: {e}");
        }
    }
}
/// Snapshot the scopes currently shown in the UI and persist them to
/// `scopes.toml` in the project config directory.
///
/// # Panics
/// Panics if the UI weak handle is dead or the scopes model is not the
/// expected `VecModel<Scope>` (both are invariants of how `setup` builds it).
pub fn save_scopes(ui: &slint::Weak<MainWindow>) {
    let window = ui.upgrade().unwrap();
    let scope_bridge = window.global::<ScopeBridge>();
    let scopes = scope_bridge.get_scopes();
    let scopes = scopes.as_any().downcast_ref::<VecModel<Scope>>().unwrap();
    let mut scopes_vec = vec![];
    // Convert each UI `Scope` back into the serializable `ScopeData`.
    for scope in scopes.iter() {
        scopes_vec.push(ScopeData {
            host: scope.host.to_string(),
            name: scope.name.to_string(),
            state: scope.state.to_string(),
            // NOTE(review): `.into()` on a `Map` iterator — presumably the
            // `features` field type implements `From` for an iterator;
            // confirm against `ScopeData`'s definition.
            features: scope
                .features
                .split(",")
                .map(|s| s.trim().to_string())
                .into(),
            settings: serde_json::from_str(scope.settings.to_string().as_str()).unwrap_or_default(),
        });
    }
    let proj_dirs = match ProjectDirs::from("org", "xdsec", "wsrx") {
        Some(dirs) => dirs,
        None => {
            error!("Unable to find project config directories");
            return;
        }
    };
    let config_file = proj_dirs.config_dir().join("scopes.toml");
    let config_obj = ScopesConfig { scopes: scopes_vec };
    // Serialization failure degrades to writing an empty file (after logging).
    let config = toml::to_string(&config_obj).unwrap_or_else(|e| {
        error!("Failed to serialize scopes: {}", e);
        String::new()
    });
    if let Err(e) = std::fs::create_dir_all(proj_dirs.config_dir()) {
        error!("Failed to create config directory: {}", e);
        return;
    }
    if let Err(e) = std::fs::write(&config_file, config) {
        error!("Failed to write config file: {}", e);
    }
    // NOTE(review): this debug line is emitted even when the write above
    // failed — consider moving it into the success path.
    debug!("Saved scopes to: {:?}", config_file);
}
/// Produce a random default instance label such as `inst-3fa2b1c4`
/// (hex suffix, zero-padded to at least six digits).
fn default_label() -> String {
    let suffix: u32 = rand::random();
    format!("inst-{suffix:06x}")
}
| rust | MIT | 144a9fc01c1e65192b331753d26d043f147f0ecb | 2026-01-04T20:21:00.884028Z | false |
tbillington/rust_serverless_runtime | https://github.com/tbillington/rust_serverless_runtime/blob/98eea1e4408a296b263982ce1f10eb25ad119857/src/main.rs | src/main.rs | use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use axum::{
async_trait,
extract::{FromRequestParts, Path, State},
http::{request::Parts, StatusCode},
response::IntoResponse,
routing::get,
Router,
};
use deno_core::{
error::{AnyError, JsError},
op, serde_json, serde_v8, v8, JsRuntime, OpState, RuntimeOptions,
};
use rusqlite::{Connection, OptionalExtension};
use tracing::{error, info};
use tracing_subscriber::prelude::*;
/// `GET /` — static greeting used as a liveness check.
async fn handle_root() -> &'static str {
    "Hello rustau!"
}
// HTTP POST /fn/:name curl -d @fn.js localhost:8080/fn/hello
// HTTP POST /fn/:name curl -d @fn.js localhost:8080/fn/hello
/// Store (or replace) a function body under `name` and open/create its
/// per-function SQLite database (`<name>.db`) with a `kv` table.
///
/// # Errors
/// Returns `AppError` on SQLite failures or a poisoned state lock.
async fn handle_fn_submit(
    State(state): State<AppState>,
    FunctionName(name): FunctionName,
    body: String,
) -> Result<(), AppError> {
    // One database file per function, named after the (validated) function name.
    let db_file = format!("{name}.db");
    let db = Connection::open(&db_file)?;
    db.execute("create table if not exists kv (key unique, value)", [])?;
    state
        .lock()?
        .insert(name.clone(), (body, Arc::new(Mutex::new(db))));
    info!("added new function: {name}");
    Ok(())
}
// HTTP GET /fn/:name curl localhost:8080/fn/hello
// HTTP GET /fn/:name curl localhost:8080/fn/hello
/// Look up a previously submitted function by name and execute it in a fresh
/// JS runtime, returning the JSON-serialized last expression value.
///
/// # Errors
/// `AppError::UnknownFunction` if no function was stored under `name`;
/// otherwise any error propagated from `run_js`.
async fn handle_fn_execute(
    State(state): State<AppState>,
    FunctionName(name): FunctionName,
) -> Result<String, AppError> {
    // Clone body + DB handle out of the map so the state lock is released
    // before the (potentially slow) JS execution.
    let (fn_body, db) = state
        .lock()?
        .get(&name)
        .cloned()
        .ok_or_else(|| AppError::UnknownFunction(name.clone()))?;
    info!("invoking stored fn: {name}");
    run_js(&name, &fn_body, db)
}
/// Deno op backing `console.log`: logs `msg` tagged with the executing
/// function's name (stored in `OpState` by `run_js`).
#[op]
fn op_log(state: &mut OpState, msg: String) {
    // emit the log message prefixed with the name of the function
    info!("[{}]: {}", state.borrow::<String>(), msg);
}
/// Deno op backing the JS `set(key, value)` helper: upserts a key/value pair
/// into the function's private SQLite `kv` table.
#[op]
fn op_kv_set(state: &mut OpState, key: String, value: String) -> Result<(), AnyError> {
    let db = state
        .borrow_mut::<DB>()
        .lock()
        // the error from a poisoned lock can't be sent between threads
        // so we take it's msg contents and wrap them in an error that is Send
        .map_err(|err| AnyError::msg(err.to_string()))?;
    db.execute("replace into kv (key, value) values (?1, ?2)", [key, value])?;
    Ok(())
}
/// Deno op backing the JS `get(key)` helper: fetches a value from the
/// function's `kv` table, returning `None` when the key is absent.
#[op]
fn op_kv_get(state: &mut OpState, key: String) -> Result<Option<String>, AnyError> {
    let db = state
        .borrow_mut::<DB>()
        .lock()
        // the error from a poisoned lock can't be sent between threads
        // so we take it's msg contents and wrap them in an error that is Send
        .map_err(|err| AnyError::msg(err.to_string()))?;
    // `.optional()` turns the no-rows case into Ok(None) instead of an error.
    let result = db
        .prepare("select value from kv where key = ?1")?
        .query_row([key], |row| row.get(0))
        .optional()?;
    Ok(result)
}
// JavaScript prelude evaluated in every runtime before the user function.
// It exposes `console.log`, `set` and `get`, each delegating to the Rust ops
// registered in `run_js` (op_log / op_kv_set / op_kv_get).
const RUNTIME_BOOTSTRAP: &str = r#"
globalThis.console = {
    log: (...args) => Deno.core.opSync("op_log", args.join(", "))
}
globalThis.set = (key, value) => (Deno.core.opSync("op_kv_set", key, JSON.stringify(value)), value)
globalThis.get = (key) => JSON.parse(Deno.core.opSync("op_kv_get", key))
"#;
/// Execute `body` in a new, isolated `JsRuntime` wired with the logging and
/// KV ops, and return the last evaluated expression serialized as JSON text.
///
/// # Errors
/// Propagates JS evaluation errors (`AppError::JsError`) and value
/// serialization failures (`AppError::V8SerialisationError`).
fn run_js(name: &str, body: &str, db: DB) -> Result<String, AppError> {
    let mut runtime = JsRuntime::new(RuntimeOptions {
        extensions: vec![deno_core::Extension::builder()
            .ops(vec![op_log::decl(), op_kv_set::decl(), op_kv_get::decl()])
            .js(vec![("[runtime]", RUNTIME_BOOTSTRAP)])
            .build()],
        ..Default::default()
    });
    let state = runtime.op_state();
    // inject the name of the function and access to the DB so ops have access
    state.borrow_mut().put::<String>(name.to_owned());
    state.borrow_mut().put(db);
    let last_value = runtime.execute_script(name, body)?;
    // parse out the last evaluated expression from the function execution
    let scope = &mut runtime.handle_scope();
    let local = v8::Local::new(scope, last_value);
    let deserialized_value = serde_v8::from_v8::<serde_json::Value>(scope, local)?;
    info!("result from \"{name}\": {deserialized_value:#?}");
    Ok(deserialized_value.to_string())
}
/// Threadsafe lock around a sqlite database connection
type DB = Arc<Mutex<Connection>>;
/// Threadsafe lock around a map of function name -> body & db connection
type AppState = Arc<Mutex<HashMap<String, (String, DB)>>>;
/// Entry point: set up logging, build the router (`/` plus `/fn/:name`
/// submit/execute), and serve on all interfaces at port 8080.
#[tokio::main]
async fn main() {
    register_trace_stdout_listener();
    let state: AppState = Default::default();
    let app = Router::with_state(state)
        .route("/", get(handle_root))
        .route("/fn/:name", get(handle_fn_execute).post(handle_fn_submit));
    // 0.0.0.0:8080 — listen on every interface.
    let addr = std::net::SocketAddr::from((std::net::Ipv4Addr::UNSPECIFIED, 8080));
    info!("listening on {addr}");
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        .await
        .unwrap();
}
/// Register logging provider and emit to stdout anything matching INFO or above.
/// The default level can be overridden via the `RUST_LOG` environment variable.
fn register_trace_stdout_listener() {
    let env_filter = tracing_subscriber::EnvFilter::builder()
        .with_default_directive(tracing::metadata::LevelFilter::INFO.into())
        .from_env_lossy();
    let filter_layer = tracing_subscriber::fmt::layer().with_filter(env_filter);
    tracing_subscriber::registry().with(filter_layer).init();
}
/// Type for all errors that can bubble up to the http level
///
/// Implements From for various error types, and IntoResponse to build an HTTP response
#[derive(Debug)]
enum AppError {
    // SQLite failure (message only; original error is not Send-safe here).
    SqliteError(String),
    // A std::sync lock was poisoned by a panicking holder.
    LockPoisoned(String),
    // Execution requested for a name never submitted.
    UnknownFunction(String),
    // JavaScript threw / failed to evaluate.
    JsError(JsError),
    // Any other deno_core error.
    DenoError(String),
    // Failed to convert the V8 result into JSON.
    V8SerialisationError(String),
}
// Map errors to HTTP responses: JS failures and unknown functions are shown
// to the client; everything else is logged and hidden behind a generic 500.
impl IntoResponse for AppError {
    fn into_response(self) -> axum::response::Response {
        match self {
            AppError::JsError(js_error) => {
                format!("error evaluating function: {js_error}").into_response()
            }
            AppError::UnknownFunction(name) => {
                (StatusCode::BAD_REQUEST, format!("unknown function: {name}")).into_response()
            }
            err => {
                // Internal details stay in the logs, not in the response body.
                error!("internal error: {err:?}");
                (StatusCode::INTERNAL_SERVER_ERROR, "internal server error").into_response()
            }
        }
    }
}
// Stringify SQLite errors so `?` works in handlers (keeps AppError Send).
impl From<rusqlite::Error> for AppError {
    fn from(err: rusqlite::Error) -> Self {
        AppError::SqliteError(err.to_string())
    }
}
// Generic over the guard type so `?` works on any poisoned `lock()` result.
impl<T> From<std::sync::PoisonError<T>> for AppError {
    fn from(e: std::sync::PoisonError<T>) -> Self {
        AppError::LockPoisoned(e.to_string())
    }
}
// Recover the concrete `JsError` out of deno's anyhow wrapper when possible,
// so JS failures can be reported to the client distinctly.
impl From<deno_core::anyhow::Error> for AppError {
    fn from(err: deno_core::anyhow::Error) -> Self {
        match err.downcast::<JsError>() {
            Ok(js_error) => AppError::JsError(js_error),
            Err(err) => AppError::DenoError(err.to_string()),
        }
    }
}
// V8 value -> JSON conversion failures.
impl From<serde_v8::Error> for AppError {
    fn from(err: serde_v8::Error) -> Self {
        AppError::V8SerialisationError(err.to_string())
    }
}
/// Extractor that also validates a function name from the URL
/// (see the `FromRequestParts` impl below for the validation rule).
struct FunctionName(String);
// Pull `:name` out of the path and reject anything that is not purely
// ASCII letters — this also keeps the derived `<name>.db` filename safe
// from path traversal.
#[async_trait]
impl<S> FromRequestParts<S> for FunctionName
where
    S: Send + Sync,
{
    type Rejection = axum::response::Response;
    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        let Path(name) = Path::<String>::from_request_parts(parts, state)
            .await
            .map_err(IntoResponse::into_response)?;
        // NOTE(review): an empty name passes this check (no chars to reject);
        // presumably the router never matches an empty segment — confirm.
        if name.chars().any(|c| !c.is_ascii_alphabetic()) {
            let error_msg = format!(
                "invalid function name: \"{name}\", only a-z and A-Z characters are allowed"
            );
            return Err((StatusCode::BAD_REQUEST, error_msg).into_response());
        }
        Ok(FunctionName(name))
    }
}
| rust | MIT | 98eea1e4408a296b263982ce1f10eb25ad119857 | 2026-01-04T20:21:36.565387Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/stack_guard.rs | device/src/stack_guard.rs | #![allow(dead_code)]
use esp_hal::{
assist_debug::DebugAssist, macros::handler, peripherals::ASSIST_DEBUG,
InterruptConfigurable as _,
};
extern "C" {
static _stack_start: u32;
static _stack_end: u32;
}
/// Arm the ESP32 debug-assist stack-pointer monitor over the linker-defined
/// stack region (`_stack_end`..`_stack_start`); the interrupt handler below
/// panics if SP ever leaves that window.
pub fn enable_stack_guard(assist_debug: &mut ASSIST_DEBUG) {
    let mut da = DebugAssist::new(assist_debug);
    da.set_interrupt_handler(interrupt_handler);
    // SAFETY-ish: these symbols are provided by the linker script; taking
    // their addresses (not values) yields the stack boundary addresses.
    let stack_top = unsafe { &_stack_start as *const u32 as u32 };
    let stack_bottom = unsafe { &_stack_end as *const u32 as u32 };
    da.enable_sp_monitor(stack_bottom, stack_top);
}
// Fired by the debug-assist peripheral on stack overflow/underflow;
// a panic here is the only sane response.
#[handler(priority = esp_hal::interrupt::Priority::min())]
fn interrupt_handler() {
    panic!("stack guard tripped");
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/flash.rs | device/src/flash.rs | mod header;
pub use header::*;
// mod share;
// pub use share::*;
mod log;
pub use log::*;
mod genuine_certificate;
pub use genuine_certificate::*;
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/uart_interrupt.rs | device/src/uart_interrupt.rs | use core::cell::RefCell;
/// UART interrupt handling module
use critical_section::Mutex;
use esp_hal::macros::{handler, ram};
use esp_hal::uart::{self, AnyUart, Uart, UartInterrupt};
use esp_hal::{Blocking, InterruptConfigurable};
use heapless::spsc::{Consumer, Producer, Queue};
use nb;
/// Queue capacity for UART receive buffers
pub const QUEUE_CAPACITY: usize = 8192;
/// Type alias for the UART byte receiver
pub type UartReceiver = Consumer<'static, u8, QUEUE_CAPACITY>;
pub const RX_FIFO_THRESHOLD: u16 = 32;
/// Number of UARTs supported
const NUM_UARTS: usize = 2;
/// Helper function to drain bytes from both UARTs in round-robin fashion
/// Always drains all bytes to prevent interrupt re-triggering
/// Panics if queue overflows - this indicates consumer is too slow
///
/// Must be called with the critical section already held (`cs`), since it
/// borrows the shared UART instances and queue producers.
#[ram]
fn drain_uart_to_queue(cs: critical_section::CriticalSection) {
    // Get references to both UARTs and producers
    let mut uart0 = UARTS[0].borrow_ref_mut(cs);
    let mut uart1 = UARTS[1].borrow_ref_mut(cs);
    let mut producer0 = UART_PRODUCERS[0].borrow_ref_mut(cs);
    let mut producer1 = UART_PRODUCERS[1].borrow_ref_mut(cs);
    // Round-robin between both UARTs to ensure fairness
    let mut any_data = true;
    while any_data {
        any_data = false;
        // Try to read from UART0 if it exists
        if let (Some(uart), Some(producer)) = (uart0.as_mut(), producer0.as_mut()) {
            if let Ok(byte) = uart.read_byte() {
                producer
                    .enqueue(byte)
                    .expect("UART0 receive queue overflow - consumer too slow");
                any_data = true;
            }
        }
        // Try to read from UART1 if it exists (upstream)
        if let (Some(uart), Some(producer)) = (uart1.as_mut(), producer1.as_mut()) {
            if let Ok(byte) = uart.read_byte() {
                producer
                    .enqueue(byte)
                    .expect("UART1 receive queue overflow - consumer too slow");
                any_data = true;
            }
        }
    }
}
/// Type alias for UART instance stored in static memory
type UartInstance = Mutex<RefCell<Option<Uart<'static, Blocking, AnyUart>>>>;
/// Type alias for UART producer stored in static memory
type UartProducer = Mutex<RefCell<Option<Producer<'static, u8, QUEUE_CAPACITY>>>>;
/// Global UART instances for interrupt handling
static UARTS: [UartInstance; NUM_UARTS] = [
Mutex::new(RefCell::new(None)),
Mutex::new(RefCell::new(None)),
];
/// Global UART producers for interrupt handling
static UART_PRODUCERS: [UartProducer; NUM_UARTS] = [
Mutex::new(RefCell::new(None)),
Mutex::new(RefCell::new(None)),
];
/// Global event queues for UARTs
static mut UART_QUEUES: [Queue<u8, QUEUE_CAPACITY>; NUM_UARTS] = [Queue::new(), Queue::new()];
/// Common interrupt handler logic
///
/// Drains BOTH UARTs (not just the triggering one) so no bytes are lost,
/// then clears the RX-FIFO-full interrupt of the UART that fired.
#[ram]
fn handle_uart_interrupt(uart_index: usize) {
    critical_section::with(|cs| {
        // Drain both UARTs in round-robin fashion
        drain_uart_to_queue(cs);
        // Clear the interrupt for the specific UART that triggered this
        let mut uart = UARTS[uart_index].borrow_ref_mut(cs);
        if let Some(uart) = uart.as_mut() {
            uart.clear_interrupts(UartInterrupt::RxFifoFull.into());
        }
    });
}
/// UART0 interrupt handler (placed in RAM for low-latency execution).
#[ram]
#[handler]
fn uart0_interrupt_handler() {
    handle_uart_interrupt(0);
}
/// UART1 interrupt handler (placed in RAM for low-latency execution).
#[ram]
#[handler]
fn uart1_interrupt_handler() {
    handle_uart_interrupt(1);
}
/// A handle for interacting with a UART stored in a Mutex
/// Created only by `register_uart`, which guarantees the corresponding
/// `UARTS[index]` slot is populated (the impl relies on this for unwraps).
pub struct UartHandle {
    // Which of the two registered UARTs this handle addresses.
    uart_num: UartNum,
}
impl UartHandle {
    // Index into the UARTS/UART_PRODUCERS static arrays.
    fn uart_index(&self) -> usize {
        self.uart_num as usize
    }
    /// Write all of `bytes`, releasing the critical section whenever the TX
    /// FIFO fills so pending interrupts can run between bursts.
    pub fn write_bytes(&mut self, bytes: &[u8]) -> Result<(), uart::Error> {
        let mut up_to = 0;
        let uart_index = self.uart_index();
        while up_to < bytes.len() {
            critical_section::with(|cs| {
                let mut uart_opt = UARTS[uart_index].borrow_ref_mut(cs);
                let uart = uart_opt.as_mut().unwrap();
                // Write as many bytes as possible until the TX FIFO is full
                while up_to < bytes.len() {
                    match uart.write_byte(bytes[up_to]) {
                        Ok(_) => {
                            up_to += 1;
                        }
                        Err(nb::Error::WouldBlock) => {
                            // TX FIFO is full, exit inner loop to release critical section
                            // The hope is that this break will allow other interrupts to fire.
                            break;
                        }
                        Err(nb::Error::Other(e)) => {
                            // Actual UART error
                            return Err(e);
                        }
                    }
                }
                Ok(())
            })?;
        }
        Ok(())
    }
    /// Flush the TX FIFO; returns `WouldBlock` while bytes are still pending.
    pub fn flush_tx(&mut self) -> nb::Result<(), esp_hal::uart::Error> {
        critical_section::with(|cs| {
            let uart_index = self.uart_index();
            let mut uart_opt = UARTS[uart_index].borrow_ref_mut(cs);
            // Safe to unwrap: UartHandle is only created when UART exists
            let uart = uart_opt.as_mut().unwrap();
            uart.flush_tx()
        })
    }
    /// Reconfigure the UART at a new baud rate, keeping the RX FIFO
    /// threshold this module's interrupt handling depends on.
    pub fn change_baud(&mut self, baudrate: u32) {
        critical_section::with(|cs| {
            let uart_index = self.uart_index();
            let mut uart_opt = UARTS[uart_index].borrow_ref_mut(cs);
            // Safe to unwrap: UartHandle is only created when UART exists
            let uart = uart_opt.as_mut().unwrap();
            uart.apply_config(&esp_hal::uart::Config {
                baudrate,
                rx_fifo_full_threshold: RX_FIFO_THRESHOLD,
                ..Default::default()
            })
            .unwrap();
        })
    }
    /// Fill buffer with any remaining bytes (for when there are fewer than threshold bytes)
    /// This drains both UARTs in round-robin fashion to ensure we never miss data
    pub fn fill_buffer(&mut self) {
        critical_section::with(|cs| {
            drain_uart_to_queue(cs);
        });
    }
}
/// Register a UART for interrupt handling
///
/// Installs the matching interrupt handler, enables the RX-FIFO-full
/// interrupt, stores the UART + queue producer in the module statics, and
/// returns a write handle plus the consumer end of the byte queue.
///
/// Must be called at most once per `UartNum`: the static queue may only be
/// split a single time.
pub fn register_uart(
    mut uart: Uart<'static, Blocking, AnyUart>,
    uart_num: UartNum,
) -> (UartHandle, UartReceiver) {
    let uart_index = uart_num as usize;
    // SAFETY: sole access to `UART_QUEUES[uart_index]`; the queue is split
    // exactly once here (see the once-per-UartNum contract above).
    unsafe {
        // Split the queue into producer and consumer
        let queue_ref = &raw mut UART_QUEUES[uart_index];
        let (producer, consumer) = (*queue_ref).split();
        match uart_num {
            UartNum::Uart0 => uart.set_interrupt_handler(uart0_interrupt_handler),
            UartNum::Uart1 => uart.set_interrupt_handler(uart1_interrupt_handler),
        }
        uart.listen(UartInterrupt::RxFifoFull);
        // Store the UART instance and producer
        critical_section::with(|cs| {
            UARTS[uart_index].borrow_ref_mut(cs).replace(uart);
            UART_PRODUCERS[uart_index]
                .borrow_ref_mut(cs)
                .replace(producer);
        });
        // Return handle and consumer
        (UartHandle { uart_num }, consumer)
    }
}
/// Identifies one of the two supported UARTs; the discriminant doubles as
/// the index into the module's static arrays.
#[derive(Debug, Clone, Copy)]
pub enum UartNum {
    Uart0 = 0,
    Uart1 = 1,
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/partitions.rs | device/src/partitions.rs | use crate::ota::OtaPartitions;
use core::cell::RefCell;
use embedded_storage::nor_flash::NorFlash;
use esp_hal::sha::Sha;
use esp_storage::FlashStorage;
use frostsnap_comms::firmware_reader::FirmwareSizeError;
use frostsnap_comms::Sha256Digest;
use frostsnap_embedded::FlashPartition;
pub type EspFlashPartition<'a> = FlashPartition<'a, FlashStorage>;
/// The flash partitions this firmware cares about, resolved from the
/// on-flash partition table by [`Partitions::load`].
#[derive(Clone)]
pub struct Partitions<'a> {
    // Factory-provisioned genuine-device certificate.
    pub factory_cert: EspFlashPartition<'a>,
    // OTA bookkeeping plus the two app slots.
    pub ota: OtaPartitions<'a>,
    // Non-volatile storage partition.
    pub nvs: EspFlashPartition<'a>,
}
impl<'a> Partitions<'a> {
    // Build a skeleton with every partition at offset/size 0; `load` fills
    // in the real values from the partition table.
    fn new(flash: &'a RefCell<FlashStorage>) -> Self {
        Self {
            factory_cert: EspFlashPartition::new(flash, 0, 0, "factory_cert"),
            ota: OtaPartitions {
                otadata: EspFlashPartition::new(flash, 0, 0, "otadata"),
                ota_0: EspFlashPartition::new(flash, 0, 0, "ota_0"),
                ota_1: EspFlashPartition::new(flash, 0, 0, "ota_1"),
            },
            nvs: EspFlashPartition::new(flash, 0, 0, "nvs"),
        }
    }
    /// Read the partition table from flash and resolve every named partition.
    ///
    /// # Panics
    /// Panics if a table row cannot be read, if a partition is not
    /// erase-block aligned, or if any required partition is missing/empty.
    pub fn load(flash: &'a RefCell<FlashStorage>) -> Self {
        // Partition table lives at 0xd000; rows are 32 bytes each.
        let table = esp_partition_table::PartitionTable::new(0xd000, 10 * 32);
        let mut self_ = Self::new(flash);
        for row in table.iter_storage(&mut *flash.borrow_mut(), false) {
            let row = match row {
                Ok(row) => row,
                Err(_) => panic!("unable to read row of partition table"),
            };
            assert_eq!(row.offset % FlashStorage::ERASE_SIZE as u32, 0);
            // Match rows to our known partitions by name; unknown rows are ignored.
            match row.name() {
                "factory_cert" => {
                    self_
                        .factory_cert
                        .set_offset_and_size(row.offset, row.size as u32);
                }
                "otadata" => {
                    self_
                        .ota
                        .otadata
                        .set_offset_and_size(row.offset, row.size as u32);
                }
                "ota_0" => {
                    self_
                        .ota
                        .ota_0
                        .set_offset_and_size(row.offset, row.size as u32);
                }
                "ota_1" => {
                    self_
                        .ota
                        .ota_1
                        .set_offset_and_size(row.offset, row.size as u32);
                }
                "nvs" => {
                    self_.nvs.set_offset_and_size(row.offset, row.size as u32);
                }
                _ => { /*ignore*/ }
            }
        }
        // Every required partition must have been found in the table.
        for part in [
            self_.factory_cert,
            self_.ota.otadata,
            self_.ota.ota_0,
            self_.ota.ota_1,
            self_.nvs,
        ] {
            assert!(part.size() > 0, "partition {} must not be empty", part.tag);
        }
        self_
    }
}
/// Extension methods for flash partitions: hashing and firmware sizing.
pub trait PartitionExt {
    /// Calculate SHA256 digest of partition data
    ///
    /// # Arguments
    /// * `sha256` - SHA256 hardware peripheral
    /// * `up_to` - Optional byte limit; `None` hashes the whole partition.
    fn sha256_digest(&self, sha256: &mut Sha<'_>, up_to: Option<u32>) -> Sha256Digest;
    /// Calculate firmware size information
    ///
    /// # Returns
    ///
    /// A tuple containing:
    /// - Firmware content size (without signature blocks)
    /// - Total size including signature blocks if present
    fn firmware_size(&self) -> Result<(u32, u32), FirmwareSizeError>;
}
impl PartitionExt for EspFlashPartition<'_> {
    // Hash the partition sector-by-sector on the SHA hardware peripheral,
    // stopping after `up_to` bytes (clamped to the partition size).
    fn sha256_digest(
        &self,
        sha256: &mut esp_hal::sha::Sha<'_>,
        up_to: Option<u32>,
    ) -> Sha256Digest {
        let mut digest = [0u8; 32];
        let mut hasher = sha256.start::<esp_hal::sha::Sha256>();
        let mut bytes_hashed = 0u32;
        // Calculate how many bytes to hash
        let bytes_to_hash_total = match up_to {
            Some(limit) => limit.min(self.size()),
            None => self.size(),
        };
        for i in 0..self.n_sectors() {
            if bytes_hashed >= bytes_to_hash_total {
                break;
            }
            let sector = self.read_sector(i).unwrap();
            // Last sector may be hashed only partially.
            let bytes_to_hash =
                (bytes_to_hash_total - bytes_hashed).min(sector.len() as u32) as usize;
            let mut remaining = &sector[..bytes_to_hash];
            // `update` may accept fewer bytes than offered; feed until consumed.
            while !remaining.is_empty() {
                remaining = nb::block!(hasher.update(remaining)).unwrap();
            }
            bytes_hashed += bytes_to_hash as u32;
        }
        nb::block!(hasher.finish(&mut digest)).unwrap();
        Sha256Digest(digest)
    }
    // Delegates to the shared firmware-size parser.
    fn firmware_size(&self) -> Result<(u32, u32), FirmwareSizeError> {
        frostsnap_comms::firmware_reader::firmware_size(self)
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/screen_test.rs | device/src/screen_test.rs | use alloc::rc::Rc;
use core::cell::RefCell;
use embedded_graphics::prelude::*;
use esp_hal::timer::Timer;
use frostsnap_cst816s::interrupt::TouchReceiver;
use frostsnap_widgets::{
palette::PALETTE, DynWidget, Instant as WidgetInstant, ScreenTest, SuperDrawTarget, Widget,
};
use crate::touch_calibration::adjust_touch_point;
/// Run the interactive screen-test widget until it reports completion,
/// feeding it touch events and redrawing at most every 5 ms, then return
/// ownership of the display to the caller.
pub fn run<S>(display: S, touch_receiver: &mut TouchReceiver, timer: &impl Timer) -> S
where
    S: DrawTarget<Color = embedded_graphics::pixelcolor::Rgb565>,
{
    // Shared ownership so SuperDrawTarget can borrow the display; reclaimed below.
    let display_rc = Rc::new(RefCell::new(display));
    let mut super_display =
        SuperDrawTarget::from_shared(Rc::clone(&display_rc), PALETTE.background);
    let mut screen_test_widget = ScreenTest::new();
    // NOTE(review): 240x280 is presumably the panel resolution — confirm
    // against the display driver config.
    screen_test_widget.set_constraints(Size::new(240, 280));
    let mut last_redraw_time = timer.now();
    let _ = super_display.clear(PALETTE.background);
    loop {
        let now = timer.now();
        let now_ms = WidgetInstant::from_millis(now.duration_since_epoch().to_millis());
        // Forward every pending touch event, mapped through calibration.
        while let Some(touch_event) = touch_receiver.dequeue() {
            let touch_point = adjust_touch_point(Point::new(touch_event.x, touch_event.y));
            let is_release = touch_event.action == 1;
            screen_test_widget.handle_touch(touch_point, now_ms, is_release);
        }
        // Throttle redraws to at most once per 5 ms.
        let elapsed_ms = (now - last_redraw_time).to_millis();
        if elapsed_ms >= 5 {
            let _ = screen_test_widget.draw(&mut super_display, now_ms);
            last_redraw_time = now;
        }
        if screen_test_widget.is_completed() {
            break;
        }
    }
    // Drop the drawing wrapper so ours is the only Rc reference left.
    drop(super_display);
    Rc::try_unwrap(display_rc)
        .unwrap_or_else(|_| panic!("should be only holder"))
        .into_inner()
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/lib.rs | device/src/lib.rs | #![no_std]
use alloc::{collections::VecDeque, string::ToString};
use frostsnap_comms::{DeviceSendBody, DeviceSendMessage, WireDeviceSendBody};
use frostsnap_core::DeviceId;
use ui::UserInteraction;
#[macro_use]
extern crate alloc;
/// Display refresh frequency in milliseconds (25ms = 40 FPS)
pub const DISPLAY_REFRESH_MS: u64 = 25;
/// Log macro for debug logging. Accepts `format!`-style arguments; compiles
/// to nothing unless the `debug_log` feature is enabled.
#[macro_export]
macro_rules! log {
    ($($arg:tt)*) => {
        #[cfg(feature = "debug_log")]
        frostsnap_widgets::debug::log(alloc::format!($($arg)*))
    };
}
pub mod device_config;
pub mod ds;
pub mod efuse;
pub mod esp32_run;
pub mod factory;
pub mod firmware_size;
pub mod flash;
pub mod frosty_ui;
pub mod io;
pub mod ota;
pub mod panic;
pub mod partitions;
pub mod peripherals;
pub mod resources;
pub mod root_widget;
pub mod screen_test;
pub mod secure_boot;
pub mod stack_guard;
pub mod touch_calibration;
pub mod touch_handler;
pub mod uart_interrupt;
pub mod ui;
pub mod widget_tree;
/// Outbound message queue and connection state for the link toward the
/// coordinator (upstream). Messages are only released once the connection
/// reaches the matching state (see `dequeue_message`).
#[derive(Debug, Clone)]
pub struct UpstreamConnection {
    // Current handshake stage of the upstream link.
    state: UpstreamConnectionState,
    // FIFO of messages awaiting coordinator acknowledgement of the link.
    messages: VecDeque<DeviceSendMessage<WireDeviceSendBody>>,
    // At most one pending announcement, sent before any queued message.
    announcement: Option<DeviceSendMessage<DeviceSendBody>>,
    // Our own id, used both to stamp outgoing messages and to tell our
    // messages apart from forwarded downstream ones.
    my_device_id: DeviceId,
}
impl UpstreamConnection {
    /// Fresh connection in the initial `PowerOn` state with empty queues.
    pub fn new(my_device_id: DeviceId) -> Self {
        Self {
            state: UpstreamConnectionState::PowerOn,
            messages: Default::default(),
            announcement: None,
            my_device_id,
        }
    }
    /// Transition to `state`, notify the UI, and on a reset back to
    /// `PowerOn` drop our own queued messages (keeping forwarded ones).
    pub fn set_state(&mut self, state: UpstreamConnectionState, ui: &mut impl UserInteraction) {
        ui.set_upstream_connection_state(state);
        match state {
            UpstreamConnectionState::PowerOn => {
                // HACK: We want to clear messages when resetting the connection
                // upstream but keep the downstream announcements otherwise we
                // would have to trigger something downstream for them to resend
                // it.
                self.messages.retain(|msg| msg.from != self.my_device_id);
            }
            UpstreamConnectionState::Established => {}
            UpstreamConnectionState::EstablishedAndCoordAck => {}
        }
        self.state = state;
    }
    pub fn get_state(&self) -> UpstreamConnectionState {
        self.state
    }
    /// Pop the next message eligible for sending: the announcement as soon
    /// as the link is `Established`, ordinary messages only after the
    /// coordinator has ack'd.
    pub fn dequeue_message(&mut self) -> Option<DeviceSendMessage<WireDeviceSendBody>> {
        if self.state >= UpstreamConnectionState::Established {
            if let Some(announcement) = self.announcement.take() {
                return Some(announcement.into());
            }
        }
        if self.state == UpstreamConnectionState::EstablishedAndCoordAck {
            return self.messages.pop_front();
        }
        None
    }
    /// Stage an announcement (replacing any previous unsent one).
    pub fn send_announcement(&mut self, announcement: DeviceSendBody) {
        self.announcement = Some(DeviceSendMessage {
            from: self.my_device_id,
            body: announcement,
        });
    }
    /// Queue messages from this device for the coordinator.
    pub fn send_to_coordinator(
        &mut self,
        iter: impl IntoIterator<Item = impl Into<DeviceSendBody>>,
    ) {
        self.messages.extend(iter.into_iter().map(|body| {
            DeviceSendMessage {
                from: self.my_device_id,
                body: body.into(),
            }
            // NOTE(review): this `.into()` converts the whole message to the
            // wire type for the queue — presumably via a From impl in
            // frostsnap_comms; confirm it is not a no-op conversion.
            .into()
        }));
    }
    /// Relay a message received from a downstream device toward the coordinator.
    pub fn forward_to_coordinator(&mut self, message: DeviceSendMessage<WireDeviceSendBody>) {
        self.messages.push_back(message);
    }
    // Debug messages are only queued once the coordinator is listening.
    fn send_debug(&mut self, message: impl ToString) {
        if self.state == UpstreamConnectionState::EstablishedAndCoordAck {
            self.send_to_coordinator([DeviceSendBody::Debug {
                message: message.to_string(),
            }]);
        }
    }
    /// True if anything (announcement or queued message) is waiting to go out.
    pub fn has_messages_to_send(&self) -> bool {
        self.announcement.is_some() || !self.messages.is_empty()
    }
}
/// Handshake progression of the upstream link; `Ord` reflects the order of
/// the stages (used with `>=` in `dequeue_message`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub enum UpstreamConnectionState {
    /// We have power from the upstream port
    PowerOn,
    /// Received magic bytes from upstream device
    Established,
    /// The coordinator has Ack'd us
    EstablishedAndCoordAck,
}
/// Connection stage of the downstream (next-device) port.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub enum DownstreamConnectionState {
    // No device plugged in downstream.
    Disconnected,
    // Physically connected but handshake not finished.
    Connected,
    // Handshake complete; messages flow.
    Established,
}
pub type Instant = fugit::Instant<u64, 1, 1_000_000>;
pub type Duration = fugit::Duration<u64, 1, 1_000_000>;
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/root_widget.rs | device/src/root_widget.rs | use crate::widget_tree::WidgetTree;
use embedded_graphics::{
draw_target::DrawTarget,
geometry::{Point, Size},
pixelcolor::Rgb565,
};
use frostsnap_widgets::{DynWidget, FadeSwitcher, Widget};
/// Root widget that contains the main widget tree,
/// wrapped in a [`FadeSwitcher`] so page transitions cross-fade.
pub struct RootWidget {
    pub page_switcher: FadeSwitcher<WidgetTree>,
}
impl RootWidget {
    /// Create the root with `initial_widget` shown and the given fade time.
    pub fn new(initial_widget: WidgetTree, fade_duration_ms: u32) -> Self {
        let page_switcher = FadeSwitcher::new(initial_widget, fade_duration_ms);
        Self { page_switcher }
    }
    /// Forward switch_to calls to the FadeSwitcher
    pub fn switch_to(&mut self, new_widget: WidgetTree) {
        self.page_switcher.switch_to(new_widget);
    }
    /// Get a mutable reference to the current widget
    pub fn current_mut(&mut self) -> &mut WidgetTree {
        self.page_switcher.current_mut()
    }
}
// Pure delegation: every DynWidget call is forwarded to the inner switcher.
impl DynWidget for RootWidget {
    fn set_constraints(&mut self, max_size: Size) {
        self.page_switcher.set_constraints(max_size);
    }
    fn sizing(&self) -> frostsnap_widgets::Sizing {
        self.page_switcher.sizing()
    }
    fn handle_touch(
        &mut self,
        point: Point,
        current_time: frostsnap_widgets::Instant,
        is_release: bool,
    ) -> Option<frostsnap_widgets::KeyTouch> {
        self.page_switcher
            .handle_touch(point, current_time, is_release)
    }
    fn handle_vertical_drag(&mut self, prev_y: Option<u32>, new_y: u32, is_release: bool) {
        self.page_switcher
            .handle_vertical_drag(prev_y, new_y, is_release)
    }
    fn force_full_redraw(&mut self) {
        self.page_switcher.force_full_redraw();
    }
}
// Drawing also just delegates to the fade switcher.
impl Widget for RootWidget {
    type Color = Rgb565;
    fn draw<D: DrawTarget<Color = Self::Color>>(
        &mut self,
        target: &mut frostsnap_widgets::SuperDrawTarget<D, Self::Color>,
        current_time: frostsnap_widgets::Instant,
    ) -> Result<(), D::Error> {
        self.page_switcher.draw(target, current_time)
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/efuse.rs | device/src/efuse.rs | use alloc::rc::Rc;
use alloc::vec::Vec;
use core::cell::RefCell;
use esp_hal::efuse::{self as hal_efuse, Efuse};
use esp_hal::peripherals::EFUSE;
use frostsnap_core::AccessStructureRef;
use rand_chacha::rand_core::RngCore;
use rand_core::SeedableRng;
use reed_solomon;
// See table Table 4.3-1 and Table 4.3-2 from esp32c3 technical reference
// eFuse block index where the key blocks start (KEY0 is block 4).
const KEY_BLOCKS_OFFSET: u8 = 4;
// Bit offset of the per-key write-disable flags inside WR_DIS.
const WR_DIS_KEY_OFFSET: u8 = 23;
// Bit offset of the per-key-purpose write-disable flags inside WR_DIS.
const WR_DIS_KP_OFFSET: u8 = 8;
// Opcodes written to the eFuse controller CONF register before READ/PGM.
const READ_COMMAND: u16 = 0x5AA5;
const WRITE_COMMAND: u16 = 0x5A5A;
use esp_hal::peripheral::{Peripheral, PeripheralRef};
/// Owner of the EFUSE peripheral, providing read and (irreversible) burn
/// operations over the chip's key slots.
pub struct EfuseController<'a> {
    // Exclusive handle to the eFuse controller registers.
    pub efuse: PeripheralRef<'a, EFUSE>,
}
impl<'a> EfuseController<'a> {
    /// Take ownership of the EFUSE peripheral for lifetime `'a`.
    pub fn new(efuse: impl Peripheral<P = EFUSE> + 'a) -> Self {
        Self {
            efuse: efuse.into_ref(),
        }
    }
    /// Check if HMAC keys have been initialized
    pub fn has_hmac_keys_initialized(&self) -> bool {
        let discovered = self.discover_efuses();
        // We need at least share_encryption and fixed_entropy keys
        // DS key is optional for backward compatibility
        discovered.share_encryption.is_some() && discovered.fixed_entropy.is_some()
    }
    /// Discover which key slots contain which keys based on their purposes
    pub fn discover_efuses(&self) -> DiscoveredEfuses {
        use esp_hal::hmac::KeyId;
        let mut hmac_upstream_keys = Vec::new();
        let mut ds_key = None;
        // Scan all key slots
        for key_id in [
            KeyId::Key0,
            KeyId::Key1,
            KeyId::Key2,
            KeyId::Key3,
            KeyId::Key4,
            KeyId::Key5,
        ] {
            let purpose = Self::key_purpose(key_id);
            match purpose {
                KeyPurpose::HmacUpstream => {
                    hmac_upstream_keys.push((key_id as u8, key_id));
                }
                KeyPurpose::Ds => {
                    // If several slots are DS-purposed, the last one scanned wins.
                    ds_key = Some(key_id);
                }
                _ => {}
            }
        }
        // Sort HmacUpstream keys by index
        hmac_upstream_keys.sort_by_key(|(idx, _)| *idx);
        // Assign keys based on order (lower index = share_encryption, higher = fixed_entropy)
        let share_encryption = hmac_upstream_keys.first().map(|(_, id)| *id);
        let fixed_entropy = hmac_upstream_keys.get(1).map(|(_, id)| *id);
        DiscoveredEfuses {
            share_encryption,
            fixed_entropy,
            ds: ds_key,
        }
    }
    /// All key purposes must be written at the same time because efuse Block 0
    /// (configuration block) can only be programmed once. Multiple writes to Block
    /// 0 would accumulate (OR together) all the bits from each write, potentially setting
    /// unintended configuration bits. By batching all key purpose configurations into
    /// a single Block 0 write, we ensure only the intended bits are set.
    ///
    /// # Safety
    /// Burns irreversible eFuse configuration bits. Callers must include every
    /// key slot they will ever need in a single call, during provisioning only.
    unsafe fn write_key_purposes(
        &self,
        configs: &[(u8, KeyPurpose, bool)],
    ) -> Result<(), EfuseError> {
        // buff[0..4] -> EFUSE_PGM_DATA0_REG (write disable)
        // buff[4..8] -> EFUSE_PGM_DATA1_REG -> EFUSE_RD_REPEAT_DATA0_REG
        // buff[8..12] -> EFUSE_PGM_DATA2_REG -> EFUSE_RD_REPEAT_DATA1_REG (KEY_PURPOSE_0,1)
        // buff[12..16] -> EFUSE_PGM_DATA3_REG -> EFUSE_RD_REPEAT_DATA2_REG (KEY_PURPOSE_2,3,4,5)
        let mut buff = [0x00u8; 32];
        let mut write_disable: u32 = 0;
        for &(key_num, purpose, read_protect) in configs {
            // Set key purpose bits
            // Each purpose is a 4-bit field: even slots take the low nibble,
            // odd slots the high nibble of the corresponding byte.
            let kp = purpose as u8;
            match key_num {
                0 => buff[11] |= kp,
                1 => buff[11] |= kp << 4,
                2 => buff[12] |= kp,
                3 => buff[12] |= kp << 4,
                4 => buff[13] |= kp,
                5 => buff[13] |= kp << 4,
                _ => return Err(EfuseError::EfuseError),
            }
            // Set write disable bits
            write_disable |= 0x01 << (WR_DIS_KEY_OFFSET + key_num);
            write_disable |= 0x01 << (WR_DIS_KP_OFFSET + key_num);
            // Set read protect if needed
            if read_protect {
                let read_disable = 0x01u8 << key_num;
                buff[4] |= read_disable;
            }
            // Write-disable the read-protect bits
            // NOTE(review): bit 0 is set for every config, even when
            // read_protect is false — presumably intentional, to lock the
            // read-protection state permanently either way; confirm.
            write_disable |= 0x01;
        }
        buff[0..4].copy_from_slice(&write_disable.to_le_bytes());
        self.write_block(&buff, 0)
    }
    /// There should only be one series of calls to set_efuse_key accompanied by write_key_purposes
    ///
    /// # Safety
    /// Burns the 256-bit key block for `key_id`; irreversible. Refuses slots
    /// whose purpose is already set.
    unsafe fn set_efuse_key(
        &self,
        key_id: esp_hal::hmac::KeyId,
        value: [u8; 32],
    ) -> Result<(), EfuseError> {
        if self.is_key_written(key_id) {
            return Err(EfuseError::EfuseAlreadyBurned);
        }
        self.write_block(&value, (key_id as u8) + KEY_BLOCKS_OFFSET)?;
        Ok(())
    }
    /// Read the 4-bit purpose field currently burned for `key_id`.
    ///
    /// Panics if the stored value is not a known [`KeyPurpose`].
    fn key_purpose(key_id: esp_hal::hmac::KeyId) -> KeyPurpose {
        use esp_hal::hmac::KeyId;
        let efuse_field = match key_id {
            KeyId::Key0 => hal_efuse::KEY_PURPOSE_0,
            KeyId::Key1 => hal_efuse::KEY_PURPOSE_1,
            KeyId::Key2 => hal_efuse::KEY_PURPOSE_2,
            KeyId::Key3 => hal_efuse::KEY_PURPOSE_3,
            KeyId::Key4 => hal_efuse::KEY_PURPOSE_4,
            KeyId::Key5 => hal_efuse::KEY_PURPOSE_5,
        };
        let field_value: u8 = Efuse::read_field_le(efuse_field);
        KeyPurpose::try_from(field_value).expect("key purpose was invalid")
    }
    /// A slot counts as written once its purpose is no longer `User` (0).
    pub fn is_key_written(&self, key_id: esp_hal::hmac::KeyId) -> bool {
        Self::key_purpose(key_id) != KeyPurpose::User
    }
    /// Read the 256-bit key block for `key_id`.
    ///
    /// NOTE(review): read-protected slots presumably read back as zeros
    /// rather than erroring — confirm against the eFuse controller docs.
    pub fn read_efuse(&self, key_id: esp_hal::hmac::KeyId) -> Result<[u8; 32], EfuseError> {
        use esp_hal::hmac::KeyId;
        let field = match key_id {
            KeyId::Key0 => hal_efuse::KEY0,
            KeyId::Key1 => hal_efuse::KEY1,
            KeyId::Key2 => hal_efuse::KEY2,
            KeyId::Key3 => hal_efuse::KEY3,
            KeyId::Key4 => hal_efuse::KEY4,
            KeyId::Key5 => hal_efuse::KEY5,
        };
        let bytes: [u8; 32] = Efuse::read_field_le::<[u8; 32]>(field);
        Ok(bytes)
    }
    /// # Safety
    /// Permanently burns eFuse bits in `block_number`. The caller must ensure
    /// the block has not been programmed before and that `data` is final.
    unsafe fn write_block(&self, data: &[u8; 32], block_number: u8) -> Result<(), EfuseError> {
        let efuse = &*self.efuse;
        // 11 words = 32 data bytes + 12 Reed-Solomon parity bytes.
        let mut to_burn: [u32; 11] = [0; 11];
        if block_number == 0 {
            // Block 0: Use raw data - hardware uses 4x backup scheme
            for (i, word) in data.chunks(4).enumerate() {
                let n = u32::from_le_bytes(word.try_into().unwrap());
                to_burn[i] = n;
            }
        } else {
            // Blocks 2-10: Apply Reed-Solomon encoding
            let rs_enc = reed_solomon::Encoder::new(12);
            let ecc = rs_enc.encode(data);
            for (i, word) in ecc.chunks(4).enumerate() {
                let n = u32::from_le_bytes(word.try_into().unwrap());
                to_burn[i] = n;
            }
        }
        // Write to efuse controller register
        efuse.pgm_data0().write(|w| w.bits(to_burn[0]));
        efuse.pgm_data1().write(|w| w.bits(to_burn[1]));
        efuse.pgm_data2().write(|w| w.bits(to_burn[2]));
        efuse.pgm_data3().write(|w| w.bits(to_burn[3]));
        efuse.pgm_data4().write(|w| w.bits(to_burn[4]));
        efuse.pgm_data5().write(|w| w.bits(to_burn[5]));
        efuse.pgm_data6().write(|w| w.bits(to_burn[6]));
        efuse.pgm_data7().write(|w| w.bits(to_burn[7]));
        efuse.pgm_check_value0().write(|w| w.bits(to_burn[8]));
        efuse.pgm_check_value1().write(|w| w.bits(to_burn[9]));
        efuse.pgm_check_value2().write(|w| w.bits(to_burn[10]));
        self.send_write_command(block_number);
        self.update_read_registers();
        if self.get_programming_error_record(block_number) {
            Err(EfuseError::EfuseWriteError(block_number))
        } else {
            Ok(())
        }
    }
    /// Issue the PGM command for `block_number`, wait for completion, then
    /// zero the staging registers so stale data cannot leak into later burns.
    unsafe fn send_write_command(&self, block_number: u8) {
        let efuse = &self.efuse;
        // Send opcode, blknum and write command
        efuse.conf().write(|w| w.op_code().bits(WRITE_COMMAND));
        efuse
            .cmd()
            .write(|w| w.blk_num().bits(block_number).pgm_cmd().set_bit());
        // Poll command register until write bit is cleared
        while efuse.cmd().read().pgm_cmd().bit_is_set() {}
        // Clear efuse program and check registers
        efuse.pgm_data0().write(|w| w.bits(0));
        efuse.pgm_data1().write(|w| w.bits(0));
        efuse.pgm_data2().write(|w| w.bits(0));
        efuse.pgm_data3().write(|w| w.bits(0));
        efuse.pgm_data4().write(|w| w.bits(0));
        efuse.pgm_data5().write(|w| w.bits(0));
        efuse.pgm_data6().write(|w| w.bits(0));
        efuse.pgm_data7().write(|w| w.bits(0));
        efuse.pgm_check_value0().write(|w| w.bits(0));
        efuse.pgm_check_value1().write(|w| w.bits(0));
        efuse.pgm_check_value2().write(|w| w.bits(0));
    }
    /// Whether the controller reported a programming error for `block_number`.
    /// Only block 0 and key blocks 4..=9 are checked; any other block number
    /// reports `false`.
    fn get_programming_error_record(&self, block_number: u8) -> bool {
        let efuse = &self.efuse;
        match block_number {
            0 => {
                (efuse.rd_repeat_err1().read().bits() > 0)
                    || (efuse.rd_repeat_err2().read().bits() > 0)
            }
            4 => efuse.rd_rs_err0().read().key0_fail().bit(),
            5 => efuse.rd_rs_err0().read().key1_fail().bit(),
            6 => efuse.rd_rs_err0().read().key2_fail().bit(),
            7 => efuse.rd_rs_err0().read().key3_fail().bit(),
            8 => efuse.rd_rs_err0().read().key4_fail().bit(),
            9 => efuse.rd_rs_err1().read().key5_fail().bit(),
            _ => false,
        }
    }
    /// Trigger a controller read cycle so the RD_* registers reflect the
    /// freshly burned bits.
    unsafe fn update_read_registers(&self) {
        let efuse = &self.efuse;
        // Send opcode and read command
        efuse.conf().write(|w| w.op_code().bits(READ_COMMAND));
        efuse.cmd().write(|w| w.read_cmd().set_bit());
        // Poll command register until read bit is cleared
        while efuse.cmd().read().read_cmd().bit_is_set() {}
    }
}
/// Result of discovering which key slots contain which keys
///
/// Produced by [`EfuseController::discover_efuses`]; each field is `None`
/// when no slot with the corresponding purpose was found.
#[derive(Debug, Clone, PartialEq)]
pub struct DiscoveredEfuses {
    /// Lowest-indexed slot whose purpose is `HmacUpstream`.
    pub share_encryption: Option<esp_hal::hmac::KeyId>,
    /// Second-lowest slot whose purpose is `HmacUpstream`.
    pub fixed_entropy: Option<esp_hal::hmac::KeyId>,
    /// Slot whose purpose is `Ds`, if any.
    pub ds: Option<esp_hal::hmac::KeyId>,
}
/// Builder for writing multiple efuse keys with their purposes in a single operation
pub struct EfuseKeyWriter<'a> {
    // Controller used to burn the key blocks and the Block-0 purposes.
    efuse: &'a EfuseController<'a>,
    keys: Vec<(esp_hal::hmac::KeyId, [u8; 32], KeyPurpose)>, // (key_id, value, purpose)
    // Whether to also read-protect every queued key slot.
    read_protect: bool,
}
impl<'a> EfuseKeyWriter<'a> {
/// Create a new builder for writing efuse keys
pub fn new(efuse: &'a EfuseController<'a>) -> Self {
Self {
efuse,
keys: Vec::new(),
read_protect: false,
}
}
/// Set whether to read-protect the keys
pub fn read_protect(mut self, protect: bool) -> Self {
self.read_protect = protect;
self
}
/// Add a key to be written (generic method for custom key slots)
pub fn add_key(
mut self,
key_id: esp_hal::hmac::KeyId,
value: [u8; 32],
purpose: KeyPurpose,
) -> Self {
self.keys.push((key_id, value, purpose));
self
}
/// Add the share encryption key
pub fn add_encryption_key(mut self, value: [u8; 32]) -> Self {
self.keys.push((
EfuseHmacKeys::ENCRYPTION_KEYID,
value,
KeyPurpose::HmacUpstream,
));
self
}
/// Add the fixed entropy key
pub fn add_entropy_key(mut self, value: [u8; 32]) -> Self {
self.keys.push((
EfuseHmacKeys::FIXED_ENTROPY_KEYID,
value,
KeyPurpose::HmacUpstream,
));
self
}
/// Add the DS (Digital Signature) key for hardware attestation
pub fn add_ds_key(mut self, value: [u8; 32]) -> Self {
self.keys
.push((EfuseHmacKeys::DS_KEYID, value, KeyPurpose::Ds));
self
}
/// Write all configured keys and their purposes to efuses
pub fn write_efuses(self) -> Result<(), EfuseError> {
// First write all the key values
for &(key_id, ref value, _) in &self.keys {
unsafe {
self.efuse.set_efuse_key(key_id, *value)?;
}
}
// Then write all key purposes at once (single Block 0 write)
let configs: Vec<(u8, KeyPurpose, bool)> = self
.keys
.iter()
.map(|&(key_id, _, purpose)| (key_id as u8, purpose, self.read_protect))
.collect();
unsafe {
self.efuse.write_key_purposes(&configs)?;
}
Ok(())
}
}
/// The two required device HMAC keys, each backed by an eFuse slot.
pub struct EfuseHmacKeys<'a> {
    /// Key used to derive per-share encryption keys.
    pub share_encryption: EfuseHmacKey<'a>,
    /// Key providing fixed device entropy.
    pub fixed_entropy: EfuseHmacKey<'a>,
}
impl<'a> EfuseHmacKeys<'a> {
pub const ENCRYPTION_KEYID: esp_hal::hmac::KeyId = esp_hal::hmac::KeyId::Key2;
pub const FIXED_ENTROPY_KEYID: esp_hal::hmac::KeyId = esp_hal::hmac::KeyId::Key3;
pub const DS_KEYID: esp_hal::hmac::KeyId = esp_hal::hmac::KeyId::Key4;
/// Load existing HMAC keys from eFuse memory
/// Keys must have been previously initialized
pub fn load(
efuse: &EfuseController,
hmac: Rc<RefCell<esp_hal::hmac::Hmac<'a>>>,
) -> Result<Self, EfuseError> {
// Discover which slots contain our keys
let discovered = efuse.discover_efuses();
// Ensure we have the required keys
let share_encryption_id = discovered
.share_encryption
.ok_or(EfuseError::EfuseReadError)?;
let fixed_entropy_id = discovered.fixed_entropy.ok_or(EfuseError::EfuseReadError)?;
// Create and return the key handles with discovered slots
Ok(EfuseHmacKeys {
share_encryption: EfuseHmacKey::new(hmac.clone(), share_encryption_id),
fixed_entropy: EfuseHmacKey::new(hmac, fixed_entropy_id),
})
}
}
/// eFuse key-purpose values (the 4-bit purpose field stored per key slot
/// in eFuse Block 0 on the ESP32-C3).
///
/// Discriminants match the hardware encoding; values 1–3 are reserved and
/// therefore absent. Derives `Eq` alongside `PartialEq` since this is a
/// plain C-like enum with total equality.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum KeyPurpose {
    /// Slot unused / user-defined (the erased state).
    User = 0,
    Aes128 = 4,
    HmacDownstream = 5,
    JtagHmacDownstream = 6,
    /// Digital Signature peripheral key.
    Ds = 7,
    HmacUpstream = 8,
    SecureBootDigest0 = 9,
    SecureBootDigest1 = 10,
    SecureBootDigest2 = 11,
}
impl TryFrom<u8> for KeyPurpose {
    type Error = ();
    /// Decode a raw 4-bit purpose field; reserved or out-of-range values
    /// yield `Err(())`.
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(KeyPurpose::User),
            4 => Ok(KeyPurpose::Aes128),
            5 => Ok(KeyPurpose::HmacDownstream),
            6 => Ok(KeyPurpose::JtagHmacDownstream),
            7 => Ok(KeyPurpose::Ds),
            8 => Ok(KeyPurpose::HmacUpstream),
            9 => Ok(KeyPurpose::SecureBootDigest0),
            10 => Ok(KeyPurpose::SecureBootDigest1),
            11 => Ok(KeyPurpose::SecureBootDigest2),
            _ => Err(()),
        }
    }
}
/// Errors arising from eFuse read/burn operations.
#[derive(Debug)]
pub enum EfuseError {
    /// A required key slot could not be found or read.
    EfuseReadError,
    /// Hardware reported a programming failure for the given block number.
    EfuseWriteError(u8),
    /// Refused to burn a slot whose purpose is already set.
    EfuseAlreadyBurned,
    /// Invalid argument (e.g. a key slot number out of range).
    EfuseError,
    /// Validation of written data failed (constructed elsewhere, not in this file).
    ValidationFailed,
}
/// Handle for computing HMACs with a single eFuse-resident key slot.
pub struct EfuseHmacKey<'a> {
    // Shared hardware HMAC driver (one peripheral, several key handles).
    hmac: Rc<RefCell<esp_hal::hmac::Hmac<'a>>>,
    // Which eFuse key slot the hardware loads for this handle.
    hmac_key_id: esp_hal::hmac::KeyId,
}
impl<'a> EfuseHmacKey<'a> {
    /// Bind the shared hardware HMAC driver to a specific eFuse key slot.
    pub fn new(
        hmac: Rc<RefCell<esp_hal::hmac::Hmac<'a>>>,
        hmac_key_id: esp_hal::hmac::KeyId,
    ) -> Self {
        Self { hmac, hmac_key_id }
    }
    /// Domain-separated HMAC over `input` using this handle's slot key.
    ///
    /// The message hashed is `len(domain_separator) as u8 || domain_separator
    /// || input`, so distinct separators cannot collide. Returns the 32-byte
    /// digest. Assumes `domain_separator` is shorter than 256 bytes (the
    /// length prefix is a single `u8`).
    pub fn hash(
        &mut self,
        domain_separator: &str,
        input: &[u8],
    ) -> Result<[u8; 32], esp_hal::hmac::Error> {
        let mut hmac = self.hmac.borrow_mut();
        let mut output = [0u8; 32];
        let mut remaining = input;
        hmac.init();
        nb::block!(hmac.configure(esp_hal::hmac::HmacPurpose::ToUser, self.hmac_key_id))?;
        // One-byte length prefix for the domain separator.
        let len_byte = [domain_separator.len() as u8];
        let _its_one_byte = nb::block!(hmac.update(&len_byte[..])).unwrap();
        // `update` hands back the unconsumed tail; loop until fully absorbed.
        let mut ds_remaining = domain_separator.as_bytes();
        while !ds_remaining.is_empty() {
            ds_remaining = nb::block!(hmac.update(ds_remaining)).unwrap();
        }
        while !remaining.is_empty() {
            remaining = nb::block!(hmac.update(remaining)).unwrap();
        }
        nb::block!(hmac.finalize(output.as_mut_slice())).unwrap();
        Ok(output)
    }
    /// Mix 64 bytes from `rng` with the slot key (domain "mix-in-rng") and
    /// seed a ChaCha20 RNG from the resulting digest.
    pub fn mix_in_rng(&mut self, rng: &mut impl RngCore) -> rand_chacha::ChaCha20Rng {
        let mut entropy = [0u8; 64];
        rng.fill_bytes(&mut entropy);
        let chacha_seed = self.hash("mix-in-rng", &entropy).expect("entropy hash");
        rand_chacha::ChaCha20Rng::from_seed(chacha_seed)
    }
}
impl frostsnap_core::device::DeviceSecretDerivation for EfuseHmacKey<'_> {
    /// Derive the symmetric key used to encrypt a secret share.
    ///
    /// Input layout (128 bytes): key_id || access_structure_id || party_index
    /// || coord_key, 32 bytes each, hashed under the "share-encryption" domain.
    fn get_share_encryption_key(
        &mut self,
        access_structure_ref: AccessStructureRef,
        party_index: frostsnap_core::schnorr_fun::frost::ShareIndex,
        coord_key: frostsnap_core::CoordShareDecryptionContrib,
    ) -> frostsnap_core::SymmetricKey {
        let mut src = [0u8; 128];
        src[..32].copy_from_slice(access_structure_ref.key_id.to_bytes().as_slice());
        src[32..64].copy_from_slice(
            access_structure_ref
                .access_structure_id
                .to_bytes()
                .as_slice(),
        );
        src[64..96].copy_from_slice(party_index.to_bytes().as_slice());
        src[96..128].copy_from_slice(coord_key.to_bytes().as_slice());
        let output = self.hash("share-encryption", &src).unwrap();
        frostsnap_core::SymmetricKey(output)
    }
    /// Deterministically derive a nonce seed for (stream id, index, seed material)
    /// under the "nonce-seed" domain.
    fn derive_nonce_seed(
        &mut self,
        nonce_stream_id: frostsnap_core::nonce_stream::NonceStreamId,
        index: u32,
        seed_material: &[u8; 32],
    ) -> [u8; 32] {
        let mut input = [0u8; 52]; // 16 (stream_id) + 4 (index) + 32 (seed_material)
        input[..16].copy_from_slice(nonce_stream_id.to_bytes().as_slice());
        input[16..20].copy_from_slice(&index.to_be_bytes());
        input[20..52].copy_from_slice(seed_material);
        self.hash("nonce-seed", &input).unwrap()
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/device_config.rs | device/src/device_config.rs | // Set to false if we are debugging on UART0
// When true, the crate-local `println!` below is a no-op.
pub const SILENCE_PRINTS: bool = true;
/// Crate-local `println!` that forwards to `esp_println::println!` only when
/// [`SILENCE_PRINTS`] is false; otherwise the output is suppressed.
#[macro_export]
macro_rules! println {
    ($($arg:tt)*) => {
        {
            if !$crate::device_config::SILENCE_PRINTS {
                esp_println::println!($($arg)*);
            }
        }
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/io.rs | device/src/io.rs | use alloc::format;
use bincode::de::read::Reader;
use bincode::enc::write::Writer;
use bincode::error::DecodeError;
use bincode::error::EncodeError;
use core::convert::Infallible;
use core::marker::PhantomData;
use esp_hal::uart::{AnyUart, Uart};
use esp_hal::Blocking;
use esp_hal::{prelude::*, timer, uart, usb_serial_jtag::UsbSerialJtag};
use frostsnap_comms::Direction;
use frostsnap_comms::MagicBytes;
use frostsnap_comms::ReceiveSerial;
use frostsnap_comms::BINCODE_CONFIG;
use crate::uart_interrupt::RX_FIFO_THRESHOLD;
use crate::uart_interrupt::{UartHandle, UartNum, UartReceiver};
/// A framed serial link in one direction (`D`) with magic-byte handshaking.
pub struct SerialInterface<'a, T, D> {
    // Underlying byte transport: hardware UART or USB-serial JTAG.
    io: SerialIo<'a>,
    // How many magic bytes have been matched so far (persists across reads).
    magic_bytes_progress: usize,
    // Timer used for per-byte read timeouts during decoding.
    timer: &'a T,
    // Zero-sized marker fixing the link direction at the type level.
    direction: PhantomData<D>,
}
impl<'a, T, D> SerialInterface<'a, T, D> {
    /// Construct a serial interface over a hardware UART.
    ///
    /// Applies the project baud rate and RX FIFO threshold, then registers the
    /// UART with the interrupt layer, which owns it from here on.
    pub fn new_uart(
        mut uart: Uart<'static, Blocking, AnyUart>,
        uart_num: UartNum,
        timer: &'a T,
    ) -> Self {
        // Configure UART with standard settings
        let serial_conf = uart::Config {
            baudrate: frostsnap_comms::BAUDRATE,
            rx_fifo_full_threshold: RX_FIFO_THRESHOLD,
            ..Default::default()
        };
        uart.apply_config(&serial_conf).unwrap();
        // Register UART for interrupt handling
        let (handle, consumer) = crate::uart_interrupt::register_uart(uart, uart_num);
        Self {
            io: SerialIo::Uart {
                handle,
                uart_num,
                consumer,
            },
            magic_bytes_progress: 0,
            timer,
            direction: PhantomData,
        }
    }
    /// Whether this interface is backed by USB-serial JTAG (vs a UART).
    pub fn is_jtag(&self) -> bool {
        matches!(self.io, SerialIo::Jtag { .. })
    }
}
impl<'a, T, D> SerialInterface<'a, T, D> {
    /// Construct a serial interface backed by the USB-serial JTAG peripheral.
    pub fn new_jtag(jtag: UsbSerialJtag<'a, Blocking>, timer: &'a T) -> Self {
        let io = SerialIo::Jtag {
            jtag,
            peek_byte: None,
        };
        Self {
            io,
            magic_bytes_progress: 0,
            timer,
            direction: PhantomData,
        }
    }
}
impl<'a, T, D> SerialInterface<'a, T, D>
where
    T: timer::Timer,
    D: Direction,
{
    /// Pull any pending bytes from the transport into the local queue.
    pub fn fill_buffer(&mut self) {
        // Let the SerialIo implementation handle filling its queue
        self.io.fill_queue();
    }
    /// Advance the magic-byte matcher over currently available input.
    ///
    /// Returns `true` once the full magic-byte sequence for direction `D`
    /// has been seen and consumed; match progress persists across calls.
    pub fn find_and_remove_magic_bytes(&mut self) -> bool {
        self.fill_buffer();
        // Check if there's any data available
        if !self.io.has_data() {
            return false;
        }
        let magic_bytes_progress = self.magic_bytes_progress;
        let (progress, found) = frostsnap_comms::make_progress_on_magic_bytes::<D>(
            core::iter::from_fn(|| self.io.read_byte()),
            magic_bytes_progress,
        );
        self.magic_bytes_progress = progress;
        found.is_some()
    }
    /// Encode and send a message to the peer, then start a non-blocking flush.
    pub fn send(
        &mut self,
        message: <D::Opposite as Direction>::RecvType,
    ) -> Result<(), bincode::error::EncodeError> {
        bincode::encode_into_writer(
            ReceiveSerial::<D::Opposite>::Message(message),
            &mut *self,
            BINCODE_CONFIG,
        )?;
        self.io.nb_flush();
        Ok(())
    }
    /// Try to decode one incoming frame; `None` when no data is buffered.
    pub fn receive(&mut self) -> Option<Result<ReceiveSerial<D>, bincode::error::DecodeError>>
    where
        ReceiveSerial<D>: bincode::Decode<()>,
    {
        self.fill_buffer();
        if self.io.has_data() {
            Some(bincode::decode_from_reader(self, BINCODE_CONFIG))
        } else {
            None
        }
    }
    /// Announce ourselves to the peer with this direction's magic bytes.
    pub fn write_magic_bytes(&mut self) -> Result<(), bincode::error::EncodeError> {
        bincode::encode_into_writer(
            ReceiveSerial::<D::Opposite>::MagicBytes(MagicBytes::default()),
            &mut *self,
            BINCODE_CONFIG,
        )?;
        self.io.nb_flush();
        Ok(())
    }
    /// Pass the "conch" (turn-taking token) to the peer.
    pub fn write_conch(&mut self) -> Result<(), bincode::error::EncodeError> {
        bincode::encode_into_writer(
            ReceiveSerial::<D::Opposite>::Conch,
            &mut *self,
            BINCODE_CONFIG,
        )?;
        self.io.nb_flush();
        Ok(())
    }
    /// Tell the peer to reset. Uses a *blocking* flush so the frame
    /// definitely leaves the device before the caller proceeds.
    pub fn send_reset_signal(&mut self) -> Result<(), bincode::error::EncodeError> {
        bincode::encode_into_writer(
            ReceiveSerial::<D::Opposite>::Reset,
            &mut *self,
            BINCODE_CONFIG,
        )?;
        self.flush();
        Ok(())
    }
    /// Blocking flush
    pub fn flush(&mut self) {
        self.io.flush()
    }
    /// Direct access to the underlying transport.
    pub fn inner_mut(&mut self) -> &mut SerialIo<'a> {
        &mut self.io
    }
    /// Try to read a byte without blocking
    pub fn read_byte(&mut self) -> nb::Result<u8, core::convert::Infallible> {
        // Then try to read
        if let Some(byte) = self.io.read_byte() {
            Ok(byte)
        } else {
            Err(nb::Error::WouldBlock)
        }
    }
}
impl<T, D> Reader for SerialInterface<'_, T, D>
where
    T: timer::Timer,
    D: Direction,
{
    /// Blocking read with a 1-second per-byte timeout.
    ///
    /// Polls the transport for each requested byte, refilling the queue while
    /// waiting. If a byte does not arrive within 1,000 ms, decoding aborts
    /// with `DecodeError::UnexpectedEnd`.
    fn read(&mut self, bytes: &mut [u8]) -> Result<(), DecodeError> {
        for (i, target_byte) in bytes.iter_mut().enumerate() {
            let start_time = self.timer.now();
            *target_byte = loop {
                if let Some(next_byte) = self.io.read_byte() {
                    break next_byte;
                }
                self.fill_buffer();
                if self
                    .timer
                    .now()
                    .checked_duration_since(start_time)
                    .unwrap()
                    .to_millis()
                    > 1_000
                {
                    // `additional` reports how many bytes are still needed:
                    // bytes[..i] were filled, so byte `i` onward are missing,
                    // i.e. `bytes.len() - i` bytes. (Was `bytes.len() - i + 1`,
                    // which over-reported the shortfall by one.)
                    return Err(DecodeError::UnexpectedEnd {
                        additional: bytes.len() - i,
                    });
                }
            };
        }
        Ok(())
    }
}
impl<T, D> Writer for SerialInterface<'_, T, D> {
    /// Push encoded bytes straight to the underlying serial transport,
    /// translating any transport error into a bincode `EncodeError`.
    fn write(&mut self, bytes: &[u8]) -> Result<(), EncodeError> {
        self.io
            .write_bytes(bytes)
            .map_err(|e| EncodeError::OtherString(format!("{e:?}")))
    }
}
/// The physical byte transport behind a [`SerialInterface`].
pub enum SerialIo<'a> {
    /// Interrupt-driven hardware UART.
    Uart {
        // Handle for writes, baud changes and manual buffer fills.
        handle: UartHandle,
        uart_num: UartNum,
        // Consumer end of the interrupt-filled RX queue.
        consumer: UartReceiver,
    },
    /// USB-serial JTAG with a one-byte lookahead buffer.
    Jtag {
        jtag: UsbSerialJtag<'a, Blocking>,
        // Byte read during polling but not yet consumed.
        peek_byte: Option<u8>,
    },
}
impl SerialIo<'_> {
    /// Check if data is available without consuming it
    pub fn has_data(&self) -> bool {
        match self {
            SerialIo::Uart { consumer, .. } => consumer.peek().is_some(),
            SerialIo::Jtag { peek_byte, .. } => peek_byte.is_some(),
        }
    }
    /// Internal method to read a byte from the appropriate source
    pub fn read_byte(&mut self) -> Option<u8> {
        match self {
            SerialIo::Uart {
                consumer, handle, ..
            } => match consumer.dequeue() {
                Some(byte) => Some(byte),
                None => {
                    // Queue empty: drain the hardware FIFO once, then retry.
                    handle.fill_buffer();
                    consumer.dequeue()
                }
            },
            SerialIo::Jtag { jtag, peek_byte } => {
                // First check if we have a peeked byte
                if let Some(byte) = peek_byte.take() {
                    Some(byte)
                } else {
                    // Otherwise try to read directly
                    jtag.read_byte().ok()
                }
            }
        }
    }
    /// Switch the UART to a new baud rate (no-op for JTAG).
    /// Flushes first so queued output leaves at the old rate.
    pub fn change_baud(&mut self, baudrate: u32) {
        self.flush();
        match self {
            SerialIo::Uart { handle, .. } => {
                handle.change_baud(baudrate);
            }
            SerialIo::Jtag { .. } => { /* no baud rate for USB jtag */ }
        }
    }
    /// Opportunistically pull pending input into the software queue.
    pub fn fill_queue(&mut self) {
        match self {
            SerialIo::Uart { handle, .. } => {
                // Fill buffer with any bytes that haven't triggered an interrupt (< threshold)
                handle.fill_buffer();
            }
            SerialIo::Jtag { jtag, peek_byte } => {
                // For JTAG, fill the peek byte if empty
                if peek_byte.is_none() {
                    *peek_byte = jtag.read_byte().ok();
                }
            }
        }
    }
    /// Non-blocking single-byte write; `WouldBlock` when the sink is busy.
    pub fn write_byte_nb(&mut self, byte: u8) -> nb::Result<(), Infallible> {
        match self {
            SerialIo::Jtag { jtag, .. } => jtag.write_byte_nb(byte),
            SerialIo::Uart { handle, .. } => {
                // write_bytes is blocking, so we need to check if there's space first
                // For now, use write_bytes and convert the error
                match handle.write_bytes(&[byte]) {
                    Ok(_) => Ok(()),
                    Err(_) => Err(nb::Error::WouldBlock), // Assume any error is WouldBlock
                }
            }
        }
    }
    /// Write a whole buffer. UART errors are surfaced; JTAG writes are
    /// infallible.
    pub fn write_bytes(&mut self, bytes: &[u8]) -> Result<(), SerialInterfaceError> {
        match self {
            SerialIo::Uart { handle, .. } => handle
                .write_bytes(bytes)
                .map_err(SerialInterfaceError::UartWriteError)?,
            SerialIo::Jtag { jtag, .. } => {
                let _infallible = jtag.write_bytes(bytes);
            }
        }
        Ok(())
    }
    /// Non-blocking flush; only meaningful for the JTAG transport.
    pub fn nb_flush(&mut self) {
        match self {
            SerialIo::Uart { .. } => {
                // there is no reason to call this on uart. It will just block until data is
                // actually written.
            }
            SerialIo::Jtag { jtag, .. } => {
                // JTAG actually does need to get flushed sometimes. We don't need to block on it
                // though so ignore return value.
                let _ = jtag.flush_tx_nb();
            }
        }
    }
    // Blocking flush. The only time to use this is to make sure everything has been written before
    // moving onto something else. Usually you don't want this but it's necessary to do if you write
    // something before resetting.
    pub fn flush(&mut self) {
        match self {
            SerialIo::Uart { handle, .. } => {
                // just waits until everything has been written
                while let Err(nb::Error::WouldBlock) = handle.flush_tx() {
                    // wait
                }
            }
            SerialIo::Jtag { jtag, .. } => {
                // flushes and waits until everything has been written
                let _ = jtag.flush_tx();
            }
        }
    }
}
/// Errors from the serial transport layer.
#[derive(Debug)]
pub enum SerialInterfaceError {
    /// UART read failed (not constructed in this file).
    UartReadError,
    /// UART write failed with the underlying HAL error.
    UartWriteError(uart::Error),
    /// JTAG transport error (not constructed in this file).
    JtagError,
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/touch_handler.rs | device/src/touch_handler.rs | use embedded_graphics::pixelcolor::Rgb565;
use embedded_graphics::prelude::Point;
use frostsnap_cst816s::{interrupt::TouchReceiver, TouchGesture};
use frostsnap_widgets::{debug::OverlayDebug, DynWidget, Widget};
use crate::touch_calibration::adjust_touch_point;
/// Process all pending touch events from the receiver
/// Drain every queued touch event and route it into the widget tree.
///
/// Horizontal swipes toggle between the main widget (index 0) and the debug
/// log overlay (index 1); vertical drags and plain touches are forwarded to
/// the widget itself.
pub fn process_all_touch_events<W>(
    touch_receiver: &mut TouchReceiver,
    widget: &mut OverlayDebug<W>,
    last_touch: &mut Option<Point>,
    current_widget_index: &mut usize,
    now_ms: frostsnap_widgets::Instant,
) where
    W: Widget<Color = Rgb565>,
{
    while let Some(event) = touch_receiver.dequeue() {
        // Calibrate the raw panel coordinates first.
        let touch_point = adjust_touch_point(Point::new(event.x, event.y));
        let lift_up = event.action == 1;
        let gesture = event.gesture;
        let is_vertical_drag =
            matches!(gesture, TouchGesture::SlideUp | TouchGesture::SlideDown);
        let is_horizontal_swipe =
            matches!(gesture, TouchGesture::SlideLeft | TouchGesture::SlideRight);
        // A completed horizontal swipe flips between main view and debug log.
        if is_horizontal_swipe && lift_up {
            match gesture {
                // Swipe left while on the main view: reveal the debug log.
                TouchGesture::SlideLeft if *current_widget_index == 0 => {
                    *current_widget_index = 1;
                    widget.show_logs();
                }
                // Swipe right while on the log: back to the main view.
                TouchGesture::SlideRight if *current_widget_index == 1 => {
                    *current_widget_index = 0;
                    widget.show_main();
                }
                _ => {}
            }
        }
        // Vertical drags get the previous y (if any) so the widget can scroll.
        if is_vertical_drag {
            let prev_y = last_touch.map(|p| p.y as u32);
            widget.handle_vertical_drag(prev_y, touch_point.y as u32, lift_up);
        }
        // Non-drag touches — and the final release of a drag — go through the
        // ordinary touch path so release is always observed after dragging.
        if !is_vertical_drag || lift_up {
            widget.handle_touch(touch_point, now_ms, lift_up);
        }
        // Remember where the finger is for the next drag delta.
        *last_touch = if lift_up { None } else { Some(touch_point) };
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/panic.rs | device/src/panic.rs | use crate::init_display;
use crate::peripherals::NoCs;
use embedded_graphics::{prelude::*, text::Alignment};
use frostsnap_widgets::string_ext::StringFixed;
// Glyph width in pixels of FONT_6X10, used to compute text wrap width.
const FONT_WIDTH: u32 = 6;
/// Unified panic handler for Frostsnap devices
///
/// This function handles panics by displaying the panic information on the
/// screen — a red "ERROR" banner over a dark blue background with white text
/// (the code clears to `CSS_DARK_BLUE`, not green) — plus a support contact
/// line, then parking the CPU forever.
pub fn handle_panic(info: &core::panic::PanicInfo) -> ! {
    use core::fmt::Write;
    use embedded_graphics::pixelcolor::Rgb565;
    use esp_hal::{
        delay::Delay,
        gpio::{Level, Output},
        peripherals::Peripherals,
    };
    unsafe {
        // stop all interrupts. Interrupt handlers may panic after this panic
        // handler otherwise and clobber the first panic message.
        critical_section::acquire();
    }
    // Forcibly take the peripherals: normal ownership no longer matters
    // mid-panic and we need direct access to drive the display.
    let mut peripherals = unsafe { Peripherals::steal() };
    // GPIO1 is the display backlight; keep it off until we've drawn.
    let mut bl = Output::new(&mut peripherals.GPIO1, Level::Low);
    let mut delay = Delay::new();
    let mut display = init_display!(peripherals: peripherals, delay: &mut delay);
    let display_size = display.bounding_box().size;
    let _ = display.clear(Rgb565::CSS_DARK_BLUE);
    // Draw red ERROR header
    use embedded_graphics::mono_font::{ascii::FONT_10X20, MonoTextStyle};
    use embedded_graphics::primitives::{PrimitiveStyleBuilder, Rectangle};
    use embedded_graphics::text::{Text, TextStyle};
    let error_rect = Rectangle::new(Point::new(0, 0), Size::new(display_size.width, 40));
    let _ = error_rect
        .into_styled(PrimitiveStyleBuilder::new().fill_color(Rgb565::RED).build())
        .draw(&mut display);
    let text_style = MonoTextStyle::new(&FONT_10X20, Rgb565::WHITE);
    let _ = Text::with_text_style(
        "ERROR",
        Point::new(display_size.width as i32 / 2, 25), // Centered horizontally
        text_style,
        TextStyle::with_alignment(Alignment::Center),
    )
    .draw(&mut display);
    // Format "file:line <panic message>", wrapped to the display width.
    let mut panic_buf = StringFixed::<512>::with_wrap((display_size.width / FONT_WIDTH) as usize);
    let _ = match info.location() {
        Some(location) => write!(
            &mut panic_buf,
            "{}:{} {}",
            location.file().split('/').next_back().unwrap_or(""),
            location.line(),
            info
        ),
        None => write!(&mut panic_buf, "{}", info),
    };
    // Draw panic text
    let _ = embedded_graphics::text::Text::with_alignment(
        panic_buf.as_str(),
        embedded_graphics::geometry::Point::new(0, 50), // Move panic text below header
        embedded_graphics::mono_font::MonoTextStyle::new(
            &embedded_graphics::mono_font::ascii::FONT_6X10,
            Rgb565::CSS_WHITE,
        ),
        Alignment::Left,
    )
    .draw(&mut display);
    // Draw contact info at bottom
    let contact_style = MonoTextStyle::new(&FONT_10X20, Rgb565::CSS_WHITE);
    let _ = Text::with_text_style(
        "Contact\nsupport@frostsnap.com",
        Point::new(display_size.width as i32 / 2, 240), // Centered horizontally
        contact_style,
        TextStyle::with_alignment(Alignment::Center),
    )
    .draw(&mut display);
    // Everything is drawn; turn the backlight on and halt forever.
    bl.set_high();
    #[allow(clippy::empty_loop)]
    loop {}
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/widget_tree.rs | device/src/widget_tree.rs | use alloc::boxed::Box;
use frostsnap_core::{
device::{restoration::EnterBackupPhase, KeyGenPhase3, SignPhase1},
AccessStructureRef,
};
use frostsnap_widgets::{
backup::{BackupDisplay, EnterShareScreen},
keygen_check::KeygenCheck,
layout::*,
sign_prompt::SignTxPrompt,
DeviceNameScreen, FirmwareUpgradeConfirm, FirmwareUpgradeProgress, HoldToConfirm,
SignMessageConfirm, Standby, Text,
};
use crate::ui::FirmwareUpgradeStatus;
/// The widget tree represents the current UI state as a tree of widgets
///
/// Variants that drive a multi-step core-protocol flow carry the protocol
/// `phase` object alongside the screen widget. NOTE(review): the phase is
/// presumably taken (`Option::take`) when the user completes the screen —
/// consumption happens outside this file; confirm against the event loop.
#[derive(frostsnap_macros::Widget)]
#[widget_crate(frostsnap_widgets)]
pub enum WidgetTree {
    /// Standby screen (can show startup/empty, welcome, or key info)
    Standby(Box<Standby>),
    /// Device naming screen
    DeviceNaming(Box<DeviceNameScreen>),
    /// Keygen confirmation screen
    KeygenCheck {
        widget: Box<KeygenCheck>,
        phase: Option<Box<KeyGenPhase3>>,
    },
    /// Sign transaction prompt screen
    SignTxPrompt {
        widget: Box<SignTxPrompt>,
        phase: Option<Box<SignPhase1>>,
    },
    /// Sign test message prompt screen
    SignTestPrompt {
        widget: Box<SignMessageConfirm>,
        phase: Option<Box<SignPhase1>>,
    },
    /// Firmware upgrade confirmation screen
    FirmwareUpgradeConfirm {
        widget: Box<FirmwareUpgradeConfirm>,
        firmware_hash: [u8; 32],
        firmware_size: u32,
        confirmed: bool,
    },
    /// Firmware upgrade progress screen
    FirmwareUpgradeProgress {
        widget: Box<FirmwareUpgradeProgress>,
        status: FirmwareUpgradeStatus,
    },
    /// New name confirmation prompt
    NewNamePrompt {
        widget: Box<HoldToConfirm<Text>>,
        new_name: Option<frostsnap_comms::DeviceName>,
    },
    /// Device wipe confirmation prompt
    WipeDevicePrompt {
        widget: Box<HoldToConfirm<Text>>,
        confirmed: bool,
    },
    /// Display backup screen
    DisplayBackup {
        widget: Box<BackupDisplay>,
        access_structure_ref: Option<AccessStructureRef>,
    },
    /// Display Bitcoin address screen with derivation path
    AddressDisplay(Box<Center<frostsnap_widgets::AddressWithPath>>),
    /// Enter backup screen
    EnterBackup {
        widget: Box<EnterShareScreen>,
        phase: Option<EnterBackupPhase>,
    },
}
impl Default for WidgetTree {
    /// A freshly booted device starts on the standby screen.
    fn default() -> Self {
        let standby = Box::new(Standby::new());
        WidgetTree::Standby(standby)
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/resources.rs | device/src/resources.rs | //! Device resources including provisioned crypto state and partitions
use alloc::boxed::Box;
use core::cell::RefCell;
use esp_storage::FlashStorage;
use frostsnap_comms::{Downstream, Upstream};
use rand_chacha::ChaCha20Rng;
use crate::{
ds::HardwareDs,
efuse::EfuseHmacKeys,
flash::VersionedFactoryData,
frosty_ui::FrostyUi,
io::SerialInterface,
ota::OtaPartitions,
partitions::{EspFlashPartition, Partitions},
peripherals::DevicePeripherals,
};
/// Type alias for serial interfaces
/// (`D` is the link direction; timing comes from TIMG0's timer 0).
type Serial<'a, D> = SerialInterface<'a, Timer<Timer0<TIMG0>, Blocking>, D>;
use esp_hal::{
gpio::{AnyPin, Input},
peripherals::TIMG0,
rsa::Rsa,
sha::Sha,
timer::timg::{Timer, Timer0},
uart::Uart,
usb_serial_jtag::UsbSerialJtag,
Blocking,
};
/// Device resources containing provisioned state and runtime partitions
///
/// Bundles everything the main run loop needs: crypto state derived from
/// eFuses, flash partitions, the UI, and the serial links in both directions.
pub struct Resources<'a> {
    /// Provisioned RNG with device entropy mixed in
    pub rng: ChaCha20Rng,
    /// HMAC keys from efuses
    pub hmac_keys: EfuseHmacKeys<'a>,
    /// Hardware Ds for attestation (None for dev devices)
    pub ds: Option<HardwareDs<'a>>,
    /// RSA hardware accelerator
    pub rsa: Rsa<'a, Blocking>,
    /// Factory certificate (for production devices)
    pub certificate: Option<frostsnap_comms::genuine_certificate::Certificate>,
    /// NVS partition for mutation log
    pub nvs: EspFlashPartition<'a>,
    /// OTA partitions for firmware updates
    pub ota: OtaPartitions<'a>,
    /// User interface
    pub ui: FrostyUi<'a>,
    // Runtime peripherals needed by esp32_run
    pub timer: &'a Timer<Timer0<TIMG0>, Blocking>,
    pub sha256: Sha<'a>,
    /// Serial link toward the coordinator (UART1 or USB-serial JTAG).
    pub upstream_serial: Serial<'a, Upstream>,
    /// Serial link toward the next device in the chain (UART0).
    pub downstream_serial: Serial<'a, Downstream>,
    /// GPIO input that detects whether a downstream device is plugged in.
    pub downstream_detect: Input<'a, AnyPin>,
}
impl<'a> Resources<'a> {
    /// Create serial interfaces from UARTs and JTAG.
    ///
    /// The upstream link uses UART1 when the upstream-detect pin reads low
    /// (the code treats low as "device detected"); otherwise it falls back to
    /// USB serial JTAG. The downstream link always uses UART0.
    fn create_serial_interfaces(
        timer: &'static Timer<Timer0<TIMG0>, Blocking>,
        uart_upstream: Option<Uart<'static, Blocking>>,
        uart_downstream: Uart<'static, Blocking>,
        jtag: UsbSerialJtag<'a, Blocking>,
        upstream_detect: &Input<'a, AnyPin>,
    ) -> (Serial<'a, Upstream>, Serial<'a, Downstream>) {
        let detect_device_upstream = upstream_detect.is_low();
        let upstream_serial = if detect_device_upstream {
            log!("upstream set to uart");
            // The UART is only constructed by DevicePeripherals::init when the
            // detect pin was low, so it must be present here.
            let uart = uart_upstream.expect("upstream UART should exist when detected");
            SerialInterface::new_uart(uart, crate::uart_interrupt::UartNum::Uart1, timer)
        } else {
            log!("upstream set to jtag");
            SerialInterface::new_jtag(jtag, timer)
        };
        let downstream_serial = SerialInterface::new_uart(
            uart_downstream,
            crate::uart_interrupt::UartNum::Uart0,
            timer,
        );
        (upstream_serial, downstream_serial)
    }
    /// Initialize resources for production device
    /// Factory data is required for production devices
    ///
    /// Panics if factory data is missing or the HMAC key efuses were never
    /// burned — both are hard requirements for production hardware.
    pub fn init_production(
        peripherals: Box<DevicePeripherals<'a>>,
        flash: &'a RefCell<FlashStorage>,
    ) -> Box<Self> {
        let (partitions, factory_data) = Self::read_flash_data(flash);
        // Production devices must have factory data
        let factory_data = factory_data.expect("Production device must have factory data");
        // Production devices must be provisioned at the factory
        if !peripherals.efuse.has_hmac_keys_initialized() {
            panic!("Production device must be provisioned at the factory!");
        }
        // Destructure peripherals to take what we need
        let DevicePeripherals {
            timer,
            ui_timer,
            display,
            touch_receiver,
            sha256,
            ds,
            rsa,
            hmac,
            efuse,
            uart_upstream,
            uart_downstream,
            jtag,
            upstream_detect,
            downstream_detect,
            mut initial_rng,
            ..
        } = *peripherals;
        // Load existing keys using the moved hmac
        let mut hmac_keys = EfuseHmacKeys::load(&efuse, hmac.clone())
            .expect("Failed to load HMAC keys from efuses");
        // Mix efuse-derived fixed entropy into the boot-time RNG.
        let rng: ChaCha20Rng = hmac_keys.fixed_entropy.mix_in_rng(&mut initial_rng);
        // Create UI with display and touch receiver (using ui_timer)
        let ui = FrostyUi::new(display, touch_receiver, ui_timer);
        // Extract factory data
        let factory = factory_data.into_factory_data();
        // Create HardwareDs for production devices
        let ds = Some(HardwareDs::new(ds, factory.ds_encrypted_params.clone()));
        let rsa = Rsa::new(rsa);
        // Extract certificate from factory data
        let certificate = Some(factory.certificate);
        // Create serial interfaces
        let (upstream_serial, downstream_serial) = Self::create_serial_interfaces(
            timer,
            uart_upstream,
            uart_downstream,
            jtag,
            &upstream_detect,
        );
        Box::new(Self {
            rng,
            hmac_keys,
            ds,
            rsa,
            certificate,
            nvs: partitions.nvs,
            ota: partitions.ota,
            ui,
            timer,
            sha256,
            upstream_serial,
            downstream_serial,
            downstream_detect,
        })
    }
    /// Initialize resources for development device
    /// Factory data is optional for dev devices
    ///
    /// NOTE(review): this largely mirrors `init_production` except factory
    /// data is optional — consider extracting the shared sequence if both
    /// paths evolve together.
    pub fn init_dev(
        peripherals: Box<DevicePeripherals<'a>>,
        flash: &'a RefCell<FlashStorage>,
    ) -> Box<Self> {
        let (partitions, factory_data) = Self::read_flash_data(flash);
        // Dev devices must be provisioned before reaching this point
        if !peripherals.efuse.has_hmac_keys_initialized() {
            panic!("Dev device must be provisioned before initialization!");
        }
        // Destructure peripherals to take what we need
        let DevicePeripherals {
            timer,
            ui_timer,
            display,
            touch_receiver,
            sha256,
            ds,
            rsa,
            hmac,
            efuse,
            uart_upstream,
            uart_downstream,
            jtag,
            upstream_detect,
            downstream_detect,
            mut initial_rng,
            ..
        } = *peripherals;
        // Load existing keys using the moved hmac
        let mut hmac_keys = EfuseHmacKeys::load(&efuse, hmac.clone())
            .expect("Failed to load HMAC keys from efuses");
        let rng: ChaCha20Rng = hmac_keys.fixed_entropy.mix_in_rng(&mut initial_rng);
        // Create UI with display and touch receiver (using ui_timer)
        let ui = FrostyUi::new(display, touch_receiver, ui_timer);
        // Create HardwareDs if factory data is present (dev devices might have it)
        let (ds, certificate) = if let Some(factory_data) = factory_data {
            let factory = factory_data.into_factory_data();
            (
                Some(HardwareDs::new(ds, factory.ds_encrypted_params)),
                Some(factory.certificate),
            )
        } else {
            // Dev device without factory data - no hardware RSA
            (None, None)
        };
        let rsa = Rsa::new(rsa);
        // Create serial interfaces
        let (upstream_serial, downstream_serial) = Self::create_serial_interfaces(
            timer,
            uart_upstream,
            uart_downstream,
            jtag,
            &upstream_detect,
        );
        Box::new(Self {
            rng,
            hmac_keys,
            ds,
            certificate,
            rsa,
            nvs: partitions.nvs,
            ota: partitions.ota,
            ui,
            timer,
            sha256,
            upstream_serial,
            downstream_serial,
            downstream_detect,
        })
    }
    /// Read flash partitions and data common to both dev and prod.
    ///
    /// Returns the loaded partition table plus the factory data, which is
    /// `None` when the factory-cert partition has no readable data (the
    /// normal case for unprovisioned dev devices).
    fn read_flash_data(
        flash: &'a RefCell<FlashStorage>,
    ) -> (Partitions<'a>, Option<VersionedFactoryData>) {
        // Load all partitions
        let partitions = Partitions::load(flash);
        // Try to read factory data (may not exist on dev devices)
        let factory_data = VersionedFactoryData::read(partitions.factory_cert).ok();
        (partitions, factory_data)
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/firmware_size.rs | device/src/firmware_size.rs | //! # ESP32 Firmware Size Calculation
//!
//! This module implements an algorithm to determine the actual size of ESP32 firmware
//! stored in a flash partition. This is necessary because ESP32 firmware partitions
//! are fixed-size containers filled with 0xFF padding, but we need to know where the
//! actual firmware content ends.
//!
//! ## Background
//!
//! When firmware is written to an ESP32 partition:
//! - The partition has a fixed size (e.g., 1.25MB)
//! - The actual firmware might be much smaller (e.g., 500KB)
//! - The remaining space is filled with 0xFF bytes (erased flash state)
//! - The device has no filesystem to track the actual firmware size
//!
//! When Secure Boot v2 is enabled, firmware includes additional signature blocks:
//! - Signature blocks are 4KB sectors containing RSA signatures and public keys
//! - They are identified by magic bytes: `0xE7, 0x02, 0x00, 0x00`
//! - The algorithm scans all sectors to locate signature blocks rather than
//! trying to predict complex MMU page alignment schemes
//!
//! ## Algorithm
//!
//! The algorithm mimics what `espflash` does to calculate firmware size:
//!
//! 1. **Read the image header** (24 bytes) containing magic byte 0xE9 and segment count
//! 2. **Parse all segments** sequentially to find where the last one ends
//! 3. **Apply 16-byte alignment** as required by the ESP32 bootloader
//! 4. **Add 32 bytes if a SHA256 digest is appended** (`append_digest == 1`)
//! 5. **Scan for Secure Boot v2 signature blocks** and include their size if present
//!
//! ## Why This Is Needed
//!
//! This allows us to:
//! - Calculate the same SHA256 hash that external tools like `sha256sum` would produce
//! - Verify firmware integrity by comparing device-calculated and externally-calculated hashes
//! - Support deterministic/reproducible builds where the same source produces identical binaries
//! - Most importantly, implement secure boot.
//!
//! ## Future Improvements
//!
//! ESP-HAL v1.0 will provide APIs for parsing ESP32 image formats, but we cannot use
//! them yet as v1.0 hasn't been properly released. Once a stable release is available
//! and we upgrade, we should consider using the official parsing utilities instead of
//! this implementation.
use crate::partitions::EspFlashPartition;
use alloc::string::String;
use frostsnap_embedded::{FlashPartition, SECTOR_SIZE};
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
/// ESP32 application image header (the first 24 bytes of the image).
///
/// Field layout mirrors the on-flash format; see `firmware_size` for how it
/// is parsed byte-by-byte from the first sector.
pub struct ImageHeader {
    // Must equal ESP_MAGIC (0xE9) for a valid image.
    pub magic: u8,
    // Number of segment headers that follow; validated against MAX_SEGMENTS.
    pub segment_count: u8,
    pub flash_mode: u8,
    pub flash_config: u8,
    pub entry: u32,
    // extended header part
    pub wp_pin: u8,
    pub clk_q_drv: u8,
    pub d_cs_drv: u8,
    pub gd_wp_drv: u8,
    pub chip_id: u16,
    pub min_rev: u8,
    pub min_chip_rev_full: u16,
    pub max_chip_rev_full: u16,
    pub reserved: [u8; 4],
    // When 1, a 32-byte SHA256 digest is appended after the padded image.
    pub append_digest: u8,
}
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
/// Header preceding each segment: load address + payload length (both LE u32).
pub struct SegmentHeader {
    pub addr: u32,
    pub length: u32,
}
#[derive(Debug)]
/// Errors that can occur while determining firmware size from a partition.
pub enum FirmwareSizeError {
    /// A flash read failed; the message carries the underlying error.
    IoError(String),
    /// First byte of the image was not ESP_MAGIC (0xE9).
    InvalidMagic(u8),
    /// First sector is too small to contain the 24-byte image header.
    InvalidHeaderSize,
    /// Segment count was 0 or above MAX_SEGMENTS.
    InvalidSegmentCount(u8),
    /// A segment claimed a length above MAX_SEGMENT_SIZE.
    SegmentTooLarge(u32),
    /// A read would go past the end of the partition.
    SectorOutOfBounds(u32),
    /// Segment header arithmetic overflowed or spanned past the partition.
    CorruptedSegmentHeader,
}
impl core::fmt::Display for FirmwareSizeError {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
FirmwareSizeError::IoError(msg) => write!(f, "I/O error: {}", msg),
FirmwareSizeError::InvalidMagic(magic) => write!(
f,
"Invalid firmware header magic: 0x{:02X}, expected 0xE9",
magic
),
FirmwareSizeError::InvalidHeaderSize => write!(f, "Firmware header too small"),
FirmwareSizeError::InvalidSegmentCount(count) => {
write!(f, "Invalid segment count: {}", count)
}
FirmwareSizeError::SegmentTooLarge(size) => {
write!(f, "Segment size too large: {} bytes", size)
}
FirmwareSizeError::SectorOutOfBounds(sector) => {
write!(f, "Sector {} is out of bounds", sector)
}
FirmwareSizeError::CorruptedSegmentHeader => write!(f, "Corrupted segment header"),
}
}
}
// Constants from espflash
/// Magic byte that starts every valid ESP32 application image.
pub const ESP_MAGIC: u8 = 0xE9;
/// 24 bytes: size of the packed ImageHeader.
pub const HEADER_SIZE: usize = core::mem::size_of::<ImageHeader>();
/// 8 bytes: size of the packed SegmentHeader.
pub const SEGMENT_HEADER_SIZE: usize = core::mem::size_of::<SegmentHeader>();
pub const MAX_SEGMENTS: u8 = 16; // Reasonable limit
pub const MAX_SEGMENT_SIZE: u32 = 16 * 1024 * 1024; // 16MB limit
/// Calculate the actual size of ESP32 firmware in a partition.
///
/// This function parses the ESP32 firmware image format to determine where the
/// actual firmware content ends within the partition.
///
/// # Arguments
///
/// * `partition` - The flash partition containing the firmware
///
/// # Returns
///
/// A tuple containing:
/// - First value: Size of firmware content only (header + segments + padding + digest)
/// - Second value: Total size including Secure Boot v2 signature blocks if present
///
/// The first value is suitable for:
/// - Calculating SHA256 hashes for secure boot
/// - Determining firmware content size
///
/// The second value is suitable for:
/// - Determining total bytes written to flash partition
/// - Calculating SHA256 hashes to present to user and compare against deterministic builds
/// - OTA update size validation
///
/// # Errors
///
/// Returns an error if:
/// - The partition cannot be read
/// - The firmware header is invalid or corrupted
/// - Segment headers are malformed
pub fn firmware_size(partition: &EspFlashPartition) -> Result<(u32, u32), FirmwareSizeError> {
    // Read and validate the first sector
    let first_sector_array = FlashPartition::read_sector(partition, 0)
        .map_err(|e| FirmwareSizeError::IoError(format!("Failed to read first sector: {:?}", e)))?;
    let first_sector = &first_sector_array[..];
    if first_sector.len() < HEADER_SIZE {
        return Err(FirmwareSizeError::InvalidHeaderSize);
    }
    // Safe header parsing - manual field extraction
    //
    // TODO: esp-hal v1.0 has a lib to do this for us, so we leave this spaghetti here for now.
    let header = ImageHeader {
        magic: first_sector[0],
        segment_count: first_sector[1],
        flash_mode: first_sector[2],
        flash_config: first_sector[3],
        entry: u32::from_le_bytes([
            first_sector[4],
            first_sector[5],
            first_sector[6],
            first_sector[7],
        ]),
        wp_pin: first_sector[8],
        clk_q_drv: first_sector[9],
        d_cs_drv: first_sector[10],
        gd_wp_drv: first_sector[11],
        chip_id: u16::from_le_bytes([first_sector[12], first_sector[13]]),
        min_rev: first_sector[14],
        min_chip_rev_full: u16::from_le_bytes([first_sector[15], first_sector[16]]),
        max_chip_rev_full: u16::from_le_bytes([first_sector[17], first_sector[18]]),
        reserved: [
            first_sector[19],
            first_sector[20],
            first_sector[21],
            first_sector[22],
        ],
        append_digest: first_sector[23],
    };
    // Validate magic number
    if header.magic != ESP_MAGIC {
        return Err(FirmwareSizeError::InvalidMagic(header.magic));
    }
    // Validate segment count
    if header.segment_count == 0 || header.segment_count > MAX_SEGMENTS {
        return Err(FirmwareSizeError::InvalidSegmentCount(header.segment_count));
    }
    // Process segments safely
    let mut current_pos = HEADER_SIZE as u32;
    let mut max_data_end = current_pos;
    // read all the segments to find where the last one ends.
    for _segment_idx in 0..header.segment_count {
        let segment_header = read_segment_header_safe(partition, current_pos)?;
        // Validate segment length
        if segment_header.length > MAX_SEGMENT_SIZE {
            return Err(FirmwareSizeError::SegmentTooLarge(segment_header.length));
        }
        // checked_add guards against a crafted header overflowing u32.
        let segment_data_end = current_pos
            .checked_add(SEGMENT_HEADER_SIZE as u32)
            .and_then(|pos| pos.checked_add(segment_header.length))
            .ok_or(FirmwareSizeError::CorruptedSegmentHeader)?;
        max_data_end = max_data_end.max(segment_data_end);
        current_pos = segment_data_end;
    }
    // Calculate firmware end with padding (following ESP-IDF bootloader logic)
    // The bootloader's process_checksum function handles padding after all segments
    // First: segments are already processed, max_data_end is the end of all segment data
    let unpadded_length = max_data_end;
    // Add space for checksum byte
    let length_with_checksum = unpadded_length + 1;
    // Pad to next full 16 byte block (matching bootloader's logic)
    let padded_length = (length_with_checksum + 15) & !15;
    let mut firmware_end = padded_length;
    // Add digest if present (following espflash logic)
    if header.append_digest == 1 {
        firmware_end = firmware_end
            .checked_add(32)
            .ok_or(FirmwareSizeError::CorruptedSegmentHeader)?;
    }
    // Look for Secure Boot v2 signature block by scanning sectors
    if let Some(signature_sector) =
        frostsnap_comms::firmware_reader::find_signature_sector(partition)
    {
        // Found signature block, firmware ends at end of signature sector
        let total_size = (signature_sector + 1) * (SECTOR_SIZE as u32);
        Ok((firmware_end, total_size))
    } else {
        Ok((firmware_end, firmware_end))
    }
}
/// Read a [`SegmentHeader`] at absolute byte offset `pos` within the partition.
///
/// The header may straddle a sector boundary, in which case both halves are
/// read from consecutive sectors and reassembled.
///
/// # Errors
/// - `SectorOutOfBounds` if `pos` (or the spill-over sector) lies past the partition
/// - `IoError` if a sector read fails
/// - `CorruptedSegmentHeader` if the header cannot be fully reassembled
fn read_segment_header_safe(
    partition: &EspFlashPartition,
    pos: u32,
) -> Result<SegmentHeader, FirmwareSizeError> {
    let sector_size = SECTOR_SIZE as u32;
    let sector_num = pos / sector_size;
    let sector_offset = (pos % sector_size) as usize;
    // Check bounds
    if sector_num >= partition.n_sectors() {
        return Err(FirmwareSizeError::SectorOutOfBounds(sector_num));
    }
    // Read the sector containing the header
    let sector = FlashPartition::read_sector(partition, sector_num).map_err(|e| {
        FirmwareSizeError::IoError(format!("Failed to read sector for segment header: {:?}", e))
    })?;
    if sector_offset + SEGMENT_HEADER_SIZE <= sector.len() {
        // Header fits in current sector
        let end_pos = sector_offset + SEGMENT_HEADER_SIZE;
        // FIX: restore `&sector[..]` — this slice expression had been corrupted
        // into `§or[..]` by over-eager HTML-entity decoding of `&sect`.
        Ok(parse_segment_header(&sector[sector_offset..end_pos]))
    } else {
        // Header spans sectors - reconstruct safely
        let mut header_bytes = [0u8; SEGMENT_HEADER_SIZE];
        let first_part = sector.len().saturating_sub(sector_offset);
        if first_part > 0 && sector_offset < sector.len() {
            header_bytes[..first_part].copy_from_slice(&sector[sector_offset..]);
        }
        if first_part < SEGMENT_HEADER_SIZE {
            let next_sector_num = sector_num + 1;
            if next_sector_num >= partition.n_sectors() {
                return Err(FirmwareSizeError::SectorOutOfBounds(next_sector_num));
            }
            let next_sector =
                FlashPartition::read_sector(partition, next_sector_num).map_err(|e| {
                    FirmwareSizeError::IoError(format!(
                        "Failed to read next sector for spanning segment header: {:?}",
                        e
                    ))
                })?;
            let remaining = SEGMENT_HEADER_SIZE - first_part;
            if remaining <= next_sector.len() {
                header_bytes[first_part..].copy_from_slice(&next_sector[..remaining]);
            } else {
                return Err(FirmwareSizeError::CorruptedSegmentHeader);
            }
        }
        Ok(parse_segment_header(&header_bytes))
    }
}
/// Decode the 8-byte segment header: a little-endian load address followed by
/// a little-endian payload length. Panics if `bytes` has fewer than 8 bytes.
fn parse_segment_header(bytes: &[u8]) -> SegmentHeader {
    let addr_bytes: [u8; 4] = bytes[0..4].try_into().expect("4-byte addr field");
    let len_bytes: [u8; 4] = bytes[4..8].try_into().expect("4-byte length field");
    SegmentHeader {
        addr: u32::from_le_bytes(addr_bytes),
        length: u32::from_le_bytes(len_bytes),
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/peripherals.rs | device/src/peripherals.rs | //! Device peripheral initialization and management
use alloc::{boxed::Box, rc::Rc};
use core::cell::RefCell;
use display_interface_spi::SPIInterface;
use embedded_graphics::{pixelcolor::Rgb565, prelude::*};
use esp_hal::{
delay::Delay,
gpio::{AnyPin, Input, Io, Output, Pull},
hmac::Hmac,
i2c::master::{Config as I2cConfig, I2c},
ledc::{
channel::{self, ChannelIFace},
timer::{self as timerledc, LSClockSource, TimerIFace},
LSGlobalClkSource, Ledc, LowSpeed,
},
peripherals::{Peripherals, DS, RSA, TIMG0, TIMG1},
prelude::*,
spi::master::Spi,
timer::timg::{Timer, Timer0, TimerGroup},
uart::Uart,
usb_serial_jtag::UsbSerialJtag,
Blocking,
};
use frostsnap_cst816s::CST816S;
use mipidsi::models::ST7789;
use rand_chacha::ChaCha20Rng;
use rand_core::{RngCore, SeedableRng};
use crate::efuse::EfuseController;
/// Initialize the 240x280 ST7789 display.
///
/// Expands to an expression that configures SPI2 at 80 MHz (mode 2) with
/// GPIO8 = SCK, GPIO7 = MOSI, GPIO9 = data/command, GPIO6 = reset, then
/// builds and initializes the `mipidsi` driver (colors inverted, 20-pixel
/// vertical offset for the 240x280 panel). Evaluates to the display handle.
#[macro_export]
macro_rules! init_display {
    (peripherals: $peripherals:expr, delay: $delay:expr) => {{
        use display_interface_spi::SPIInterface;
        use esp_hal::{
            gpio::{Level, Output},
            prelude::*,
            spi::{
                master::{Config as SpiConfig, Spi},
                SpiMode,
            },
        };
        use mipidsi::{models::ST7789, options::ColorInversion};
        let spi = Spi::new_with_config(
            &mut $peripherals.SPI2,
            SpiConfig {
                frequency: 80.MHz(),
                mode: SpiMode::Mode2,
                ..SpiConfig::default()
            },
        )
        .with_sck(&mut $peripherals.GPIO8)
        .with_mosi(&mut $peripherals.GPIO7);
        // No chip-select GPIO is used; NoCs is a no-op OutputPin.
        let spi_device = embedded_hal_bus::spi::ExclusiveDevice::new_no_delay(spi, NoCs);
        let di = SPIInterface::new(spi_device, Output::new(&mut $peripherals.GPIO9, Level::Low));
        let display = mipidsi::Builder::new(ST7789, di)
            .display_size(240, 280)
            .display_offset(0, 20) // 240*280 panel
            .invert_colors(ColorInversion::Inverted)
            .reset_pin(Output::new(&mut $peripherals.GPIO6, Level::Low))
            .init($delay)
            .unwrap();
        display
    }};
}
/// Dummy CS pin for our display
///
/// The SPI device API requires a chip-select pin type; this no-op
/// implementation satisfies it without driving any real GPIO.
pub struct NoCs;
impl embedded_hal::digital::OutputPin for NoCs {
    // Intentionally does nothing — there is no CS line to pull low.
    fn set_low(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }
    // Intentionally does nothing — there is no CS line to pull high.
    fn set_high(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }
}
impl embedded_hal::digital::ErrorType for NoCs {
    // These operations can never fail.
    type Error = core::convert::Infallible;
}
/// Type alias for the display to reduce complexity.
///
/// ST7789 panel driven over a blocking SPI bus (exclusive device, no delay,
/// dummy CS) with `Output` pins for data/command and reset.
type Display<'a> = mipidsi::Display<
    SPIInterface<
        embedded_hal_bus::spi::ExclusiveDevice<
            Spi<'a, Blocking>,
            NoCs,
            embedded_hal_bus::spi::NoDelay,
        >,
        Output<'a>,
    >,
    ST7789,
    Output<'a>,
>;
/// All device peripherals initialized and ready to use
pub struct DevicePeripherals<'a> {
    /// Shared timer for timing operations (leaked to 'static for SerialInterface)
    pub timer: &'static Timer<Timer0<TIMG0>, Blocking>,
    /// UI timer for display and touch operations
    pub ui_timer: Timer<Timer0<TIMG1>, Blocking>,
    /// Display
    pub display: Display<'a>,
    /// Touch receiver for interrupt-based touch handling
    pub touch_receiver: frostsnap_cst816s::interrupt::TouchReceiver,
    /// Display backlight (LEDC PWM channel)
    pub backlight: channel::Channel<'a, LowSpeed>,
    /// UART for upstream device connection (if detected at boot)
    pub uart_upstream: Option<Uart<'static, Blocking>>,
    /// UART for downstream device connection
    pub uart_downstream: Uart<'static, Blocking>,
    /// USB JTAG for debugging and upstream connection
    pub jtag: UsbSerialJtag<'a, Blocking>,
    /// Pin to detect upstream device connection
    pub upstream_detect: Input<'a, AnyPin>,
    /// Pin to detect downstream device connection
    pub downstream_detect: Input<'a, AnyPin>,
    /// SHA256 hardware accelerator
    pub sha256: esp_hal::sha::Sha<'a>,
    /// HMAC hardware module (Rc for shared ownership)
    pub hmac: Rc<RefCell<Hmac<'a>>>,
    /// Digital Signature peripheral
    pub ds: &'a mut DS,
    /// RSA hardware accelerator
    pub rsa: &'a mut RSA,
    /// eFuse controller
    pub efuse: EfuseController<'a>,
    /// Initial RNG seeded from hardware
    pub initial_rng: ChaCha20Rng,
}
/// Extract entropy from hardware RNG mixed with SHA256.
///
/// Hashes `bytes` worth of raw RNG output (rounded up to whole 64-byte
/// chunks) through the SHA-256 engine and seeds a ChaCha20 RNG with the
/// resulting digest.
fn extract_entropy(
    rng: &mut impl RngCore,
    sha256: &mut esp_hal::sha::Sha<'_>,
    bytes: usize,
) -> ChaCha20Rng {
    use frostsnap_core::sha2::digest::FixedOutput;
    let mut hasher = sha256.start::<esp_hal::sha::Sha256>();
    // One buffer, refilled each round — fill_bytes overwrites it completely.
    let mut chunk = [0u8; 64];
    let mut rounds_left = bytes.div_ceil(64);
    while rounds_left > 0 {
        rng.fill_bytes(&mut chunk);
        frostsnap_core::sha2::digest::Update::update(&mut hasher, chunk.as_ref());
        rounds_left -= 1;
    }
    let seed = hasher.finalize_fixed();
    ChaCha20Rng::from_seed(seed.into())
}
// Static storage for peripherals to enable 'static references
// This is safe because peripherals are initialized once at startup and never dropped.
// Accessed only through a raw pointer inside DevicePeripherals::init.
static mut PERIPHERALS_SINGLETON: Option<Peripherals> = None;
impl<'a> DevicePeripherals<'a> {
    /// Check if the device needs factory provisioning
    ///
    /// True when the HMAC key efuses have not been burned yet.
    pub fn needs_factory_provisioning(&self) -> bool {
        !self.efuse.has_hmac_keys_initialized()
    }
    /// Initialize all device peripherals including initial RNG
    ///
    /// NOTE(review): the initialization order below (entropy extraction,
    /// timers, backlight, display, touch, crypto, UARTs) is deliberate —
    /// keep the sequence intact when editing.
    pub fn init(peripherals: Peripherals) -> Box<Self> {
        // SAFETY: We can store peripherals in static storage and get a 'static reference
        // since we're never passing it on to anyone else.
        let peripherals = unsafe {
            PERIPHERALS_SINGLETON = Some(peripherals);
            // Use a raw pointer to avoid the mutable static warning
            let ptr = &raw mut PERIPHERALS_SINGLETON;
            (*ptr).as_mut().unwrap()
        };
        // Initialize Io for interrupt handling.
        // SAFETY: We bypass the check that esp-hal is trying to get us to do here since this function has the
        // only copy of Peripherals. Hopefully this doesn't need to happen in esp-hal v1.0+.
        let mut io = Io::new(unsafe { core::mem::zeroed() });
        // Enable stack guard if feature is enabled
        #[cfg(feature = "stack_guard")]
        crate::stack_guard::enable_stack_guard(&mut peripherals.ASSIST_DEBUG);
        let mut delay = Delay::new();
        // Initialize SHA256 early for entropy extraction
        let mut sha256 = esp_hal::sha::Sha::new(&mut peripherals.SHA);
        // Get initial entropy from hardware RNG mixed with SHA256
        let mut trng = esp_hal::rng::Trng::new(&mut peripherals.RNG, &mut peripherals.ADC1);
        let initial_rng = extract_entropy(&mut trng, &mut sha256, 1024);
        // Initialize timers
        let timg0 = TimerGroup::new(&mut peripherals.TIMG0);
        let timg1 = TimerGroup::new(&mut peripherals.TIMG1);
        // Extract timer0 from TIMG0 and leak it to get 'static reference for SerialInterface
        // This is safe because the timer lives for the entire program lifetime
        let timer = Box::leak(Box::new(timg0.timer0));
        let ui_timer = timg1.timer0;
        // Detection pins (using AnyPin to avoid generics)
        let upstream_detect = Input::new(&mut peripherals.GPIO0, Pull::Up);
        let downstream_detect = Input::new(&mut peripherals.GPIO10, Pull::Up);
        // Initialize backlight control (LEDC PWM at 24 kHz, 10-bit duty)
        let mut ledc = Ledc::new(&mut peripherals.LEDC);
        ledc.set_global_slow_clock(LSGlobalClkSource::APBClk);
        let mut lstimer0 = ledc.timer::<LowSpeed>(timerledc::Number::Timer0);
        lstimer0
            .configure(timerledc::config::Config {
                duty: timerledc::config::Duty::Duty10Bit,
                clock_source: LSClockSource::APBClk,
                frequency: 24u32.kHz(),
            })
            .unwrap();
        // Leak the timer so it lives forever (we never need to drop it)
        let lstimer0 = Box::leak(Box::new(lstimer0));
        let mut backlight = ledc.channel(channel::Number::Channel0, &mut peripherals.GPIO1);
        backlight
            .configure(channel::config::Config {
                timer: lstimer0,
                duty_pct: 0, // Start with backlight off
                pin_config: channel::config::PinConfig::PushPull,
            })
            .unwrap();
        let mut display = init_display!(peripherals: peripherals, delay: &mut delay);
        // Initialize I2C for touch sensor (400 kHz, SDA=GPIO4, SCL=GPIO5)
        let i2c = I2c::new(
            &mut peripherals.I2C0,
            I2cConfig {
                frequency: 400u32.kHz(),
                ..I2cConfig::default()
            },
        )
        .with_sda(&mut peripherals.GPIO4)
        .with_scl(&mut peripherals.GPIO5);
        let mut capsense = CST816S::new_esp32(i2c, &mut peripherals.GPIO2, &mut peripherals.GPIO3);
        capsense.setup(&mut delay).unwrap();
        // Register the capsense instance with the interrupt handler
        let touch_receiver = frostsnap_cst816s::interrupt::register(capsense, &mut io);
        // Clear display and turn on backlight (fade in over 500 ms)
        let _ = display.clear(Rgb565::BLACK);
        backlight.start_duty_fade(0, 30, 500).unwrap();
        // Initialize other crypto peripherals
        let efuse = EfuseController::new(&mut peripherals.EFUSE);
        let hmac = Rc::new(RefCell::new(Hmac::new(&mut peripherals.HMAC)));
        // Initialize JTAG
        let jtag = UsbSerialJtag::new(&mut peripherals.USB_DEVICE);
        // Initialize upstream UART only if upstream device is detected
        let uart_upstream = if upstream_detect.is_low() {
            Some(
                Uart::new(
                    &mut peripherals.UART1,
                    &mut peripherals.GPIO18,
                    &mut peripherals.GPIO19,
                )
                .unwrap(),
            )
        } else {
            None
        };
        // Always initialize downstream UART
        let uart_downstream = Uart::new(
            &mut peripherals.UART0,
            &mut peripherals.GPIO21,
            &mut peripherals.GPIO20,
        )
        .unwrap();
        Box::new(Self {
            timer,
            ui_timer,
            display,
            touch_receiver,
            backlight,
            uart_upstream,
            uart_downstream,
            jtag,
            upstream_detect,
            downstream_detect,
            sha256,
            hmac,
            ds: &mut peripherals.DS,
            efuse,
            initial_rng,
            rsa: &mut peripherals.RSA,
        })
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/ui.rs | device/src/ui.rs | // Imports removed - legacy screens are not used in stateless Workflow
use alloc::{boxed::Box, string::String};
use frost_backup::ShareBackup;
use frostsnap_comms::{DeviceName, Sha256Digest};
use frostsnap_core::{
device::{restoration::EnterBackupPhase, KeyGenPhase3, SignPhase1},
message::HeldShare2,
tweak::BitcoinBip32Path,
AccessStructureRef,
};
/// Interface the device logic uses to drive the UI and receive user events.
pub trait UserInteraction {
    /// Record the current state of the downstream connection for display.
    fn set_downstream_connection_state(&mut self, state: crate::DownstreamConnectionState);
    /// Record the current state of the upstream connection for display.
    fn set_upstream_connection_state(&mut self, state: crate::UpstreamConnectionState);
    /// Switch the UI to a new top-level [`Workflow`].
    fn set_workflow(&mut self, workflow: Workflow);
    /// Indicate a long-running task is in progress.
    fn set_busy_task(&mut self, task: BusyTask);
    /// Clear any indicator set by `set_busy_task`.
    fn clear_busy_task(&mut self);
    /// Poll for a pending user-initiated event, if any.
    fn poll(&mut self) -> Option<UiEvent>;
}
// Implement UserInteraction for Box<T> where T implements UserInteraction.
// Pure delegation so boxed (including trait-object) UIs can be used anywhere
// the trait is expected.
impl<T: UserInteraction + ?Sized> UserInteraction for Box<T> {
    fn set_downstream_connection_state(&mut self, state: crate::DownstreamConnectionState) {
        (**self).set_downstream_connection_state(state)
    }
    fn set_upstream_connection_state(&mut self, state: crate::UpstreamConnectionState) {
        (**self).set_upstream_connection_state(state)
    }
    fn set_workflow(&mut self, workflow: Workflow) {
        (**self).set_workflow(workflow)
    }
    fn set_busy_task(&mut self, task: BusyTask) {
        (**self).set_busy_task(task)
    }
    fn clear_busy_task(&mut self) {
        (**self).clear_busy_task()
    }
    fn poll(&mut self) -> Option<UiEvent> {
        (**self).poll()
    }
}
#[derive(Debug, Default)]
/// Top-level UI state: which screen/flow the device is currently showing.
pub enum Workflow {
    /// Initial state at boot.
    #[default]
    Startup,
    /// No active workflow.
    None,
    /// Idle screen showing the device name and its held share.
    Standby {
        device_name: DeviceName,
        held_share: HeldShare2,
    },
    /// Waiting on the user to confirm or deny a prompt.
    UserPrompt(Prompt),
    /// Device (re)naming in progress.
    NamingDevice {
        new_name: DeviceName,
    },
    /// Show a share backup for the user to record.
    DisplayBackup {
        key_name: String,
        backup: ShareBackup,
        access_structure_ref: AccessStructureRef,
    },
    /// User is keying in a previously recorded share backup.
    EnteringBackup(EnterBackupPhase),
    /// Show a Bitcoin address with its derivation path.
    DisplayAddress {
        address: bitcoin::Address,
        bip32_path: BitcoinBip32Path,
        // Purpose not visible from this file — presumably randomizes display; TODO confirm
        rand_seed: u32,
    },
    /// Firmware upgrade progress or passive status.
    FirmwareUpgrade(FirmwareUpgradeStatus),
}
impl Workflow {
#[must_use]
pub fn prompt(prompt: Prompt) -> Self {
Self::UserPrompt(prompt)
}
}
#[derive(Clone, Debug)]
/// Questions put to the user that require an explicit confirm/deny.
pub enum Prompt {
    /// Confirm participation in key generation.
    KeyGen {
        phase: Box<KeyGenPhase3>,
    },
    /// Confirm signing a request.
    Signing {
        phase: Box<SignPhase1>,
    },
    /// Confirm a device rename (old_name is None for a first-time name).
    NewName {
        old_name: Option<DeviceName>,
        new_name: DeviceName,
    },
    /// Confirm installing new firmware with the shown digest and size.
    ConfirmFirmwareUpgrade {
        firmware_digest: Sha256Digest,
        size: u32,
    },
    /// Confirm wiping the device.
    WipeDevice,
}
#[derive(Clone, Copy, Debug, PartialEq)]
/// Long-running operations the UI shows a busy indicator for.
pub enum BusyTask {
    KeyGen,
    Signing,
    VerifyingShare,
    Loading,
    GeneratingNonces,
}
#[derive(Clone, Copy, Debug)]
/// Progress reporting for a firmware upgrade shown on screen.
pub enum FirmwareUpgradeStatus {
    /// Erasing the target partition; `progress` in [0, 1].
    Erase { progress: f32 },
    /// Downloading/writing firmware chunks; `progress` in [0, 1].
    Download { progress: f32 },
    /// This device already has the firmware and is only passing it through.
    Passive,
}
#[derive(Clone, Debug)]
/// Events produced by the UI in response to user action, consumed via
/// [`UserInteraction::poll`].
pub enum UiEvent {
    /// User approved key generation.
    KeyGenConfirm {
        phase: Box<KeyGenPhase3>,
    },
    /// User approved signing.
    SigningConfirm {
        phase: Box<SignPhase1>,
    },
    /// User confirmed the device name.
    NameConfirm(frostsnap_comms::DeviceName),
    /// User finished keying in a share backup.
    EnteredShareBackup {
        phase: EnterBackupPhase,
        share_backup: ShareBackup,
    },
    /// User acknowledged having recorded a displayed backup.
    BackupRecorded {
        access_structure_ref: AccessStructureRef,
    },
    /// User approved the firmware upgrade.
    UpgradeConfirm,
    /// User confirmed wiping the device.
    WipeDataConfirm,
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/ota.rs | device/src/ota.rs | use crate::{
io::SerialIo,
partitions::{EspFlashPartition, PartitionExt},
secure_boot,
ui::{self, UserInteraction},
};
use alloc::boxed::Box;
use bincode::config::{Fixint, LittleEndian};
use esp_hal::rsa::Rsa;
use esp_hal::sha::Sha;
use esp_hal::time::Duration;
use esp_hal::timer;
use esp_hal::Blocking;
use frostsnap_comms::{
CommsMisc, DeviceSendBody, Sha256Digest, BAUDRATE, FIRMWARE_NEXT_CHUNK_READY_SIGNAL,
FIRMWARE_UPGRADE_CHUNK_LEN,
};
use nb::block;
#[derive(Clone, Debug)]
/// The three flash partitions involved in OTA firmware updates.
pub struct OtaPartitions<'a> {
    /// The esp32 "otadata" partition: two descriptor slots recording which
    /// app slot is active (split in `otadata_sectors`).
    pub otadata: EspFlashPartition<'a>,
    /// First OTA app partition (slot 0).
    pub ota_0: EspFlashPartition<'a>,
    /// Second OTA app partition (slot 1).
    pub ota_1: EspFlashPartition<'a>,
}
/// CRC used by our bootloader (and incidentally python's binutils crc32 function when passed 0xFFFFFFFF as the init).
const CRC: crc::Crc<u32> = crc::Crc::<u32>::new(&crc::Algorithm {
    width: 32,
    poly: 0x04c11db7,
    init: 0x0,
    refin: true,
    refout: true,
    xorout: 0xffffffff,
    check: 0xcbf43926, // This is just for reference
    residue: 0xdebb20e3,
});
/// Flash sector size in bytes.
const SECTOR_SIZE: u32 = 4096;
/// We switch the baudrate during OTA update to make it faster
const OTA_UPDATE_BAUD: u32 = 921_600;
/// we want fixint encoding for the otadata section because that's what esp32 uses.
const OTADATA_BINCODE_CONFIG: bincode::config::Configuration<LittleEndian, Fixint> =
    bincode::config::legacy();
/// This is the somewhat undocumented layout of each of the two otadata slots.
/// The seq_crc is just a crc on the seq value.
#[derive(bincode::Encode, bincode::Decode, Clone, Debug, PartialEq)]
struct EspOtadataSlot {
    // Monotonically increasing selection counter; the slot with the highest
    // valid seq wins (see current_slot).
    seq: u32,
    reserved: [u8; 24],
    // CRC over seq's little-endian bytes, used to validate the entry.
    seq_crc: u32,
    /// defined by us
    pub our_metadata: OtaMetadata,
}
impl<'a> OtaPartitions<'a> {
    /// The slot index the next upgrade should be written to: the one that is
    /// NOT currently active (defaults to 0 when no valid slot exists).
    fn next_slot(&self) -> usize {
        self.current_slot().map(|(i, _)| (i + 1) % 2).unwrap_or(0)
    }
    /// The two OTA app partitions, indexable by slot number.
    fn ota_partitions(&self) -> [EspFlashPartition<'a>; 2] {
        [self.ota_0, self.ota_1]
    }
    /// Split the otadata partition into its two per-slot descriptor sectors.
    fn otadata_sectors(&self) -> [EspFlashPartition<'a>; 2] {
        let mut ota_0_desc = self.otadata;
        let ota_1_desc = ota_0_desc.split_off_end(1);
        [ota_0_desc, ota_1_desc]
    }
    /// The app partition that is currently selected (falls back to ota_0 when
    /// no valid otadata slot exists).
    pub fn active_partition(&self) -> EspFlashPartition<'a> {
        match self.current_slot() {
            Some((slot, _)) => self.ota_partitions()[slot],
            None => self.ota_0,
        }
    }
    /// The currently selected slot: among entries that decode and pass the
    /// CRC check, the one with the highest sequence number.
    fn current_slot(&self) -> Option<(usize, EspOtadataSlot)> {
        let mut best_partition: Option<(usize, EspOtadataSlot)> = None;
        for (i, slot) in self.otadata_sectors().into_iter().enumerate() {
            let otadata_slot = bincode::decode_from_reader::<EspOtadataSlot, _, _>(
                slot.bincode_reader(),
                OTADATA_BINCODE_CONFIG,
            );
            let otadata_slot = match otadata_slot {
                Ok(otadata_slot) => otadata_slot,
                Err(_) => continue,
            };
            // Reject entries whose stored CRC doesn't match the seq bytes.
            let implied_crc = CRC.checksum(&otadata_slot.seq.to_le_bytes());
            if implied_crc != otadata_slot.seq_crc {
                continue;
            }
            // `None` compares less than any `Some`, so the first valid slot
            // always replaces an empty best.
            if best_partition.as_ref().map(|(_, data)| data.seq) < Some(otadata_slot.seq) {
                best_partition = Some((i, otadata_slot));
            }
        }
        best_partition
    }
    /// Our metadata for the currently active slot, if any slot is valid.
    pub fn active_slot_metadata(&self) -> Option<OtaMetadata> {
        self.current_slot().map(|(_, slot)| slot.our_metadata)
    }
    /// Write to the otadata parition to indicate that a different partition should be the main one.
    fn switch_partition(&self, slot: usize, metadata: OtaMetadata) {
        // to select it the parition must be higher than the other one
        let next_seq = match self.current_slot() {
            Some((current_slot, otadata_slot)) => {
                if slot == current_slot {
                    /* do nothing */
                    return;
                } else {
                    otadata_slot
                        .seq
                        .checked_add(1)
                        .expect("practically unreachable")
                }
            }
            None => 1,
        };
        // it also needs a valid checksum on the parition
        let seq_crc = CRC.checksum(&next_seq.to_le_bytes());
        let otadata = EspOtadataSlot {
            seq: next_seq,
            reserved: Default::default(),
            seq_crc,
            our_metadata: metadata,
        };
        let target = self.otadata_sectors()[slot];
        target.erase_all().expect("failed to erase");
        let mut writer = target.bincode_writer_remember_to_flush::<64>();
        bincode::encode_into_writer(&otadata, &mut writer, OTADATA_BINCODE_CONFIG)
            .expect("failed to write otadata");
        let _ = writer.flush().expect("failed to switch parition");
        // Read back and verify what we just wrote to catch flash errors.
        let what_was_written: EspOtadataSlot =
            bincode::decode_from_reader(target.bincode_reader(), OTADATA_BINCODE_CONFIG)
                .expect("failed to read back what was written");
        assert_eq!(
            what_was_written, otadata,
            "check that what was written was right"
        );
    }
    /// Begin a firmware upgrade of `size` bytes with the given expected digest.
    ///
    /// If the expected digest equals the active partition's digest this device
    /// already runs the new firmware and only participates passively
    /// (forwarding); otherwise it prepares to write into the inactive slot.
    pub fn start_upgrade(
        &self,
        size: u32,
        expected_digest: Sha256Digest,
        active_partition_digest: Sha256Digest,
    ) -> FirmwareUpgradeMode<'_> {
        let slot = self.next_slot();
        let partition = &self.ota_partitions()[slot];
        assert!(
            size <= partition.size(),
            "new firmware size should fit inside the partition"
        );
        assert!(
            partition.size().is_multiple_of(FIRMWARE_UPGRADE_CHUNK_LEN),
            "these should match up to avoid overwriting"
        );
        if expected_digest == active_partition_digest {
            FirmwareUpgradeMode::Passive {
                size,
                sent_ack: false,
            }
        } else {
            FirmwareUpgradeMode::Upgrading {
                ota: self.clone(),
                ota_slot: slot,
                expected_digest,
                size,
                state: State::WaitingForConfirm { sent_prompt: false },
            }
        }
    }
}
/// How this device participates in a firmware upgrade session.
#[derive(Debug)]
pub enum FirmwareUpgradeMode<'a> {
    /// We are actually flashing the new image into one of our OTA slots.
    Upgrading {
        ota: OtaPartitions<'a>,
        /// Index of the OTA slot being written.
        ota_slot: usize,
        /// Digest the downloaded image must hash to.
        expected_digest: Sha256Digest,
        /// Total image size in bytes.
        size: u32,
        state: State,
    },
    /// We already run this firmware; we only forward bytes downstream.
    Passive {
        size: u32,
        /// Whether we have already acknowledged upgrade mode upstream.
        sent_ack: bool,
    },
}
/// Progress of an active (non-passive) firmware upgrade.
#[derive(Clone, Debug)]
pub enum State {
    /// Waiting for the user to confirm the upgrade on-device.
    WaitingForConfirm { sent_prompt: bool },
    /// Erasing the target partition; `seq` is the next sector to erase.
    Erase { seq: u32 },
    /// Erase done; waiting for the coordinator to start streaming bytes.
    WaitingToEnterUpgradeMode,
}
impl FirmwareUpgradeMode<'_> {
    /// Drive the upgrade state machine one step.
    ///
    /// Returns a message to send upstream when a transition requires
    /// acknowledging the coordinator, otherwise `None`.
    pub fn poll(&mut self, ui: &mut impl crate::ui::UserInteraction) -> Option<DeviceSendBody> {
        match self {
            FirmwareUpgradeMode::Upgrading {
                ota,
                ota_slot,
                expected_digest,
                size,
                state,
            } => {
                let partition = ota.ota_partitions()[*ota_slot];
                match state {
                    State::WaitingForConfirm { sent_prompt } if !*sent_prompt => {
                        // Show the confirmation prompt exactly once.
                        *sent_prompt = true;
                        ui.set_workflow(ui::Workflow::prompt(ui::Prompt::ConfirmFirmwareUpgrade {
                            firmware_digest: *expected_digest,
                            size: *size,
                        }));
                        None
                    }
                    State::Erase { seq } => {
                        let mut finished = false;
                        let last_sector_index = partition.n_sectors() - 1;
                        /// So we can erase multiple sectors per poll (otherwise it's slow).
                        const ERASE_CHUNK_SIZE: usize = 1;
                        for _ in 0..ERASE_CHUNK_SIZE {
                            partition.erase_sector(*seq).expect("must erase sector");
                            *seq += 1;
                            // NOTE(review): erasing stops once `seq` reaches
                            // `n_sectors - 1`, so the final sector of the
                            // partition is never erased here — confirm this
                            // is intentional.
                            if *seq == last_sector_index {
                                finished = true;
                                break;
                            }
                        }
                        ui.set_workflow(ui::Workflow::FirmwareUpgrade(
                            ui::FirmwareUpgradeStatus::Erase {
                                progress: *seq as f32 / last_sector_index as f32,
                            },
                        ));
                        if finished {
                            *state = State::WaitingToEnterUpgradeMode;
                            Some(DeviceSendBody::Misc(CommsMisc::AckUpgradeMode))
                        } else {
                            None
                        }
                    }
                    _ => {
                        /* waiting */
                        None
                    }
                }
            }
            FirmwareUpgradeMode::Passive { sent_ack, .. } => {
                // Acknowledge once and show the passive status screen.
                if !*sent_ack {
                    *sent_ack = true;
                    ui.set_workflow(ui::Workflow::FirmwareUpgrade(
                        ui::FirmwareUpgradeStatus::Passive,
                    ));
                    Some(DeviceSendBody::Misc(CommsMisc::AckUpgradeMode))
                } else {
                    None
                }
                /* we will passively forward data for upgrade no need to prompt or do anything */
            }
        }
    }
pub fn upgrade_confirm(&mut self) {
match self {
FirmwareUpgradeMode::Upgrading {
state: state @ State::WaitingForConfirm { sent_prompt: true },
..
} => {
*state = State::Erase { seq: 0 };
}
_ => {
panic!(
"Upgrade confirmed while not waiting for a confirmation. {:?}",
self
);
}
}
}
    /// Stream the new firmware image from upstream, writing it to flash
    /// sector-by-sector (when `Upgrading`) and relaying every byte to the
    /// downstream device (when one is attached).
    ///
    /// Implements a chunked flow-control protocol: after each sector the
    /// device waits for `FIRMWARE_NEXT_CHUNK_READY_SIGNAL` from downstream
    /// and echoes its own readiness upstream. On completion the image's
    /// digest is verified (two accepted digest variants, see below) and —
    /// if secure boot is enabled — its signature is checked before the OTA
    /// slot is activated via `switch_partition`.
    ///
    /// Panics if called before the `Upgrading` state machine has reached
    /// `WaitingToEnterUpgradeMode`, or if the download fails verification.
    pub fn enter_upgrade_mode<T: timer::Timer>(
        &mut self,
        upstream_io: &mut SerialIo<'_>,
        mut downstream_io: Option<&mut SerialIo<'_>>,
        ui: &mut impl UserInteraction,
        sha: &mut Sha<'_>,
        timer: &T,
        rsa: &mut Rsa<Blocking>,
    ) {
        match self {
            FirmwareUpgradeMode::Upgrading { state, .. } => {
                if !matches!(state, State::WaitingToEnterUpgradeMode) {
                    panic!("can't start upgrade while still preparing");
                }
            }
            FirmwareUpgradeMode::Passive { .. } => { /* always ready to enter upgrade mode */ }
        }
        let upgrade_size = match *self {
            FirmwareUpgradeMode::Upgrading { size, .. } => size,
            FirmwareUpgradeMode::Passive { size, .. } => size,
        };
        // The transfer runs at an elevated baud rate for speed.
        upstream_io.change_baud(OTA_UPDATE_BAUD);
        if let Some(downstream_io) = &mut downstream_io {
            downstream_io.change_baud(OTA_UPDATE_BAUD);
        }
        let start = timer.now();
        while timer.now().checked_duration_since(start).unwrap() < Duration::millis(100) {
            // wait for everyone to finish changing BAUD rates to prevent race condition
        }
        // allocate it on heap with Box to avoid enlarging stack
        let mut in_buf = Box::new([0xffu8; SECTOR_SIZE as usize]);
        let mut i = 0; // write position within the current sector buffer
        let mut byte_count = 0; // total bytes received so far
        let mut sector = 0; // next sector index to flash
        let mut finished_writing = false;
        let mut downstream_ready = downstream_io.is_none();
        let mut told_upstream_im_ready = false;
        while !finished_writing {
            // Only consume upstream bytes while downstream can keep up.
            if downstream_ready {
                if let Some(byte) = upstream_io.read_byte() {
                    in_buf[i] = byte;
                    i += 1;
                    byte_count += 1;
                    finished_writing = byte_count == upgrade_size;
                    if let Some(downstream_io) = &mut downstream_io {
                        block!(downstream_io.write_byte_nb(byte)).unwrap();
                    }
                    if i == SECTOR_SIZE as usize || finished_writing {
                        // we know the downstream device (if it exists) might be writing to flash so
                        // assume it's not ready yet.
                        downstream_ready = downstream_io.is_none();
                        // likewise the upstream device assumes we're not ready
                        told_upstream_im_ready = false;
                        i = 0;
                        // only write to the partition if we're actually upgrading
                        if let FirmwareUpgradeMode::Upgrading {
                            ota_slot,
                            ota,
                            size,
                            ..
                        } = &self
                        {
                            let partition = ota.ota_partitions()[*ota_slot];
                            partition.nor_write_sector(sector, &in_buf).unwrap();
                            ui.set_workflow(ui::Workflow::FirmwareUpgrade(
                                ui::FirmwareUpgradeStatus::Download {
                                    progress: byte_count as f32 / *size as f32,
                                },
                            ));
                            ui.poll();
                        }
                        // Reset buffer to erased-flash value for the next sector.
                        in_buf.fill(0xff);
                        sector += 1;
                    }
                }
            }
            if !finished_writing {
                // Flow control: drain readiness signals from downstream and
                // forward our own readiness upstream exactly once per chunk.
                if let Some(downstream_io) = &mut downstream_io {
                    while let Some(byte) = downstream_io.read_byte() {
                        assert!(
                            byte == FIRMWARE_NEXT_CHUNK_READY_SIGNAL,
                            "invalid control byte sent by downstream"
                        );
                        downstream_ready = true;
                    }
                }
                if downstream_ready && !told_upstream_im_ready {
                    block!(upstream_io.write_byte_nb(FIRMWARE_NEXT_CHUNK_READY_SIGNAL)).unwrap();
                    upstream_io.nb_flush();
                    told_upstream_im_ready = true;
                }
            }
        }
        ui.poll();
        if let Some(downstream_io) = &mut downstream_io {
            downstream_io.flush();
        }
        // change it back to the original baudrate but keep in mind that the devices are meant to
        // restart after the upgrade.
        upstream_io.change_baud(BAUDRATE);
        if let Some(downstream_io) = &mut downstream_io {
            downstream_io.change_baud(BAUDRATE);
        }
        if let FirmwareUpgradeMode::Upgrading {
            ota_slot,
            expected_digest,
            ota,
            ..
        } = &self
        {
            let partition = &ota.ota_partitions()[*ota_slot];
            let (firmware_size, firmware_and_signature_block_size) =
                partition.firmware_size().unwrap();
            // Verify firmware digest - we accept BOTH digest types:
            //
            // 1. Deterministic firmware digest (PrepareUpgrade2): Hash of firmware only,
            //    excluding padding and signature block. Displayed on device screen so users
            //    can verify it matches their locally-built reproducible firmware.
            //
            // 2. Legacy full digest (PrepareUpgrade): Hash of entire signed firmware including
            //    padding and signature block. Used by v0.0.1 and earlier coordinators.
            //
            // Why accept both?
            // - SHA256 collision resistance (~2^-256) makes accidental matches impossible
            // - Simplifies code - no need to track which message variant was received
            // - Provides backwards compatibility with older coordinators
            // - Allows graceful fallback if coordinator sends wrong digest type
            //
            // See frostsnap_comms::CoordinatorUpgradeMessage for protocol documentation.
            let digest_without_signature = partition.sha256_digest(sha, Some(firmware_size));
            if digest_without_signature != *expected_digest {
                let digest_with_signature =
                    partition.sha256_digest(sha, Some(firmware_and_signature_block_size));
                if digest_with_signature != *expected_digest {
                    panic!(
                        "upgrade downloaded did not match intended digest.\n\nGot:\n{}\n\nExpected:\n{}\n\n(Legacy:\n{})",
                        digest_without_signature, expected_digest, digest_with_signature
                    );
                }
            }
            if secure_boot::is_secure_boot_enabled() {
                secure_boot::verify_secure_boot(partition, rsa, sha).unwrap();
            }
            // Point the bootloader at the freshly-written slot.
            ota.switch_partition(*ota_slot, OtaMetadata {});
        }
    }
}
/// Per-slot metadata stored alongside the otadata entry (currently empty,
/// reserved for future use).
#[derive(Clone, Copy, Debug, Default, bincode::Encode, bincode::Decode, PartialEq)]
pub struct OtaMetadata {}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/ds.rs | device/src/ds.rs | use alloc::vec::Vec;
use esp_hal::{peripherals::DS, sha::Sha};
use frostsnap_comms::factory::pad_message_for_rsa;
use frostsnap_comms::factory::DS_KEY_SIZE_BITS;
use nb::block;
/// Hardware DS signing implementation using ESP32's Digital Signature peripheral
pub struct HardwareDs<'a> {
    // Handle to the DS peripheral registers.
    ds: &'a DS,
    // Encrypted RSA private-key parameter blob (16-byte IV followed by the
    // ciphertext) as consumed by `private_exponentiation`.
    encrypted_params: Vec<u8>,
}
impl<'a> HardwareDs<'a> {
    /// Create a new HardwareDs instance
    pub fn new(ds: &'a DS, encrypted_params: Vec<u8>) -> Self {
        Self {
            ds,
            encrypted_params,
        }
    }
    /// Sign a message using the hardware DS peripheral
    ///
    /// Hashes `message` with the hardware SHA-256 engine, pads the digest
    /// with `pad_message_for_rsa`, and performs the private-key
    /// exponentiation inside the DS peripheral. Returns the 384-byte
    /// (RSA-3072) signature with the most significant byte first.
    pub fn sign(&mut self, message: &[u8], sha256: &mut Sha<'_>) -> [u8; 384] {
        // Calculate message digest using hardware SHA and apply padding
        let mut digest = [0u8; 32];
        let mut hasher = sha256.start::<esp_hal::sha::Sha256>();
        let mut remaining = message;
        // `update` may consume only part of the input; keep feeding it the
        // unconsumed tail until everything has been hashed.
        while !remaining.is_empty() {
            remaining = block!(hasher.update(remaining)).expect("infallible");
        }
        block!(hasher.finish(&mut digest)).unwrap();
        let padded_message = pad_message_for_rsa(&digest);
        let sig = private_exponentiation(self.ds, &self.encrypted_params, padded_message);
        words_to_bytes(&sig)
    }
}
/// Convert the DS peripheral's word output (least significant word first)
/// into a big-endian byte string (most significant byte first).
fn words_to_bytes(words: &[u32; 96]) -> [u8; 384] {
    let mut bytes = [0u8; 384];
    // Walk the words in reverse so the most significant word lands at the
    // front, serializing each word big-endian.
    for (chunk, &word) in bytes.chunks_exact_mut(4).zip(words.iter().rev()) {
        chunk.copy_from_slice(&word.to_be_bytes());
    }
    bytes
}
/// Perform the RSA private-key exponentiation of `challenge` inside the
/// ESP32 DS (Digital Signature) peripheral.
///
/// `encrypted_params` is the provisioned blob: a 16-byte AES IV followed by
/// 1200 bytes of ciphertext (Y, M and Rb operands of 384 bytes each plus a
/// 48-byte check "box"). The result is returned as 96 words, least
/// significant word first.
///
/// Panics if the blob has the wrong length, the peripheral rejects the key,
/// or the post-computation check fails.
fn private_exponentiation(ds: &DS, encrypted_params: &[u8], mut challenge: [u8; 384]) -> [u32; 96] {
    // The peripheral expects the operand little-endian (LSB first).
    challenge.reverse();
    // Validate the blob length BEFORE slicing: previously this check lived
    // after the slice expressions, so a short blob panicked with an opaque
    // slice-index error and this message was unreachable for short inputs.
    if encrypted_params.len() != 16 + 1200 {
        panic!("incorrect cipher length!");
    }
    let iv = &encrypted_params[..16];
    let ciph = &encrypted_params[16..];
    let y_ciph = &ciph[0..384];
    let m_ciph = &ciph[384..768];
    let rb_ciph = &ciph[768..1152];
    let box_ciph = &ciph[1152..1200];
    ds.set_start().write(|w| w.set_start().set_bit());
    // Wait for the peripheral to finish its key check.
    while ds.query_busy().read().query_busy().bit() {
        // text_display!(display, "Checking DS Key");
    }
    if ds.query_key_wrong().read().query_key_wrong().bits() == 0 {
        // text_display!(display, "DS Ready");
    } else {
        panic!("DS key read error!");
    }
    // Load the IV, challenge and encrypted operands into peripheral memory,
    // one little-endian word at a time.
    for (i, v) in iv.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.iv_mem(i).write(|w| unsafe { w.bits(data) });
    }
    for (i, v) in challenge.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.x_mem(i).write(|w| unsafe { w.bits(data) });
    }
    for (i, v) in y_ciph.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.y_mem(i).write(|w| unsafe { w.bits(data) });
    }
    for (i, v) in m_ciph.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.m_mem(i).write(|w| unsafe { w.bits(data) });
    }
    for (i, v) in rb_ciph.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.rb_mem(i).write(|w| unsafe { w.bits(data) });
    }
    for (i, v) in box_ciph.chunks(4).enumerate() {
        let data = u32::from_le_bytes(v.try_into().unwrap());
        ds.box_mem(i).write(|w| unsafe { w.bits(data) });
    }
    // Kick off the exponentiation and wait for completion.
    ds.set_continue().write(|w| w.set_continue().set_bit());
    while ds.query_busy().read().query_busy().bit_is_set() {}
    let mut sig = [0u32; 96];
    if ds.query_check().read().bits() == 0 {
        for (i, sig_word) in sig.iter_mut().enumerate().take(DS_KEY_SIZE_BITS / 32) {
            let word = ds.z_mem(i).read().bits();
            *sig_word = word;
        }
    } else {
        panic!("Failed to read signature from DS!")
    }
    ds.set_finish().write(|w| w.set_finish().set_bit());
    while ds.query_busy().read().query_busy().bit() {}
    sig
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/esp32_run.rs | device/src/esp32_run.rs | //! Main event loop for the device
use crate::partitions::PartitionExt;
use crate::{
flash::{Mutation, MutationLog},
io::SerialInterface,
ota,
resources::Resources,
ui::{self, UiEvent, UserInteraction},
DownstreamConnectionState, Duration, Instant, UpstreamConnection, UpstreamConnectionState,
};
use alloc::{boxed::Box, collections::VecDeque, string::ToString, vec::Vec};
use esp_hal::timer::Timer;
use frostsnap_comms::{
CommsMisc, CoordinatorSendBody, CoordinatorUpgradeMessage, DeviceName, DeviceSendBody,
ReceiveSerial, Upstream, MAGIC_BYTES_PERIOD,
};
use frostsnap_core::{
device::{DeviceToUserMessage, FrostSigner},
device_nonces::NonceJobBatch,
message::{self, DeviceSend},
};
use frostsnap_embedded::NonceAbSlot;
use rand_core::RngCore;
/// Main event loop for the device
///
/// Loads device state from NVS, establishes the upstream/downstream serial
/// links, then loops forever: shuttling protocol messages, driving the
/// signer state machine, persisting mutations to flash, and reacting to UI
/// events. Never returns — firmware upgrades and data wipes end in a
/// software reset.
pub fn run<'a>(resources: &'a mut Resources<'a>) -> ! {
    // Destructure resources
    let Resources {
        ref mut rng,
        ref mut hmac_keys,
        ds: ref mut hardware_rsa,
        ref certificate,
        ref mut nvs,
        ota: ref mut ota_partitions,
        ref mut ui,
        ref mut timer,
        ref mut sha256,
        ref mut upstream_serial,
        ref mut downstream_serial,
        ref mut downstream_detect,
        ref mut rsa,
    } = resources;
    // create an unmolested copy first so we can erase it all
    let full_nvs = *nvs;
    // Read device header and keypair from NVS
    let header_sectors = nvs.split_off_front(2);
    let header_flash = crate::flash::FlashHeader::new(header_sectors);
    let header = match header_flash.read_header() {
        Some(h) => h,
        None => {
            // New device - verify NVS is empty
            if !nvs.is_empty().expect("checking NVS is empty") {
                panic!("Device appears to be new but NVS is not blank. Maybe you need to manually erase the device?");
            }
            // Initialize new header with device keypair
            header_flash.init(rng)
        }
    };
    let device_keypair = header.device_keypair(&mut hmac_keys.fixed_entropy);
    // Set up NVS partitions for shares, nonces, and mutation log
    let share_partition = nvs.split_off_front(2);
    // Keep some space reserved for other potential uses in the future, 8 AB slots
    let _reserved = nvs.split_off_front(8 * 2);
    let nonce_slots = {
        // Give half the remaining nvs over to nonces
        let mut n_nonce_sectors = nvs.n_sectors().div_ceil(2);
        // Make sure it's a multiple of 2
        n_nonce_sectors = (n_nonce_sectors.div_ceil(2) * 2).max(16);
        // Each nonce slot requires 2 sectors so divide by 2 to get the number of slots
        NonceAbSlot::load_slots(nvs.split_off_front(n_nonce_sectors))
    };
    // The event log gets the rest of the sectors
    let mut mutation_log = MutationLog::new(share_partition, *nvs);
    // Initialize signer with device keypair and nonce slots
    let mut signer = FrostSigner::new(device_keypair, nonce_slots);
    // Apply any existing mutations from the log
    let mut name: Option<DeviceName> = None;
    for change in mutation_log.seek_iter() {
        match change {
            Ok(change) => match change {
                Mutation::Core(mutation) => {
                    signer.apply_mutation(mutation);
                }
                Mutation::Name(name_update) => {
                    // Truncate to DeviceName length when loading from flash
                    let device_name = frostsnap_comms::DeviceName::truncate(name_update);
                    name = Some(device_name);
                }
            },
            Err(e) => {
                panic!("failed to read event: {e}");
            }
        }
    }
    // Note: widgets handles recovery mode internally
    // Get active firmware information
    let active_partition = ota_partitions.active_partition();
    let (firmware_size, _firmware_and_signature_block_size) =
        active_partition.firmware_size().unwrap();
    let active_firmware_digest = active_partition.sha256_digest(sha256, Some(firmware_size));
    let device_id = signer.device_id();
    // Initialize state variables
    let mut soft_reset = true;
    let mut downstream_connection_state = DownstreamConnectionState::Disconnected;
    let mut sends_user: Vec<DeviceToUserMessage> = vec![];
    let mut outbox = VecDeque::new();
    let mut nonce_task_batch: Option<NonceJobBatch> = None;
    let mut inbox: Vec<CoordinatorSendBody> = vec![];
    let mut next_write_magic_bytes_downstream: Instant = Instant::from_ticks(0);
    let mut magic_bytes_timeout_counter = 0;
    // Define default workflow macro
    macro_rules! default_workflow {
        ($name:expr, $signer:expr) => {
            match ($name.as_ref(), $signer.held_shares().next()) {
                (Some(device_name), Some(held_share)) => ui::Workflow::Standby {
                    device_name: device_name.clone(),
                    held_share,
                },
                _ => ui::Workflow::None,
            }
        };
    }
    ui.set_workflow(default_workflow!(name, signer));
    let mut upstream_connection = UpstreamConnection::new(device_id);
    ui.set_upstream_connection_state(upstream_connection.state);
    let mut upgrade: Option<ota::FirmwareUpgradeMode> = None;
    let mut pending_device_name: Option<frostsnap_comms::DeviceName> = None;
    ui.clear_busy_task();
    // Main event loop
    loop {
        // A soft reset clears all session state without rebooting the chip
        // (e.g. when the upstream coordinator disappears and reconnects).
        if soft_reset {
            soft_reset = false;
            magic_bytes_timeout_counter = 0;
            signer.clear_tmp_data();
            sends_user.clear();
            downstream_connection_state = DownstreamConnectionState::Disconnected;
            upstream_connection.set_state(UpstreamConnectionState::PowerOn, ui);
            next_write_magic_bytes_downstream = Instant::from_ticks(0);
            ui.set_workflow(default_workflow!(name, signer));
            upgrade = None;
            pending_device_name = None;
            outbox.clear();
            nonce_task_batch = None;
        }
        // Detect pin is active-low: low means a device is plugged in below us.
        let is_usb_connected_downstream = !downstream_detect.is_high();
        // === DOWNSTREAM connection management
        match (is_usb_connected_downstream, downstream_connection_state) {
            (true, DownstreamConnectionState::Disconnected) => {
                downstream_connection_state = DownstreamConnectionState::Connected;
                ui.set_downstream_connection_state(downstream_connection_state);
            }
            (true, DownstreamConnectionState::Connected) => {
                // Periodically announce ourselves until the downstream device
                // answers with magic bytes of its own.
                let now = timer.now();
                if now > next_write_magic_bytes_downstream {
                    next_write_magic_bytes_downstream = now
                        .checked_add_duration(Duration::millis(MAGIC_BYTES_PERIOD))
                        .expect("won't overflow");
                    downstream_serial
                        .write_magic_bytes()
                        .expect("couldn't write magic bytes downstream");
                }
                if downstream_serial.find_and_remove_magic_bytes() {
                    downstream_connection_state = DownstreamConnectionState::Established;
                    ui.set_downstream_connection_state(downstream_connection_state);
                    upstream_connection.send_debug("Device read magic bytes from another device!");
                }
            }
            (
                false,
                state @ DownstreamConnectionState::Established
                | state @ DownstreamConnectionState::Connected,
            ) => {
                downstream_connection_state = DownstreamConnectionState::Disconnected;
                ui.set_downstream_connection_state(downstream_connection_state);
                if state == DownstreamConnectionState::Established {
                    upstream_connection.send_to_coordinator([DeviceSendBody::DisconnectDownstream]);
                }
            }
            _ => { /* nothing to do */ }
        }
        // Drain and forward everything the downstream device sent us.
        if downstream_connection_state == DownstreamConnectionState::Established {
            while let Some(device_send) = downstream_serial.receive() {
                match device_send {
                    Ok(device_send) => {
                        match device_send {
                            ReceiveSerial::MagicBytes(_) => {
                                upstream_connection
                                    .send_debug("downstream device sent unexpected magic bytes");
                                // Soft disconnect downstream device to reset it
                                upstream_connection
                                    .send_to_coordinator([DeviceSendBody::DisconnectDownstream]);
                                downstream_connection_state =
                                    DownstreamConnectionState::Disconnected;
                            }
                            ReceiveSerial::Message(message) => {
                                upstream_connection.forward_to_coordinator(message);
                            }
                            ReceiveSerial::Conch => { /* deprecated */ }
                            ReceiveSerial::Reset => {
                                upstream_connection
                                    .send_to_coordinator([DeviceSendBody::DisconnectDownstream]);
                                downstream_connection_state =
                                    DownstreamConnectionState::Disconnected;
                                break;
                            }
                            _ => { /* unused */ }
                        };
                    }
                    Err(e) => {
                        upstream_connection
                            .send_debug(format!("Failed to decode on downstream port: {e}"));
                        upstream_connection
                            .send_to_coordinator([DeviceSendBody::DisconnectDownstream]);
                        downstream_connection_state = DownstreamConnectionState::Disconnected;
                        break;
                    }
                };
            }
        }
        // === UPSTREAM connection management
        match upstream_connection.get_state() {
            UpstreamConnectionState::PowerOn => {
                // Handshake: answer upstream's magic bytes, announce our
                // firmware digest and (lack of) name.
                if upstream_serial.find_and_remove_magic_bytes() {
                    upstream_serial
                        .write_magic_bytes()
                        .expect("failed to write magic bytes");
                    log!("upstream got magic bytes");
                    upstream_connection.send_announcement(DeviceSendBody::Announce {
                        firmware_digest: active_firmware_digest,
                    });
                    upstream_connection.send_to_coordinator([match &name {
                        Some(name) => DeviceSendBody::SetName { name: name.clone() },
                        None => DeviceSendBody::NeedName,
                    }]);
                    upstream_connection.set_state(UpstreamConnectionState::Established, ui);
                }
            }
            _ => {
                let mut last_message_was_magic_bytes = false;
                while let Some(received_message) = upstream_serial.receive() {
                    match received_message {
                        Ok(received_message) => {
                            let received_message: ReceiveSerial<Upstream> = received_message;
                            last_message_was_magic_bytes =
                                matches!(received_message, ReceiveSerial::MagicBytes(_));
                            match received_message {
                                ReceiveSerial::Message(mut message) => {
                                    let for_me: bool = message
                                        .target_destinations
                                        .remove_from_recipients(device_id);
                                    // Forward messages downstream if there are other target destinations
                                    if downstream_connection_state
                                        == DownstreamConnectionState::Established
                                        && message.target_destinations.should_forward()
                                    {
                                        downstream_serial
                                            .send(message.clone())
                                            .expect("sending downstream");
                                    }
                                    if for_me {
                                        match message.message_body.decode() {
                                            // Upgrade mode must be handled eagerly
                                            Some(CoordinatorSendBody::Upgrade(
                                                CoordinatorUpgradeMessage::EnterUpgradeMode,
                                            )) => {
                                                if let Some(upgrade) = &mut upgrade {
                                                    let upstream_io = upstream_serial.inner_mut();
                                                    upgrade.enter_upgrade_mode(
                                                        upstream_io,
                                                        if downstream_connection_state == DownstreamConnectionState::Established {
                                                            Some(downstream_serial.inner_mut())
                                                        } else {
                                                            None
                                                        },
                                                        ui,
                                                        sha256,
                                                        *timer,
                                                        rsa,
                                                    );
                                                    // Upgrade finished (or passively relayed):
                                                    // reboot into the new state.
                                                    reset(upstream_serial);
                                                } else {
                                                    panic!("upgrade cannot start because we were not warned about it")
                                                }
                                            }
                                            Some(decoded) => {
                                                inbox.push(decoded);
                                            }
                                            _ => { /* unable to decode so ignore */ }
                                        }
                                    }
                                }
                                ReceiveSerial::Conch => {}
                                ReceiveSerial::Reset => { /* upstream doesn't send this */ }
                                _ => { /* unused */ }
                            }
                        }
                        Err(e) => {
                            panic!("upstream read fail:\n{}", e);
                        }
                    };
                }
                let is_upstream_established = matches!(
                    upstream_connection.get_state(),
                    UpstreamConnectionState::EstablishedAndCoordAck
                );
                if last_message_was_magic_bytes {
                    if is_upstream_established {
                        // We get unexpected magic bytes after receiving normal messages.
                        // Upstream must have reset so we should reset.
                        soft_reset = true;
                    } else if magic_bytes_timeout_counter > 1 {
                        // We keep receiving magic bytes so we reset the
                        // connection and try announce again.
                        upstream_connection.set_state(UpstreamConnectionState::PowerOn, ui);
                        magic_bytes_timeout_counter = 0;
                    } else {
                        magic_bytes_timeout_counter += 1;
                    }
                }
                // Let an in-flight upgrade state machine make progress.
                if let Some(upgrade_) = &mut upgrade {
                    let message = upgrade_.poll(ui);
                    upstream_connection.send_to_coordinator(message);
                }
            }
        }
        // Process inbox messages
        for message_body in inbox.drain(..) {
            match &message_body {
                CoordinatorSendBody::Cancel => {
                    signer.clear_tmp_data();
                    ui.set_workflow(default_workflow!(name, signer));
                    // This either resets to the previous name, or clears it (if prev name does
                    // not exist).
                    pending_device_name = None;
                    upgrade = None;
                }
                CoordinatorSendBody::AnnounceAck => {
                    upstream_connection
                        .set_state(UpstreamConnectionState::EstablishedAndCoordAck, ui);
                }
                CoordinatorSendBody::Naming(naming) => match naming {
                    frostsnap_comms::NameCommand::Preview(preview_name) => {
                        pending_device_name = Some(preview_name.clone());
                        ui.set_workflow(ui::Workflow::NamingDevice {
                            new_name: preview_name.clone(),
                        });
                    }
                    frostsnap_comms::NameCommand::Prompt(new_name) => {
                        ui.set_workflow(ui::Workflow::prompt(ui::Prompt::NewName {
                            old_name: name.clone(),
                            new_name: new_name.clone(),
                        }));
                    }
                },
                CoordinatorSendBody::Core(core_message) => {
                    if matches!(
                        core_message,
                        message::CoordinatorToDeviceMessage::Signing(
                            message::signing::CoordinatorSigning::OpenNonceStreams { .. }
                        )
                    ) {
                        ui.set_busy_task(ui::BusyTask::GeneratingNonces);
                    } else {
                        ui.clear_busy_task();
                    }
                    outbox.extend(
                        signer
                            .recv_coordinator_message(core_message.clone(), rng)
                            .expect("failed to process coordinator message"),
                    );
                }
                CoordinatorSendBody::Upgrade(upgrade_message) => match upgrade_message {
                    CoordinatorUpgradeMessage::PrepareUpgrade {
                        size,
                        firmware_digest,
                    } => {
                        let upgrade_ = ota_partitions.start_upgrade(
                            *size,
                            *firmware_digest,
                            active_firmware_digest,
                        );
                        upgrade = Some(upgrade_);
                    }
                    CoordinatorUpgradeMessage::PrepareUpgrade2 {
                        size,
                        firmware_digest,
                    } => {
                        let upgrade_ = ota_partitions.start_upgrade(
                            *size,
                            *firmware_digest,
                            active_firmware_digest,
                        );
                        upgrade = Some(upgrade_);
                    }
                    CoordinatorUpgradeMessage::EnterUpgradeMode => {}
                },
                CoordinatorSendBody::DataWipe => {
                    ui.set_workflow(ui::Workflow::prompt(ui::Prompt::WipeDevice))
                }
                CoordinatorSendBody::Challenge(challenge) => {
                    // Can only respond if we have hardware RSA and certificate
                    if let (Some(hw_rsa), Some(cert)) =
                        (hardware_rsa.as_mut(), certificate.as_ref())
                    {
                        let signature = hw_rsa.sign(challenge.as_ref(), sha256);
                        upstream_connection.send_to_coordinator([
                            DeviceSendBody::SignedChallenge {
                                signature: Box::new(signature),
                                certificate: Box::new(cert.clone()),
                            },
                        ]);
                    }
                }
            }
        }
        // Apply any staged mutations
        {
            let staged_mutations = signer.staged_mutations();
            if !staged_mutations.is_empty() {
                let now = timer.now();
                mutation_log
                    .append(staged_mutations.drain(..).map(Mutation::Core))
                    .expect("writing core mutations failed");
                let after = timer.now().checked_duration_since(now).unwrap();
                upstream_connection
                    .send_debug(format!("core mutations took {}ms", after.to_millis()));
            }
        }
        // 🎯 Poll nonce job batch - process one nonce per iteration
        if let Some(batch) = nonce_task_batch.as_mut() {
            log!("start");
            if batch.do_work(&mut hmac_keys.share_encryption) {
                log!("finish");
                // Batch completed, send the response with all segments
                let completed_batch = nonce_task_batch.take().unwrap();
                let segments = completed_batch.into_segments();
                outbox.push_back(DeviceSend::ToCoordinator(Box::new(
                    message::DeviceToCoordinatorMessage::Signing(
                        message::signing::DeviceSigning::NonceResponse { segments },
                    ),
                )));
            }
            log!("done");
        }
        // Handle message outbox to send
        while let Some(send) = outbox.pop_front() {
            match send {
                DeviceSend::ToCoordinator(boxed) => {
                    upstream_connection.send_to_coordinator([DeviceSendBody::Core(*boxed)]);
                }
                DeviceSend::ToUser(boxed) => {
                    match *boxed {
                        DeviceToUserMessage::FinalizeKeyGen { key_name: _ } => {
                            assert!(
                                save_pending_device_name(
                                    &mut pending_device_name,
                                    &mut name,
                                    &mut mutation_log,
                                    &mut upstream_connection,
                                ),
                                "must have named device before starting keygen"
                            );
                            ui.clear_busy_task();
                            ui.set_workflow(default_workflow!(name, signer));
                        }
                        DeviceToUserMessage::CheckKeyGen { phase } => {
                            ui.set_workflow(ui::Workflow::prompt(ui::Prompt::KeyGen { phase }));
                        }
                        DeviceToUserMessage::VerifyAddress {
                            address,
                            bip32_path,
                        } => {
                            let rand_seed = rng.next_u32();
                            ui.set_workflow(ui::Workflow::DisplayAddress {
                                address,
                                bip32_path,
                                rand_seed,
                            })
                        }
                        DeviceToUserMessage::SignatureRequest { phase } => {
                            ui.set_workflow(ui::Workflow::prompt(ui::Prompt::Signing { phase }));
                        }
                        DeviceToUserMessage::Restoration(to_user_restoration) => {
                            use frostsnap_core::device::restoration::ToUserRestoration::*;
                            match *to_user_restoration {
                                // Note: We immediately decrypt and display the backup without prompting.
                                // The coordinator has already requested this on behalf of the user.
                                // If we want to add "confirm before showing" in the future, we'd just
                                // delay calling phase.decrypt_to_backup() until after user confirms.
                                DisplayBackup {
                                    key_name,
                                    access_structure_ref,
                                    phase,
                                } => {
                                    let backup = phase
                                        .decrypt_to_backup(&mut hmac_keys.share_encryption)
                                        .expect("state changed while displaying backup");
                                    ui.set_workflow(ui::Workflow::DisplayBackup {
                                        key_name: key_name.to_string(),
                                        backup,
                                        access_structure_ref,
                                    });
                                }
                                EnterBackup { phase } => {
                                    ui.set_workflow(ui::Workflow::EnteringBackup(phase));
                                }
                                BackupSaved { .. } => {
                                    assert!(
                                        save_pending_device_name(
                                            &mut pending_device_name,
                                            &mut name,
                                            &mut mutation_log,
                                            &mut upstream_connection,
                                        ),
                                        "must have named device before loading backup"
                                    );
                                    ui.set_workflow(default_workflow!(name, signer));
                                }
                                ConsolidateBackup(phase) => {
                                    // XXX: We don't tell the user about this message and just automatically confirm it.
                                    // There isn't really anything they could do to actually verify to confirm it but since
                                    outbox.extend(signer.finish_consolidation(
                                        &mut hmac_keys.share_encryption,
                                        phase,
                                        rng,
                                    ));
                                    // The device can have a pending device name here if it was asked to consolidate right away instead of being asked to first save the backup.
                                    save_pending_device_name(
                                        &mut pending_device_name,
                                        &mut name,
                                        &mut mutation_log,
                                        &mut upstream_connection,
                                    );
                                    ui.set_workflow(default_workflow!(name, signer));
                                }
                            }
                        }
                        DeviceToUserMessage::NonceJobs(batch) => {
                            // 🚀 Set the batch for processing
                            nonce_task_batch = Some(batch);
                        }
                    };
                }
            }
        }
        // Handle UI events
        if let Some(ui_event) = ui.poll() {
            match ui_event {
                UiEvent::KeyGenConfirm { phase } => {
                    outbox.extend(
                        signer
                            .keygen_ack(*phase, &mut hmac_keys.share_encryption, rng)
                            .expect("state changed while confirming keygen"),
                    );
                    ui.clear_busy_task();
                }
                UiEvent::SigningConfirm { phase } => {
                    ui.set_busy_task(ui::BusyTask::Signing);
                    outbox.extend(
                        signer
                            .sign_ack(*phase, &mut hmac_keys.share_encryption)
                            .expect("state changed while acking sign"),
                    );
                }
                UiEvent::NameConfirm(ref new_name) => {
                    mutation_log
                        .push(Mutation::Name(new_name.to_string()))
                        .expect("flash write fail");
                    name = Some(new_name.clone());
                    pending_device_name = Some(new_name.clone());
                    ui.set_workflow(ui::Workflow::NamingDevice {
                        new_name: new_name.clone(),
                    });
                    upstream_connection.send_to_coordinator([DeviceSendBody::SetName {
                        name: new_name.clone(),
                    }]);
                }
                UiEvent::BackupRecorded {
                    access_structure_ref: _,
                } => {
                    upstream_connection
                        .send_to_coordinator([DeviceSendBody::Misc(CommsMisc::BackupRecorded)]);
                }
                UiEvent::UpgradeConfirm => {
                    if let Some(upgrade) = upgrade.as_mut() {
                        upgrade.upgrade_confirm();
                    }
                }
                UiEvent::EnteredShareBackup {
                    phase,
                    share_backup,
                } => {
                    outbox.extend(
                        signer.tell_coordinator_about_backup_load_result(phase, share_backup),
                    );
                }
                UiEvent::WipeDataConfirm => {
                    full_nvs.erase_all().expect("failed to erase nvs");
                    reset(upstream_serial);
                }
            }
        }
        // Push at most one queued message upstream per loop iteration.
        if let Some(message) = upstream_connection.dequeue_message() {
            upstream_serial
                .send(message)
                .expect("failed to send message upstream");
        }
    }
}
/// Tell the upstream device we're going away, then reboot this chip.
///
/// The reset signal is best-effort (its error is ignored) since we are
/// about to software-reset regardless.
fn reset<T>(upstream_serial: &mut SerialInterface<T, Upstream>)
where
    T: esp_hal::timer::Timer,
{
    let _ = upstream_serial.send_reset_signal();
    esp_hal::reset::software_reset();
}
/// Save a pending device name to flash and notify the coordinator
/// Returns true if a pending name was saved, false if there was no pending name
fn save_pending_device_name<S>(
    pending_device_name: &mut Option<DeviceName>,
    name: &mut Option<DeviceName>,
    mutation_log: &mut MutationLog<S>,
    upstream_connection: &mut UpstreamConnection,
) -> bool
where
    S: embedded_storage::nor_flash::NorFlash,
{
    match pending_device_name.take() {
        None => false,
        Some(new_name) => {
            // Adopt the name, persist it, then announce it upstream.
            *name = Some(new_name.clone());
            mutation_log
                .push(Mutation::Name(new_name.to_string()))
                .expect("flash write fail");
            upstream_connection.send_to_coordinator([DeviceSendBody::SetName { name: new_name }]);
            true
        }
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/touch_calibration.rs | device/src/touch_calibration.rs | //! Touch calibration functions for adjusting touch coordinates on the device
//! These polynomial functions correct for touch sensor inaccuracies
use embedded_graphics::prelude::Point;
use micromath::F32Ext;
/// Applies x-axis based adjustment to correct touch coordinates
///
/// Evaluates a degree-7 calibration polynomial at `x` and returns the
/// negated, truncated correction to apply to the y coordinate.
pub fn x_based_adjustment(x: i32) -> i32 {
    let xf = x as f32;
    // Individual polynomial terms; combined below in the original
    // left-to-right order so the floating-point result is unchanged.
    let t7 = 1.3189e-14 * xf.powi(7);
    let t6 = 2.1879e-12 * xf.powi(6);
    let t5 = 7.6483e-10 * xf.powi(5);
    let t4 = 3.2578e-8 * xf.powi(4);
    let t3 = 6.4233e-5 * xf.powi(3);
    let t2 = 1.2229e-2 * xf.powi(2);
    let t1 = 0.8356 * xf;
    let corrected = t7 - t6 - t5 + t4 + t3 - t2 + t1 - 20.0;
    (-corrected) as i32
}
/// Applies y-axis based adjustment to correct touch coordinates
///
/// Only the region y <= 170 is corrected; above that the adjustment is zero.
pub fn y_based_adjustment(y: i32) -> i32 {
    if y > 170 {
        return 0;
    }
    let yf = y as f32;
    // Degree-4 calibration polynomial, terms combined in the original
    // left-to-right order so the floating-point result is unchanged.
    let t4 = -5.5439e-07 * yf.powi(4);
    let t3 = 1.7576e-04 * yf.powi(3);
    let t2 = 1.5104e-02 * yf.powi(2);
    let t1 = 2.3443e-02 * yf;
    let corrected = t4 + t3 - t2 - t1 + 40.0;
    (-corrected) as i32
}
/// Applies both x and y adjustments to a touch point
pub fn adjust_touch_point(mut point: Point) -> Point {
point.y += x_based_adjustment(point.x) + y_based_adjustment(point.y);
point.x = point.x.max(0);
point.y = point.y.max(0);
point
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/secure_boot.rs | device/src/secure_boot.rs | extern crate alloc;
use crate::partitions::EspFlashPartition;
use alloc::boxed::Box;
use alloc::{vec, vec::Vec};
use crc::Crc;
use embedded_storage::nor_flash::NorFlashErrorKind;
use esp_hal::efuse::Efuse;
use esp_hal::rsa::{operand_sizes::Op3072, Rsa, RsaModularExponentiation};
use esp_hal::sha::{Sha, Sha256};
use esp_hal::Blocking;
use frostsnap_comms::firmware_reader::SECTOR_SIZE;
use nb::block;
/// Failure modes of the secure-boot verification chain.
///
/// The lifetime `'a` carries the message held by
/// [`SecureBootError::SignatureError`].
#[derive(Debug)]
pub enum SecureBootError<'a> {
    /// No signature sector was found in the application partition.
    MissingSignature,
    /// CRC32 over the signature block did not match the stored value.
    ChecksumInvalid,
    /// No usable (non-revoked) secure boot key digest was found in eFuse.
    EfuseError,
    /// SHA-256 of the embedded public key material did not match the eFuse digest.
    PublicKeyInvalid,
    /// A flash sector read failed; holds the sector index and the flash error kind.
    ReadImageError(u32, NorFlashErrorKind),
    /// SHA-256 of the application image did not match the signed digest.
    ImageHashInvalid,
    /// RSA-PSS verification ran but the signature did not check out.
    SignatureInvalid,
    /// RSA-PSS verification could not be carried out; holds a reason message.
    SignatureError(&'a str),
}
/// Parsed signature block as laid out in the signature sector.
///
/// Byte offsets within the raw sector are fixed by
/// [`SignatureBlock::from_bytes`]; the CRC covers the first 1196 bytes.
#[derive(Debug)]
struct SignatureBlock {
    /// SHA-256 digest of the application image (sector bytes 4..36).
    image_digest: [u8; 32],
    /// RSA-3072 public modulus, little-endian bytes (sector bytes 36..420).
    rsa_public_modulus: [u8; 384],
    /// RSA public exponent, little-endian bytes (sector bytes 420..424).
    rsa_public_exponent: [u8; 4],
    /// Pre-calculated R value consumed by the RSA peripheral (bytes 424..808).
    precalculated_r: [u8; 384],
    /// Pre-calculated M' value consumed by the RSA peripheral (bytes 808..812).
    precalculated_m_prime: [u8; 4],
    /// RSA-PSS signature over the image digest (sector bytes 812..1196).
    rsa_pss_signature: [u8; 384],
    /// Stored CRC32 over sector bytes 0..1196 (sector bytes 1196..1200).
    crc32: [u8; 4],
}
impl SignatureBlock {
    /// Parse a raw signature sector into its fixed-offset fields.
    ///
    /// The first 4 bytes of the sector (magic/version prefix) are skipped;
    /// every other field is copied verbatim from its fixed byte range.
    fn from_bytes(data: &[u8; SECTOR_SIZE]) -> Self {
        let mut block = SignatureBlock {
            image_digest: [0; 32],
            rsa_public_modulus: [0; 384],
            rsa_public_exponent: [0; 4],
            precalculated_r: [0; 384],
            precalculated_m_prime: [0; 4],
            rsa_pss_signature: [0; 384],
            crc32: [0; 4],
        };
        block.image_digest.copy_from_slice(&data[4..36]);
        block.rsa_public_modulus.copy_from_slice(&data[36..420]);
        block.rsa_public_exponent.copy_from_slice(&data[420..424]);
        block.precalculated_r.copy_from_slice(&data[424..808]);
        block.precalculated_m_prime.copy_from_slice(&data[808..812]);
        block.rsa_pss_signature.copy_from_slice(&data[812..1196]);
        block.crc32.copy_from_slice(&data[1196..1200]);
        block
    }
}
// Convert bytes to u32 array in little-endian format (ESP32-C3 native format)
fn bytes_to_u32_le_native(bytes: &[u8]) -> Vec<u32> {
    // Each 4-byte chunk becomes one word; a trailing partial chunk is
    // zero-padded on the high end before conversion.
    let mut words = Vec::with_capacity((bytes.len() + 3) / 4);
    for chunk in bytes.chunks(4) {
        let mut buf = [0u8; 4];
        buf[..chunk.len()].copy_from_slice(chunk);
        words.push(u32::from_le_bytes(buf));
    }
    words
}
/// Verify the RSA-3072 PSS signature in `sig_block` over `message_hash`
/// using the hardware RSA peripheral.
///
/// Returns `Ok(true)` when the PSS encoding checks out, `Ok(false)` when
/// it does not, and `Err` for size or peripheral problems.
fn verify_rsa_pss_signature(
    rsa: &mut Rsa<'_, Blocking>,
    sig_block: &SignatureBlock,
    message_hash: &[u8; 32],
    sha: &mut Sha,
) -> Result<bool, &'static str> {
    const KEY_SIZE_BYTES: usize = 384;
    // Wait for RSA peripheral to be ready
    block!(rsa.ready()).map_err(|_| "RSA peripheral not ready")?;
    // Convert signature block data to u32 arrays in ESP32-C3 native little-endian format
    // All inputs should be little-endian byte arrays
    let modulus_u32 = bytes_to_u32_le_native(&sig_block.rsa_public_modulus);
    let exponent_u32 = bytes_to_u32_le_native(&sig_block.rsa_public_exponent);
    let r_u32 = bytes_to_u32_le_native(&sig_block.precalculated_r);
    // No byte reversal for signature - use native little-endian format
    let signature_u32 = bytes_to_u32_le_native(&sig_block.rsa_pss_signature);
    let m_prime = u32::from_le_bytes(sig_block.precalculated_m_prime);
    // Convert to fixed-size arrays for Op3072 (96 u32 words = 384 bytes = 3072 bits)
    if modulus_u32.len() != 96 || r_u32.len() != 96 || signature_u32.len() != 96 {
        return Err("Invalid RSA key size - expected 3072 bits");
    }
    let mut modulus = [0u32; 96];
    let mut r_value = [0u32; 96];
    let mut signature = [0u32; 96];
    let mut exponent = [0u32; 96];
    modulus.copy_from_slice(&modulus_u32);
    r_value.copy_from_slice(&r_u32);
    signature.copy_from_slice(&signature_u32);
    exponent[0] = exponent_u32[0]; // Exponent is typically small, just copy first word
    // Create modular exponentiation context
    let mut mod_exp = RsaModularExponentiation::<Op3072, _>::new(rsa, &exponent, &modulus, m_prime);
    // Start the RSA operation: signature^exponent mod modulus
    mod_exp.start_exponentiation(&signature, &r_value);
    // Read results - no async waiting needed
    let mut decrypted = [0u32; 96];
    mod_exp.read_results(&mut decrypted);
    // Convert ESP32-C3 RSA result to PSS verification format
    // ESP32-C3 returns little-endian words, PSS expects big-endian byte order
    let mut decrypted_bytes = vec![0u8; KEY_SIZE_BYTES];
    for (i, &word) in decrypted.iter().enumerate() {
        let bytes = word.to_le_bytes();
        decrypted_bytes[i * 4..(i + 1) * 4].copy_from_slice(&bytes);
    }
    decrypted_bytes.reverse(); // Reverse entire array to get correct PSS format
    let decrypted_bytes: [u8; KEY_SIZE_BYTES] = decrypted_bytes.try_into().unwrap();
    // Verify PSS padding manually
    Ok(verify_pss_padding(&decrypted_bytes, message_hash, sha))
}
/// Check the EMSA-PSS encoding (RFC 8017 section 9.1.2) of `decrypted`
/// against `message_hash`, using SHA-256, a 32-byte salt and MGF1.
///
/// `decrypted` must be the 384-byte result of the raw RSA public-key
/// operation on the signature. Returns `true` only if every structural
/// check (trailer, padding, salt length) and the final hash comparison pass.
fn verify_pss_padding(decrypted: &[u8], message_hash: &[u8], sha: &mut Sha) -> bool {
    const SALT_LEN: usize = 32; // PSS salt length for ESP32 (confirmed from ESP-IDF research)
    const HASH_LEN: usize = 32; // SHA-256 hash length
    const KEY_SIZE_BYTES: usize = 384;
    if decrypted.len() != KEY_SIZE_BYTES {
        return false;
    }
    // Check trailer field (last byte should be 0xBC)
    if decrypted[KEY_SIZE_BYTES - 1] != 0xbc {
        return false;
    }
    // Extract mHash (H) from the end before trailer
    let em_hash = &decrypted[KEY_SIZE_BYTES - HASH_LEN - 1..KEY_SIZE_BYTES - 1];
    // Extract maskedDB
    let masked_db_len = KEY_SIZE_BYTES - HASH_LEN - 1;
    let masked_db = &decrypted[..masked_db_len];
    // Generate mask using MGF1
    let db_mask = mgf1(em_hash, masked_db_len, sha);
    // Unmask DB: DB = maskedDB XOR dbMask
    let mut db = vec![0u8; masked_db_len];
    for i in 0..masked_db_len {
        db[i] = masked_db[i] ^ db_mask[i];
    }
    // Clear the leftmost bits (since emBits might not be a multiple of 8)
    let em_bits = KEY_SIZE_BYTES * 8 - 1; // emLen * 8 - 1 for PSS
    let bits_to_clear = 8 - (em_bits % 8);
    if bits_to_clear < 8 {
        db[0] &= 0xff >> bits_to_clear;
    }
    // Check that DB starts with zeros followed by 0x01
    let ps_len = masked_db_len - SALT_LEN - 1;
    for byte in db.iter().take(ps_len) {
        if *byte != 0x00 {
            return false;
        }
    }
    if db[ps_len] != 0x01 {
        return false;
    }
    // Extract salt
    let salt = &db[ps_len + 1..];
    if salt.len() != SALT_LEN {
        return false;
    }
    // Reconstruct M' = 0x00 00 00 00 00 00 00 00 || mHash || salt
    // Not to be confused with M' from signature block
    let mut m_prime = vec![0u8; 8 + HASH_LEN + SALT_LEN];
    m_prime[8..8 + HASH_LEN].copy_from_slice(message_hash);
    m_prime[8 + HASH_LEN..].copy_from_slice(salt);
    // Compute H' = Hash(M') using hardware SHA peripheral
    let h_prime = compute_sha256_hardware(sha, &m_prime);
    // Verify H == H'
    em_hash == h_prime.as_slice()
}
/// MGF1 mask generation (RFC 8017) with SHA-256 as the underlying hash.
///
/// Produces `mask_len` bytes by hashing `seed || counter` for an
/// incrementing 32-bit big-endian counter and concatenating the digests.
fn mgf1(seed: &[u8], mask_len: usize, sha: &mut Sha) -> Vec<u8> {
    let mut mask = Vec::with_capacity(mask_len);
    // Reuse one scratch buffer: the seed stays in place and only the
    // trailing counter bytes are rewritten each round.
    let mut input = Vec::with_capacity(seed.len() + 4);
    input.extend_from_slice(seed);
    for counter in 0u32.. {
        if mask.len() >= mask_len {
            break;
        }
        input.truncate(seed.len());
        input.extend_from_slice(&counter.to_be_bytes());
        mask.extend_from_slice(&compute_sha256_hardware(sha, &input));
    }
    mask.truncate(mask_len);
    mask
}
// Helper function to compute SHA256 using hardware peripheral
/// One-shot SHA-256 of `data` on the hardware SHA engine.
///
/// The peripheral may accept only part of the input per `update` call, so
/// the unconsumed remainder is fed back in until the buffer is exhausted.
fn compute_sha256_hardware(sha: &mut Sha, data: &[u8]) -> [u8; 32] {
    let mut hasher = sha.start::<Sha256>();
    let mut remaining = data;
    while !remaining.is_empty() {
        // update() returns the not-yet-consumed tail of the input.
        remaining = nb::block!(hasher.update(remaining)).unwrap();
    }
    let mut result = [0u8; 32];
    nb::block!(hasher.finish(&mut result)).unwrap();
    result
}
// Find secure boot key digest from eFuse by checking KEY_PURPOSE fields
/// Scan the six eFuse key blocks for one whose purpose marks it as a
/// secure boot digest (purpose values 9, 10 or 11) and which has not been
/// revoked; returns the 32-byte digest of the first such key, if any.
fn find_secure_boot_key() -> Option<[u8; 32]> {
    use esp_hal::efuse::{
        KEY0, KEY1, KEY2, KEY3, KEY4, KEY5, KEY_PURPOSE_0, KEY_PURPOSE_1, KEY_PURPOSE_2,
        KEY_PURPOSE_3, KEY_PURPOSE_4, KEY_PURPOSE_5, SECURE_BOOT_KEY_REVOKE0,
        SECURE_BOOT_KEY_REVOKE1, SECURE_BOOT_KEY_REVOKE2,
    };
    // Key purpose values and their revoke fields (from ESP32-C3 TRM Table 4.3-1 & 4.3-2)
    let secure_boot_digests = [
        (9, SECURE_BOOT_KEY_REVOKE0),  // DIGEST0
        (10, SECURE_BOOT_KEY_REVOKE1), // DIGEST1
        (11, SECURE_BOOT_KEY_REVOKE2), // DIGEST2
    ];
    let key_purpose_fields = [
        KEY_PURPOSE_0,
        KEY_PURPOSE_1,
        KEY_PURPOSE_2,
        KEY_PURPOSE_3,
        KEY_PURPOSE_4,
        KEY_PURPOSE_5,
    ];
    let key_data_fields = [KEY0, KEY1, KEY2, KEY3, KEY4, KEY5];
    // Search through all key blocks
    for (i, &purpose_field) in key_purpose_fields.iter().enumerate() {
        let purpose: u8 = Efuse::read_field_le(purpose_field);
        // Find matching secure boot digest revoke field
        if let Some((_, revoke_field)) = secure_boot_digests
            .iter()
            .find(|(purpose_val, _)| *purpose_val == purpose)
        {
            // Check if this key is revoked
            let is_revoked = Efuse::read_bit(*revoke_field);
            if !is_revoked {
                // Read the key data (32 bytes)
                let key_data: [u8; 32] = Efuse::read_field_le(key_data_fields[i]);
                return Some(key_data);
            }
        }
    }
    None
}
/// Check if secure boot is enabled by looking for secure boot key digests in eFuse
///
/// Returns `true` when at least one non-revoked secure boot key digest is
/// programmed, i.e. [`find_secure_boot_key`] succeeds.
pub fn is_secure_boot_enabled() -> bool {
    find_secure_boot_key().is_some()
}
/// Locate the signature sector within `partition` and read its contents.
///
/// Returns the sector index together with the raw sector bytes, or `None`
/// when no signature sector exists or the flash read fails.
fn read_signature_sector(partition: &EspFlashPartition) -> Option<(u32, Box<[u8; SECTOR_SIZE]>)> {
    frostsnap_comms::firmware_reader::find_signature_sector(partition)
        .and_then(|idx| partition.read_sector(idx).ok().map(|data| (idx, data)))
}
/// Run the full secure-boot verification chain for `app_partition`.
///
/// Order of checks: signature-block CRC32, public-key digest against the
/// eFuse digest, SHA-256 of the application image against the signed
/// digest, and finally the RSA-PSS signature itself. Any failure aborts
/// with the corresponding [`SecureBootError`].
pub fn verify_secure_boot<'a>(
    app_partition: &EspFlashPartition,
    rsa: &mut Rsa<'_, Blocking>,
    sha: &mut Sha,
) -> Result<(), SecureBootError<'a>> {
    let (signature_sector_index, signature_block) =
        read_signature_sector(app_partition).ok_or(SecureBootError::MissingSignature)?;
    // Parse signature block structure
    let parsed_block = SignatureBlock::from_bytes(&signature_block);
    // Verify CRC32 checksum
    const CRC: Crc<u32> = Crc::<u32>::new(&crc::CRC_32_ISO_HDLC);
    // CRC32 is calculated over first 1196 bytes
    let calculated_crc = CRC.checksum(&signature_block[0..1196]);
    let stored_crc = u32::from_le_bytes(parsed_block.crc32);
    if calculated_crc != stored_crc {
        return Err(SecureBootError::ChecksumInvalid);
    }
    // Verify public key digest against eFuse FIRST
    // Find the secure boot key digest from eFuse by checking KEY_PURPOSE fields
    let efuse_key_digest = match find_secure_boot_key() {
        Some(key_digest) => key_digest,
        None => return Err(SecureBootError::EfuseError),
    };
    // Calculate SHA-256 of public key material from signature block (bytes 36-812)
    // This includes: RSA modulus (36-420) + exponent (420-424) + pre-calculated R (424-812)
    let public_key_data = &signature_block[36..812]; // 776 bytes total
    let calculated_key_digest = compute_sha256_hardware(sha, public_key_data);
    if calculated_key_digest != efuse_key_digest {
        return Err(SecureBootError::PublicKeyInvalid);
    }
    // Verify image digest (SHA-256 of application data before signature block)
    // Calculate how many sectors contain application data (before signature block)
    let mut hasher = sha.start::<Sha256>();
    for sector in 0..signature_sector_index {
        match app_partition.read_sector(sector) {
            Ok(sector_data) => {
                let mut remaining = sector_data.as_slice();
                while !remaining.is_empty() {
                    remaining = block!(hasher.update(remaining)).unwrap();
                }
            }
            Err(e) => {
                // "Failed to read flash sector {} for image digest verification: {:?}", sector, e
                return Err(SecureBootError::ReadImageError(sector, e));
            }
        }
    }
    let mut calculated_digest = [0u8; 32];
    block!(hasher.finish(&mut calculated_digest)).unwrap();
    if calculated_digest != parsed_block.image_digest {
        return Err(SecureBootError::ImageHashInvalid);
    }
    // Verify RSA-PSS signature using hardware RSA peripheral
    match verify_rsa_pss_signature(rsa, &parsed_block, &parsed_block.image_digest, sha) {
        Ok(true) => { /* Firmware signature verified successfully */ }
        Ok(false) => return Err(SecureBootError::SignatureInvalid),
        Err(e) => return Err(SecureBootError::SignatureError(e)),
    }
    // If we reach here, ALL security checks have passed
    Ok(())
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
frostsnap/frostsnap | https://github.com/frostsnap/frostsnap/blob/961deb080a81c754f402aaea24286bd141178e9d/device/src/frosty_ui.rs | device/src/frosty_ui.rs | use crate::DISPLAY_REFRESH_MS;
use alloc::{boxed::Box, string::ToString};
use embedded_graphics::prelude::*;
use esp_hal::prelude::*;
use frostsnap_cst816s::interrupt::TouchReceiver;
use frostsnap_widgets::palette::PALETTE;
use frostsnap_widgets::{
backup::{BackupDisplay, EnterShareScreen},
debug::OverlayDebug,
keygen_check::KeygenCheck,
sign_prompt::SignTxPrompt,
DeviceNameScreen, DynWidget, FirmwareUpgradeConfirm, FirmwareUpgradeProgress, Standby, Widget,
HOLD_TO_CONFIRM_TIME_LONG_MS, HOLD_TO_CONFIRM_TIME_MS,
};
use crate::touch_handler;
use crate::ui::FirmwareUpgradeStatus;
use crate::{
root_widget::RootWidget, ui::*, widget_tree::WidgetTree, DownstreamConnectionState, Instant,
UpstreamConnectionState,
};
// Type alias for the display to match factory
/// ST7789 panel driven over SPI (exclusive device, no CS pin, no delay),
/// with GPIO outputs for the data/command and reset lines.
type DeviceDisplay<'a> = mipidsi::Display<
    display_interface_spi::SPIInterface<
        embedded_hal_bus::spi::ExclusiveDevice<
            esp_hal::spi::master::Spi<'a, esp_hal::Blocking>,
            crate::peripherals::NoCs,
            embedded_hal_bus::spi::NoDelay,
        >,
        esp_hal::gpio::Output<'a>,
    >,
    mipidsi::models::ST7789,
    esp_hal::gpio::Output<'a>,
>;
/// Top-level UI state for the device: owns the display, the widget tree,
/// the touch input source and the redraw/timing bookkeeping.
pub struct FrostyUi<'a> {
    /// Draw target wrapping the physical display with the palette background.
    pub display: frostsnap_widgets::SuperDrawTarget<
        DeviceDisplay<'a>,
        embedded_graphics::pixelcolor::Rgb565,
    >,
    /// Root widget tree wrapped in the (feature-gated) debug overlay.
    pub widget: OverlayDebug<RootWidget>,
    /// Interrupt-driven touch event source.
    pub touch_receiver: TouchReceiver,
    /// Most recent touch position, if a touch is in progress.
    pub last_touch: Option<Point>,
    /// When the screen was last redrawn, used to rate-limit redraws.
    pub last_redraw_time: Instant,
    /// Last known state of the downstream (next-device) link.
    pub downstream_connection_state: DownstreamConnectionState,
    /// Last known state of the upstream (coordinator-side) link, once seen.
    pub upstream_connection_state: Option<UpstreamConnectionState>,
    /// Hardware timer used as the UI clock source.
    pub timer: esp_hal::timer::timg::Timer<
        esp_hal::timer::timg::Timer0<esp_hal::peripherals::TIMG1>,
        esp_hal::Blocking,
    >,
    /// Task currently shown as "busy", if any.
    pub busy_task: Option<BusyTask>,
    /// Index tracked by the touch handler for the current widget.
    pub current_widget_index: usize,
}
impl<'a> FrostyUi<'a> {
    /// Create a new FrostyUi instance
    ///
    /// Starts on the standby screen, wraps the root widget in the debug
    /// overlay (log/memory/FPS panes enabled per cargo feature) and sizes
    /// it for the 240x280 panel.
    pub fn new(
        display: DeviceDisplay<'a>,
        touch_receiver: TouchReceiver,
        timer: esp_hal::timer::timg::Timer<
            esp_hal::timer::timg::Timer0<esp_hal::peripherals::TIMG1>,
            esp_hal::Blocking,
        >,
    ) -> Self {
        use embedded_graphics::geometry::Size;
        use frostsnap_widgets::debug::EnabledDebug;
        let root_widget = RootWidget::new(WidgetTree::Standby(Box::new(Standby::new())), 200);
        let debug_config = EnabledDebug {
            logs: cfg!(feature = "debug_log"),
            memory: cfg!(feature = "debug_mem"),
            fps: cfg!(feature = "debug_fps"),
        };
        let mut widget_with_debug = OverlayDebug::new(root_widget, debug_config);
        widget_with_debug.set_constraints(Size::new(240, 280));
        Self {
            display: frostsnap_widgets::SuperDrawTarget::new(display, PALETTE.background),
            widget: widget_with_debug,
            touch_receiver,
            downstream_connection_state: DownstreamConnectionState::Disconnected,
            upstream_connection_state: None,
            last_touch: None,
            last_redraw_time: Instant::from_ticks(0),
            current_widget_index: 0,
            timer,
            busy_task: Default::default(),
        }
    }
}
impl<'a> UserInteraction for FrostyUi<'a> {
    /// Record the downstream link state; stores only when it changed.
    fn set_downstream_connection_state(&mut self, state: crate::DownstreamConnectionState) {
        if state != self.downstream_connection_state {
            self.downstream_connection_state = state;
        }
    }
    /// Record the upstream link state; stores only when it changed.
    fn set_upstream_connection_state(&mut self, state: crate::UpstreamConnectionState) {
        if Some(state) != self.upstream_connection_state {
            self.upstream_connection_state = Some(state);
        }
    }
    /// Show `workflow` on screen.
    ///
    /// When the widget currently shown can represent the new workflow (same
    /// variant), it is updated in place to avoid a page transition;
    /// otherwise a fresh widget tree is built and switched to with a fade.
    fn set_workflow(&mut self, workflow: Workflow) {
        // Check if we can update the current widget instead of switching
        let current_widget = self.widget.inner_mut().current_mut();
        match (current_widget, &workflow) {
            // If we're already showing Standby, just update its mode
            (WidgetTree::Standby(ref mut standby), Workflow::Startup) => {
                standby.clear_content();
                return;
            }
            (WidgetTree::Standby(ref mut standby), Workflow::None) => {
                standby.set_welcome();
                return;
            }
            (
                WidgetTree::Standby(ref mut standby),
                Workflow::Standby {
                    device_name,
                    held_share,
                },
            ) => {
                standby.set_key(device_name.to_string(), held_share.clone());
                return;
            }
            // If we're already showing DeviceNaming and get another NamingDevice workflow, just update the text
            (
                WidgetTree::DeviceNaming(ref mut device_name_screen),
                Workflow::NamingDevice { ref new_name },
            ) => {
                device_name_screen.set_name(new_name.to_string());
                return;
            }
            // If we're already showing FirmwareUpgradeProgress, just update it
            (
                WidgetTree::FirmwareUpgradeProgress {
                    widget,
                    ref mut status,
                },
                Workflow::FirmwareUpgrade(ref status_current),
            ) => {
                // In-place update only while the phase (erase/download) matches;
                // a phase change falls through and builds a new widget.
                match (*status, status_current) {
                    (
                        FirmwareUpgradeStatus::Erase { .. },
                        FirmwareUpgradeStatus::Erase { progress },
                    )
                    | (
                        FirmwareUpgradeStatus::Download { .. },
                        FirmwareUpgradeStatus::Download { progress },
                    ) => {
                        *status = *status_current;
                        widget.update_progress(*progress);
                        return;
                    }
                    _ => { /* we need a new widget */ }
                }
            }
            // If we're showing KeygenCheck and get another KeyGen prompt, we need a new one
            // because the security code would be different
            _ => {} // Different widget types, need to switch
        };
        // Convert workflow to widget tree
        let new_page = match workflow {
            Workflow::Startup => WidgetTree::Standby(Box::new(Standby::new())),
            Workflow::None => {
                let mut standby = Standby::new();
                standby.set_welcome();
                WidgetTree::Standby(Box::new(standby))
            }
            Workflow::Standby {
                device_name,
                held_share,
            } => {
                let mut standby = Standby::new();
                standby.set_key(device_name.to_string(), held_share);
                WidgetTree::Standby(Box::new(standby))
            }
            Workflow::UserPrompt(prompt) => {
                match prompt {
                    Prompt::KeyGen { phase } => {
                        // Extract t_of_n and session_hash from phase
                        let t_of_n = phase.t_of_n();
                        let session_hash = phase.session_hash();
                        // Extract the first 4 bytes as security check code
                        let mut security_check_code = [0u8; 4];
                        security_check_code.copy_from_slice(&session_hash.0[..4]);
                        // Create the KeygenCheck widget with just the display data
                        let widget = KeygenCheck::new(t_of_n, security_check_code);
                        // Store both widget and phase in the WidgetTree
                        WidgetTree::KeygenCheck {
                            widget: Box::new(widget),
                            phase: Some(phase),
                        }
                    }
                    Prompt::Signing { phase } => {
                        // Get the sign task from the phase
                        let sign_task = phase.sign_task();
                        // Check what type of signing task this is
                        match &sign_task.inner {
                            frostsnap_core::SignTask::BitcoinTransaction {
                                tx_template,
                                network,
                            } => {
                                // Get the user prompt from the transaction template
                                let prompt = tx_template.user_prompt(*network);
                                // Create the SignTxPrompt widget
                                let widget = Box::new(SignTxPrompt::new(prompt));
                                // Store both widget and phase in the WidgetTree
                                WidgetTree::SignTxPrompt {
                                    widget,
                                    phase: Some(phase),
                                }
                            }
                            frostsnap_core::SignTask::Test { message } => {
                                use frostsnap_widgets::SignMessageConfirm;
                                let widget = Box::new(SignMessageConfirm::new(message.clone()));
                                WidgetTree::SignTestPrompt {
                                    widget,
                                    phase: Some(phase),
                                }
                            }
                            frostsnap_core::SignTask::Nostr { .. } => {
                                // Nostr signing not implemented yet
                                let mut standby = Standby::new();
                                standby.set_welcome();
                                WidgetTree::Standby(Box::new(standby))
                            }
                        }
                    }
                    Prompt::ConfirmFirmwareUpgrade {
                        firmware_digest,
                        size,
                    } => {
                        // Create the FirmwareUpgradeConfirm widget
                        let widget = Box::new(FirmwareUpgradeConfirm::new(firmware_digest.0, size));
                        // Store the widget and metadata in the WidgetTree
                        WidgetTree::FirmwareUpgradeConfirm {
                            widget,
                            firmware_hash: firmware_digest.0,
                            firmware_size: size,
                            confirmed: false,
                        }
                    }
                    Prompt::NewName { old_name, new_name } => {
                        use frostsnap_widgets::DefaultTextStyle;
                        use frostsnap_widgets::{HoldToConfirm, Text, FONT_MED};
                        // Create text for the prompt
                        let prompt_text = if let Some(old_name) = old_name {
                            format!("Rename device\nfrom '{}'\nto '{}'?", old_name, new_name)
                        } else {
                            format!("Name device\n'{}'?", new_name)
                        };
                        let text_widget = Text::new(
                            prompt_text,
                            DefaultTextStyle::new(FONT_MED, PALETTE.on_background),
                        )
                        .with_alignment(embedded_graphics::text::Alignment::Center);
                        // Create HoldToConfirm widget with 2 second hold time
                        let hold_to_confirm =
                            HoldToConfirm::new(HOLD_TO_CONFIRM_TIME_MS, text_widget);
                        WidgetTree::NewNamePrompt {
                            widget: Box::new(hold_to_confirm),
                            new_name: Some(new_name.clone()),
                        }
                    }
                    Prompt::WipeDevice => {
                        use frostsnap_widgets::DefaultTextStyle;
                        use frostsnap_widgets::{HoldToConfirm, Text, FONT_MED};
                        // Create warning text for device wipe
                        let prompt_text = "WARNING!\n\nErase all data?\n\nHold to confirm";
                        let text_widget =
                            Text::new(prompt_text, DefaultTextStyle::new(FONT_MED, PALETTE.error))
                                .with_alignment(embedded_graphics::text::Alignment::Center);
                        // Create HoldToConfirm widget with 3 second hold time for wipe
                        let hold_to_confirm =
                            HoldToConfirm::new(HOLD_TO_CONFIRM_TIME_LONG_MS, text_widget);
                        WidgetTree::WipeDevicePrompt {
                            widget: Box::new(hold_to_confirm),
                            confirmed: false,
                        }
                    }
                }
            }
            Workflow::NamingDevice { new_name } => {
                let device_name_screen = DeviceNameScreen::new(new_name.to_string());
                WidgetTree::DeviceNaming(Box::new(device_name_screen))
            }
            Workflow::DisplayBackup {
                key_name: _,
                backup,
                access_structure_ref,
            } => {
                let word_indices = backup.to_word_indices();
                let share_index: u16 = backup
                    .index()
                    .try_into()
                    .expect("Share index should fit in u16");
                let backup_display = BackupDisplay::new(word_indices, share_index);
                WidgetTree::DisplayBackup {
                    widget: Box::new(backup_display),
                    access_structure_ref: Some(access_structure_ref),
                }
            }
            Workflow::EnteringBackup(phase) => {
                let mut widget = Box::new(EnterShareScreen::new());
                if cfg!(feature = "prefill-words") {
                    widget.prefill_test_words();
                }
                WidgetTree::EnterBackup {
                    widget,
                    phase: Some(phase),
                }
            }
            Workflow::DisplayAddress {
                address,
                bip32_path,
                rand_seed,
            } => {
                use frostsnap_widgets::{AddressWithPath, Center};
                // Create the address display widget with just the address index
                let mut address_display = AddressWithPath::new(address, bip32_path.index);
                address_display.set_rand_highlight(rand_seed);
                WidgetTree::AddressDisplay(Box::new(Center::new(address_display)))
            }
            Workflow::FirmwareUpgrade(status) => {
                use crate::ui::FirmwareUpgradeStatus;
                let widget = Box::new(match status {
                    FirmwareUpgradeStatus::Erase { progress } => {
                        FirmwareUpgradeProgress::erasing(progress)
                    }
                    FirmwareUpgradeStatus::Download { progress } => {
                        FirmwareUpgradeProgress::downloading(progress)
                    }
                    FirmwareUpgradeStatus::Passive => FirmwareUpgradeProgress::passive(),
                });
                WidgetTree::FirmwareUpgradeProgress { widget, status }
            }
        };
        // Switch to the new page with fade transition
        self.widget.inner_mut().switch_to(new_page);
    }
    /// Pump the UI once: drain touch events, redraw when the refresh
    /// interval has elapsed, and translate widget state changes into a
    /// [`UiEvent`] for the caller, if any is pending.
    fn poll(&mut self) -> Option<UiEvent> {
        let now = self.timer.now();
        let now_ms =
            frostsnap_widgets::Instant::from_millis(now.duration_since_epoch().to_millis());
        // Handle touch input
        touch_handler::process_all_touch_events(
            &mut self.touch_receiver,
            &mut self.widget,
            &mut self.last_touch,
            &mut self.current_widget_index,
            now_ms,
        );
        // Only redraw if enough time has passed since last redraw
        let elapsed_ms = (now - self.last_redraw_time).to_millis();
        if elapsed_ms >= DISPLAY_REFRESH_MS {
            // Update last redraw time
            self.last_redraw_time = now;
            // Draw the widget tree
            // Draw the UI stack (includes debug stats overlay)
            let _ = self.widget.draw(&mut self.display, now_ms);
        }
        // Check widget states and generate UI events
        match self.widget.inner_mut().current_mut() {
            WidgetTree::KeygenCheck {
                widget: keygen_check,
                phase,
            } => {
                // Check if confirmed and we still have the phase
                if keygen_check.is_confirmed() {
                    // Take the phase (move it out of the Option)
                    if let Some(phase_data) = phase.take() {
                        return Some(UiEvent::KeyGenConfirm { phase: phase_data });
                    }
                }
            }
            WidgetTree::SignTxPrompt {
                widget: sign_prompt,
                phase,
            } => {
                // Check if confirmed and we still have the phase
                if sign_prompt.is_confirmed() {
                    // Take the phase (move it out of the Option)
                    if let Some(phase_data) = phase.take() {
                        return Some(UiEvent::SigningConfirm { phase: phase_data });
                    }
                }
            }
            WidgetTree::SignTestPrompt { widget, phase } => {
                // Check if confirmed and we still have the phase
                if widget.is_confirmed() {
                    // Take the phase (move it out of the Option)
                    if let Some(phase_data) = phase.take() {
                        return Some(UiEvent::SigningConfirm { phase: phase_data });
                    }
                }
            }
            WidgetTree::FirmwareUpgradeConfirm {
                widget, confirmed, ..
            } => {
                // Check if the firmware upgrade was confirmed and we haven't sent the event yet
                if widget.is_confirmed() && !*confirmed {
                    *confirmed = true; // Mark as confirmed to prevent duplicate events
                    return Some(UiEvent::UpgradeConfirm);
                }
            }
            WidgetTree::DisplayBackup {
                widget,
                access_structure_ref,
            } => {
                if widget.is_confirmed() {
                    if let Some(access_structure_ref_data) = access_structure_ref.take() {
                        return Some(UiEvent::BackupRecorded {
                            access_structure_ref: access_structure_ref_data,
                        });
                    }
                }
            }
            WidgetTree::EnterBackup { widget, phase } => {
                // Check if backup entry is complete
                if widget.is_finished() {
                    if let Some(share_backup) = widget.get_backup() {
                        if let Some(phase) = phase.take() {
                            return Some(UiEvent::EnteredShareBackup {
                                phase,
                                share_backup,
                            });
                        };
                    }
                }
            }
            WidgetTree::NewNamePrompt { widget, new_name } => {
                // Check if the name prompt was confirmed and we haven't already sent the event
                if widget.is_completed() {
                    if let Some(name) = new_name.take() {
                        return Some(UiEvent::NameConfirm(name));
                    }
                }
            }
            WidgetTree::WipeDevicePrompt { widget, confirmed } => {
                // Check if the wipe device prompt was confirmed and we haven't already sent the event
                if widget.is_completed() && !*confirmed {
                    *confirmed = true;
                    return Some(UiEvent::WipeDataConfirm);
                }
            }
            _ => {}
        }
        None
    }
    /// Mark `task` as the current busy task and force a full redraw.
    fn set_busy_task(&mut self, task: BusyTask) {
        self.busy_task = Some(task);
        // TODO: Update widget tree based on busy task
        self.widget.force_full_redraw();
    }
    /// Clear any busy task and force a full redraw.
    fn clear_busy_task(&mut self) {
        self.busy_task = None;
        self.widget.force_full_redraw();
    }
}
| rust | MIT | 961deb080a81c754f402aaea24286bd141178e9d | 2026-01-04T20:21:09.467677Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.