repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/tls_process.rs
huginn-net-tls/tests/tls_process.rs
use huginn_net_tls::tls::TlsVersion; use huginn_net_tls::tls_process::{ determine_tls_version, extract_tls_signature_from_client_hello, is_tls_traffic, }; use huginn_net_tls::*; use tls_parser::TlsExtensionType; const TLS_HANDSHAKE_TYPE: u8 = 0x16; /// Helper to create minimal TLS handshake payload fn create_tls_payload(version: tls_parser::TlsVersion) -> Vec<u8> { let version_bytes = version.0.to_be_bytes(); vec![TLS_HANDSHAKE_TYPE, version_bytes[0], version_bytes[1], 0x00, 0x05] } #[test] fn test_tls_detection_by_port() { // Test TLS detection by standard port (443) let payload = vec![0u8; 10]; assert!(!is_tls_traffic(&payload)); // Non-TLS payload should be false } #[test] fn test_tls_detection_by_content_only() { let payload = vec![0u8; 10]; // Non-TLS payload assert!(!is_tls_traffic(&payload)); let tls_payload = create_tls_payload(tls_parser::TlsVersion::Tls12); assert!(is_tls_traffic(&tls_payload)); } #[test] fn test_non_tls_traffic() { // Test HTTP traffic is not detected as TLS let http_payload = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"; assert!(!is_tls_traffic(http_payload)); // Test invalid TLS version let invalid_payload = vec![TLS_HANDSHAKE_TYPE, 0x02, 0x00, 0x00, 0x05]; assert!(!is_tls_traffic(&invalid_payload)); } #[test] fn test_version_detection() { // Test TLS 1.2 detection let legacy_v12 = tls_parser::TlsVersion::Tls12; assert_eq!(determine_tls_version(&legacy_v12, &[]), TlsVersion::V1_2); // Test TLS 1.3 detection via supported_versions extension let legacy_v12_but_13 = tls_parser::TlsVersion::Tls12; assert_eq!( determine_tls_version(&legacy_v12_but_13, &[TlsExtensionType::SupportedVersions.into()]), TlsVersion::V1_3 ); // Test TLS 1.1 detection let legacy_v11 = tls_parser::TlsVersion::Tls11; assert_eq!(determine_tls_version(&legacy_v11, &[]), TlsVersion::V1_1); // Test TLS 1.0 detection let legacy_v10 = tls_parser::TlsVersion::Tls10; assert_eq!(determine_tls_version(&legacy_v10, &[]), TlsVersion::V1_0); // Test SSL 3.0 detection (legacy) 
let ssl30 = tls_parser::TlsVersion::Ssl30; assert_eq!(determine_tls_version(&ssl30, &[]), TlsVersion::Ssl3_0); } #[test] fn test_grease_filtering() { // Test GREASE values are properly identified assert!(TLS_GREASE_VALUES.contains(&0x0a0a)); assert!(!TLS_GREASE_VALUES.contains(&0x1301)); // TLS_AES_128_GCM_SHA256 } #[test] fn test_invalid_client_hello() { // Test parsing fails gracefully with invalid data let invalid_data = b"Not a TLS ClientHello"; assert!(parse_tls_client_hello(invalid_data).is_err()); } #[test] fn test_elliptic_curve_point_formats_parsing() { // This test verifies that elliptic_curve_point_formats are parsed correctly // even though they're not used in JA4 generation // Verify that the field exists in the Signature struct let sig = crate::tls::Signature { version: TlsVersion::V1_2, cipher_suites: vec![0x1301], extensions: vec![0x000b], // ec_point_formats extension elliptic_curves: vec![0x001d], elliptic_curve_point_formats: vec![0x00], // uncompressed point format signature_algorithms: vec![0x0403], sni: None, alpn: None, }; // Verify the field is accessible and has the expected type assert_eq!(sig.elliptic_curve_point_formats, vec![0x00]); // Verify that JA4 generation still works with this field present let ja4 = sig.generate_ja4(); assert!(ja4.ja4_a.starts_with("t12i")); // TLS 1.2, no SNI } #[test] fn test_signature_parsing_functional_approach() { // This test verifies that the functional parsing approach works correctly // without using mut and creates a complete Signature in one pass // Create a test signature to verify all fields are accessible let sig = crate::tls::Signature { version: TlsVersion::V1_3, cipher_suites: vec![0x1301, 0x1302], extensions: vec![0x0000, 0x0010, 0x000d], elliptic_curves: vec![0x001d, 0x0017], elliptic_curve_point_formats: vec![0x00], signature_algorithms: vec![0x0403, 0x0804], sni: Some("example.com".to_string()), alpn: Some("h2".to_string()), }; // Verify all fields are properly set assert_eq!(sig.version, 
TlsVersion::V1_3); assert_eq!(sig.cipher_suites.len(), 2); assert_eq!(sig.extensions.len(), 3); assert_eq!(sig.elliptic_curves.len(), 2); assert_eq!(sig.elliptic_curve_point_formats, vec![0x00]); assert_eq!(sig.signature_algorithms.len(), 2); assert!(sig.sni.is_some()); assert!(sig.alpn.is_some()); // Verify JA4 generation works with the functional structure let ja4 = sig.generate_ja4(); assert!(ja4.ja4_a.starts_with("t13d")); // TLS 1.3, SNI present assert!(!ja4.ja4_b.is_empty()); // Cipher suites present assert!(!ja4.ja4_c.is_empty()); // Extensions present // Verify the elegant enum approach for sorted/unsorted assert_eq!(ja4.full.variant_name(), "ja4"); // Sorted version let ja4_original = sig.generate_ja4_original(); assert_eq!(ja4_original.full.variant_name(), "ja4_o"); // Unsorted version } #[test] fn test_extract_signature_with_mock_client_hello() { use tls_parser::{TlsCipherSuiteID, TlsClientHelloContents, TlsCompressionID, TlsVersion}; // Create a mock ClientHello with basic fields but no extensions let client_hello = TlsClientHelloContents { version: TlsVersion::Tls12, random: &[0u8; 32], // 32 bytes for TLS random session_id: None, ciphers: vec![ TlsCipherSuiteID(0x1301), // TLS_AES_128_GCM_SHA256 TlsCipherSuiteID(0x0a0a), // GREASE value - should be filtered TlsCipherSuiteID(0x1302), // TLS_AES_256_GCM_SHA384 ], comp: vec![TlsCompressionID(0)], // NULL compression ext: None, // No extensions - should still generate JA4 with empty extension fields }; // Should succeed and generate JA4 with empty extension fields (matching JA4 spec) let result = extract_tls_signature_from_client_hello(&client_hello); assert!(result.is_ok(), "Failed to extract TLS signature from ClientHello"); let signature = match result { Ok(sig) => sig, Err(_) => panic!("Should not fail after assert"), }; assert_eq!(signature.version, crate::tls::TlsVersion::V1_2); assert_eq!(signature.cipher_suites.len(), 2); // GREASE filtered out assert!(signature.cipher_suites.contains(&0x1301)); 
assert!(signature.cipher_suites.contains(&0x1302)); assert!(!signature.cipher_suites.contains(&0x0a0a)); // GREASE filtered assert!(signature.extensions.is_empty()); // No extensions assert!(signature.signature_algorithms.is_empty()); // No signature algorithms assert!(signature.sni.is_none()); // No SNI assert!(signature.alpn.is_none()); // No ALPN // Should be able to generate JA4 fingerprint let ja4 = signature.generate_ja4(); assert!(ja4.ja4_a.starts_with("t12i")); // TLS 1.2, no SNI (i = no SNI) assert!(!ja4.ja4_b.is_empty()); // Cipher suites present // ja4_c might be empty or just a hash of empty extensions } #[test] fn test_extract_signature_grease_filtering() { use tls_parser::{TlsCipherSuiteID, TlsClientHelloContents, TlsCompressionID, TlsVersion}; // Test that GREASE values are properly filtered from cipher suites let client_hello = TlsClientHelloContents { version: TlsVersion::Tls12, random: &[0u8; 32], session_id: None, ciphers: vec![ TlsCipherSuiteID(0x1301), // Valid cipher TlsCipherSuiteID(0x0a0a), // GREASE - should be filtered TlsCipherSuiteID(0x1a1a), // GREASE - should be filtered TlsCipherSuiteID(0x1302), // Valid cipher TlsCipherSuiteID(0x2a2a), // GREASE - should be filtered ], comp: vec![TlsCompressionID(0)], ext: Some(&[0x00, 0x00, 0x00, 0x00]), // Minimal extension data }; // Mock the extension parsing by providing minimal valid extension data let result = extract_tls_signature_from_client_hello(&client_hello); assert!(result.is_ok(), "Failed to extract TLS signature for GREASE test"); let signature = match result { Ok(sig) => sig, Err(_) => panic!("Should not fail after assert"), }; // Should only contain non-GREASE cipher suites assert_eq!(signature.cipher_suites.len(), 2); assert!(signature.cipher_suites.contains(&0x1301)); assert!(signature.cipher_suites.contains(&0x1302)); assert!(!signature.cipher_suites.contains(&0x0a0a)); assert!(!signature.cipher_suites.contains(&0x1a1a)); assert!(!signature.cipher_suites.contains(&0x2a2a)); } 
#[test] fn test_signature_struct_completeness() { let signature = crate::tls::Signature { version: TlsVersion::V1_2, cipher_suites: vec![0x1301, 0x1302], extensions: vec![0x0000, 0x0010, 0x000d], elliptic_curves: vec![0x001d, 0x0017], elliptic_curve_point_formats: vec![0x00], // uncompressed signature_algorithms: vec![0x0403, 0x0804], sni: Some("example.com".to_string()), alpn: Some("h2".to_string()), }; // Verify all fields are accessible and have correct types assert_eq!(signature.version, TlsVersion::V1_2); assert_eq!(signature.cipher_suites, vec![0x1301, 0x1302]); assert_eq!(signature.extensions, vec![0x0000, 0x0010, 0x000d]); assert_eq!(signature.elliptic_curves, vec![0x001d, 0x0017]); assert_eq!(signature.elliptic_curve_point_formats, vec![0x00]); assert_eq!(signature.signature_algorithms, vec![0x0403, 0x0804]); assert_eq!(signature.sni, Some("example.com".to_string())); assert_eq!(signature.alpn, Some("h2".to_string())); // Verify JA4 generation works with complete signature let ja4 = signature.generate_ja4(); assert!(ja4.ja4_a.starts_with("t12d")); // TLS 1.2, SNI present assert!(!ja4.ja4_b.is_empty()); assert!(!ja4.ja4_c.is_empty()); } #[test] fn test_extension_parsing_edge_cases() { // Test empty extension list let empty_extensions: Vec<u16> = vec![]; assert_eq!( determine_tls_version(&tls_parser::TlsVersion::Tls12, &empty_extensions), TlsVersion::V1_2 ); // Test with supported_versions extension (should detect TLS 1.3) let tls13_extensions = vec![TlsExtensionType::SupportedVersions.into()]; assert_eq!( determine_tls_version(&tls_parser::TlsVersion::Tls12, &tls13_extensions), TlsVersion::V1_3 ); // Test with mixed extensions let mixed_extensions = vec![ TlsExtensionType::ServerName.into(), TlsExtensionType::ApplicationLayerProtocolNegotiation.into(), TlsExtensionType::SupportedVersions.into(), ]; assert_eq!( determine_tls_version(&tls_parser::TlsVersion::Tls12, &mixed_extensions), TlsVersion::V1_3 ); } #[test] fn test_ssl_version_support() { // Test that 
SSL 3.0 is properly supported (even though rare) let ssl30 = tls_parser::TlsVersion::Ssl30; assert_eq!(determine_tls_version(&ssl30, &[]), TlsVersion::Ssl3_0); // Test SSL 3.0 display formatting assert_eq!(TlsVersion::Ssl3_0.to_string(), "s3"); // Test that SSL 3.0 is included in TLS traffic detection range let ssl30_payload = vec![0x16, 0x03, 0x00, 0x00, 0x05]; // SSL 3.0 handshake assert!(is_tls_traffic(&ssl30_payload)); } #[test] fn test_extract_signature_minimal_extensions() { use tls_parser::{TlsCipherSuiteID, TlsClientHelloContents, TlsCompressionID, TlsVersion}; // Test with minimal extension data that might not parse correctly let client_hello = TlsClientHelloContents { version: TlsVersion::Tls12, random: &[0u8; 32], session_id: None, ciphers: vec![ TlsCipherSuiteID(0x1301), // Valid cipher TlsCipherSuiteID(0x1302), // Valid cipher ], comp: vec![TlsCompressionID(0)], ext: Some(&[0x00, 0x00]), // Minimal extension data that might fail to parse }; // Should succeed even if extension parsing fails - falls back to empty extensions let result = extract_tls_signature_from_client_hello(&client_hello); assert!(result.is_ok(), "Should succeed even with minimal extension data"); let signature = match result { Ok(sig) => sig, Err(_) => panic!("Should not fail after assert"), }; assert_eq!(signature.version, crate::tls::TlsVersion::V1_2); assert_eq!(signature.cipher_suites.len(), 2); // Extensions might be empty if parsing failed, but that's OK for JA4 // Should be able to generate JA4 fingerprint regardless let ja4 = signature.generate_ja4(); assert!(ja4.ja4_a.starts_with("t12i")); // TLS 1.2, no SNI assert!(!ja4.ja4_b.is_empty()); // Cipher suites present }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/filter_tests.rs
huginn-net-tls/tests/filter_tests.rs
use huginn_net_tls::{FilterConfig, FilterMode, IpFilter, PortFilter, SubnetFilter}; use std::net::IpAddr; #[test] fn test_port_filter_destination() { let filter = PortFilter::new().destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_source() { let filter = PortFilter::new().source(12345); assert!(filter.matches(12345, 80)); assert!(!filter.matches(54321, 80)); } #[test] fn test_port_filter_list() { let filter = PortFilter::new().destination_list(vec![80, 443, 8080]); assert!(filter.matches(12345, 80)); assert!(filter.matches(12345, 443)); assert!(filter.matches(12345, 8080)); assert!(!filter.matches(12345, 22)); } #[test] fn test_port_filter_range() { let filter = PortFilter::new().destination_range(8000..9000); assert!(filter.matches(12345, 8000)); assert!(filter.matches(12345, 8500)); assert!(filter.matches(12345, 8999)); assert!(!filter.matches(12345, 9000)); assert!(!filter.matches(12345, 7999)); } #[test] fn test_port_filter_any_port() { let filter = PortFilter::new().destination(443).any_port(); assert!(filter.matches(12345, 443)); assert!(filter.matches(443, 80)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_combined() { let filter = PortFilter::new().source(12345).destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); assert!(!filter.matches(54321, 443)); } #[test] fn test_ip_filter_v4() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches( &ip_other, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_ip_filter_v6() { let 
filter = IpFilter::new() .allow("2001:db8::1") .unwrap_or_else(|e| panic!("Invalid IPv6 address: {e}")); let ip_match: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_other: IpAddr = "2001:db8::2" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches( &ip_other, &"2001:db8::3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_ip_filter_source_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .source_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches(&ip_other, &ip_match)); } #[test] fn test_ip_filter_destination_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .destination_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches(&ip_match, &ip_other)); } #[test] fn test_subnet_filter_v4() { let filter = SubnetFilter::new() .allow("192.168.1.0/24") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "192.168.2.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_subnet_filter_v6() { let filter = SubnetFilter::new() .allow("2001:db8::/32") 
.unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_out: IpAddr = "2001:db9::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"2001:dba::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_subnet_filter_multiple() { let filter = SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")); let ip1: IpAddr = "10.1.2.3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "172.16.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip3: IpAddr = "192.168.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip1, &ip_out)); assert!(filter.matches(&ip2, &ip_out)); assert!(filter.matches(&ip3, &ip_out)); assert!(!filter.matches( &ip_out, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_combined_filter_allow() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)) .with_subnet_filter( SubnetFilter::new() .allow("192.168.0.0/16") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")), ); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_in, &ip_out, 12345, 443)); assert!(!filter.should_process(&ip_in, &ip_out, 12345, 80)); assert!(!filter.should_process( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_combined_filter_deny() { let filter = FilterConfig::new() 
.mode(FilterMode::Deny) .with_subnet_filter( SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")), ); let ip_private: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_public: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(!filter.should_process(&ip_private, &ip_public, 12345, 443)); assert!(filter.should_process(&ip_public, &ip_public, 12345, 443)); } #[test] fn test_no_filters() { let filter = FilterConfig::new(); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(filter.should_process(&ip2, &ip1, 80, 12345)); } #[test] fn test_port_only_filter() { let filter = FilterConfig::new().with_port_filter(PortFilter::new().destination(443)); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(!filter.should_process(&ip1, &ip2, 12345, 80)); } #[test] fn test_ip_only_filter() { let filter = FilterConfig::new().with_ip_filter( IpFilter::new() .allow("8.8.8.8") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")), ); let ip_match: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_match, &ip_other, 12345, 443)); assert!(filter.should_process(&ip_other, &ip_match, 12345, 443)); assert!(!filter.should_process( &ip_other, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_invalid_ip() { let result = 
IpFilter::new().allow("not-an-ip"); assert!(result.is_err()); } #[test] fn test_invalid_cidr() { let result = SubnetFilter::new().allow("192.168.1.0/99"); assert!(result.is_err()); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/tls_client_hello_reader.rs
huginn-net-tls/tests/tls_client_hello_reader.rs
use huginn_net_tls::error::HuginnNetTlsError; use huginn_net_tls::tls_client_hello_reader::TlsClientHelloReader; /// Helper function to create a minimal valid TLS ClientHello record /// /// Creates a TLS handshake record containing a ClientHello message. /// Format: [content_type:8][version:16][length:16][handshake_message] fn create_tls_client_hello_record(version: (u8, u8), handshake_payload: &[u8]) -> Vec<u8> { let mut record = Vec::new(); // Content Type: Handshake (0x16) record.push(0x16); // Version (e.g., TLS 1.2 = 0x03 0x03) record.push(version.0); record.push(version.1); // Record length (big-endian u16) let record_len = handshake_payload.len() as u16; record.extend_from_slice(&record_len.to_be_bytes()); // Handshake payload record.extend_from_slice(handshake_payload); record } /// Create a minimal ClientHello handshake message /// /// Format: [handshake_type:8][length:24][version:16][random:32][session_id_len:8][ciphers_len:16][ciphers][comp_len:8][comp][ext_len:16][extensions] fn create_client_hello_handshake( version: (u8, u8), cipher_suites: &[u16], extensions: Option<&[u8]>, ) -> Vec<u8> { let mut handshake = Vec::new(); // Handshake Type: ClientHello (0x01) handshake.push(0x01); // Handshake length (will be filled later) let length_pos = handshake.len(); handshake.extend_from_slice(&[0x00, 0x00, 0x00]); // Version handshake.push(version.0); handshake.push(version.1); // Random (32 bytes) handshake.extend_from_slice(&[0u8; 32]); // Session ID length (0 = no session ID) handshake.push(0x00); // Cipher suites length let cipher_len = (cipher_suites.len().saturating_mul(2)) as u16; handshake.extend_from_slice(&cipher_len.to_be_bytes()); // Cipher suites for &suite in cipher_suites { handshake.extend_from_slice(&suite.to_be_bytes()); } // Compression methods length (1 = NULL compression) handshake.push(0x01); handshake.push(0x00); // NULL compression // Extensions length let ext_len = extensions.map(|e| e.len()).unwrap_or(0) as u16; 
handshake.extend_from_slice(&ext_len.to_be_bytes()); // Extensions if let Some(ext) = extensions { handshake.extend_from_slice(ext); } // Update handshake length (skip the 4-byte header) let handshake_len = handshake.len().saturating_sub(4) as u32; let length_pos_1 = length_pos.saturating_add(1); let length_pos_2 = length_pos.saturating_add(2); handshake[length_pos] = ((handshake_len >> 16) & 0xFF) as u8; handshake[length_pos_1] = ((handshake_len >> 8) & 0xFF) as u8; handshake[length_pos_2] = (handshake_len & 0xFF) as u8; handshake } #[test] fn test_new() { let reader = TlsClientHelloReader::new(); assert!(!reader.signature_parsed()); assert!(reader.get_signature().is_none()); } #[test] fn test_default() { let reader = TlsClientHelloReader::default(); assert!(!reader.signature_parsed()); assert!(reader.get_signature().is_none()); } #[test] fn test_add_bytes_empty() { let mut reader = TlsClientHelloReader::new(); let result = reader.add_bytes(&[]); assert!(result.is_ok()); if let Ok(value) = result { assert!(value.is_none()); } assert!(!reader.signature_parsed()); } #[test] fn test_add_bytes_insufficient_data() { let mut reader = TlsClientHelloReader::new(); // Less than 5 bytes (minimum TLS record header) let result = reader.add_bytes(&[0x16, 0x03, 0x03]); assert!(result.is_ok()); if let Ok(value) = result { assert!(value.is_none()); } assert!(!reader.signature_parsed()); } #[test] fn test_add_bytes_complete_record() { let mut reader = TlsClientHelloReader::new(); // Create a minimal valid ClientHello let cipher_suites = vec![0x1301u16, 0x1302u16]; // TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384 let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = reader.add_bytes(&record); assert!(result.is_ok()); // Should parse successfully if let Ok(signature_result) = result { assert!(signature_result.is_some()); } assert!(reader.signature_parsed()); 
assert!(reader.get_signature().is_some()); } #[test] fn test_add_bytes_incremental() { let mut reader = TlsClientHelloReader::new(); // Create a ClientHello record let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); // Split into parts let mid_point = record.len() / 2; let part1 = &record[..mid_point]; let part2 = &record[mid_point..]; // Add first part let result1 = reader.add_bytes(part1); assert!(result1.is_ok()); if let Ok(value) = result1 { assert!(value.is_none()); } assert!(!reader.signature_parsed()); // Add second part let result2 = reader.add_bytes(part2); assert!(result2.is_ok()); // Should parse successfully now if let Ok(signature_result) = result2 { assert!(signature_result.is_some()); } assert!(reader.signature_parsed()); } #[test] fn test_add_bytes_after_signature_parsed() { let mut reader = TlsClientHelloReader::new(); // First parse a signature let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let _ = reader.add_bytes(&record); assert!(reader.signature_parsed()); // Try to add more data after signature is parsed let more_data = vec![0x16, 0x03, 0x03, 0x00, 0x10]; let result = reader.add_bytes(&more_data); assert!(result.is_ok()); // Should return None and not process the new data if let Ok(value) = result { assert!(value.is_none()); } // Signature should still be available assert!(reader.signature_parsed()); } #[test] fn test_add_bytes_record_too_large() { let mut reader = TlsClientHelloReader::new(); // Create a record that claims to be larger than 64KB // The check is: needed > 64 * 1024, where needed = record_len + 5 // So we need record_len > 65531. 
Let's use 65532 which gives needed = 65537 > 65536 let mut large_record = vec![0x16, 0x03, 0x03]; let record_len = 65532u16; // This gives needed = 65537 > 65536 large_record.extend_from_slice(&record_len.to_be_bytes()); // Add enough data so the buffer has the complete record (to trigger the size check) // We need at least record_len bytes of payload large_record.extend_from_slice(&vec![0u8; record_len as usize]); let result = reader.add_bytes(&large_record); // The check is: needed > 64 * 1024, where needed = record_len + 5 // With record_len = 65535, needed = 65540 > 65536, so should error assert!(result.is_err()); if let Err(HuginnNetTlsError::Parse(msg)) = result { assert!(msg.contains("too large") || msg.contains("large")); } } #[test] fn test_add_bytes_incomplete_record() { let mut reader = TlsClientHelloReader::new(); // Create a record header that says length is 1000, but only provide 10 bytes let mut incomplete_record = vec![0x16, 0x03, 0x03]; incomplete_record.extend_from_slice(&1000u16.to_be_bytes()); incomplete_record.extend_from_slice(&[0u8; 10]); // Only 10 bytes, not 1000 let result = reader.add_bytes(&incomplete_record); assert!(result.is_ok()); // Should return None (need more data) if let Ok(value) = result { assert!(value.is_none()); } assert!(!reader.signature_parsed()); } #[test] fn test_get_signature_before_parsing() { let reader = TlsClientHelloReader::new(); assert!(reader.get_signature().is_none()); } #[test] fn test_get_signature_after_parsing() { let mut reader = TlsClientHelloReader::new(); let cipher_suites = vec![0x1301u16, 0x1302u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let _ = reader.add_bytes(&record); let signature = reader.get_signature(); assert!(signature.is_some()); if let Some(sig) = signature { assert_eq!(sig.cipher_suites.len(), 2); assert!(sig.cipher_suites.contains(&0x1301)); 
assert!(sig.cipher_suites.contains(&0x1302)); } } #[test] fn test_signature_parsed() { let mut reader = TlsClientHelloReader::new(); assert!(!reader.signature_parsed()); let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let _ = reader.add_bytes(&record); assert!(reader.signature_parsed()); } #[test] fn test_reset() { let mut reader = TlsClientHelloReader::new(); // Parse a signature first let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let _ = reader.add_bytes(&record); assert!(reader.signature_parsed()); // Reset reader.reset(); // Should be back to initial state assert!(!reader.signature_parsed()); assert!(reader.get_signature().is_none()); // Should be able to parse again let _ = reader.add_bytes(&record); assert!(reader.signature_parsed()); } #[test] fn test_multiple_add_bytes_calls() { let mut reader = TlsClientHelloReader::new(); // Create a ClientHello record let cipher_suites = vec![0x1301u16, 0x1302u16, 0x1303u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); // Split into multiple parts let part1 = &record[..5]; // Just the header let part2 = &record[5..record.len() / 2]; let part3 = &record[record.len() / 2..]; // Add parts incrementally let result1 = reader.add_bytes(part1); assert!(result1.is_ok()); if let Ok(value) = result1 { assert!(value.is_none()); } let result2 = reader.add_bytes(part2); assert!(result2.is_ok()); // May or may not have enough data yet let sig2 = result2.unwrap_or_default(); let result3 = reader.add_bytes(part3); assert!(result3.is_ok()); // Should parse successfully now let sig3 = result3.unwrap_or_default(); assert!(sig2.is_some() || 
sig3.is_some()); assert!(reader.signature_parsed()); } #[test] fn test_record_length_calculation() { let mut reader = TlsClientHelloReader::new(); // Create a record with specific length let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); // Verify record length is correct let record_len = u16::from_be_bytes([record[3], record[4]]) as usize; assert_eq!(record_len + 5, record.len()); // Should parse successfully let result = reader.add_bytes(&record); assert!(result.is_ok()); if let Ok(value) = result { assert!(value.is_some()); } } #[test] fn test_tls_1_2_version() { let mut reader = TlsClientHelloReader::new(); // TLS 1.2 = 0x03 0x03 let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = reader.add_bytes(&record); assert!(result.is_ok()); if let Ok(Some(signature)) = result { assert_eq!(signature.version, huginn_net_tls::tls::TlsVersion::V1_2); } } #[test] fn test_tls_1_3_version() { let mut reader = TlsClientHelloReader::new(); // TLS 1.3 uses 0x03 0x03 in record but 0x03 0x04 in handshake // For TLS 1.3, we need supported_versions extension let cipher_suites = vec![0x1301u16, 0x1302u16, 0x1303u16]; // Create extensions with supported_versions let mut extensions = Vec::new(); // Extension: supported_versions (0x002b) extensions.extend_from_slice(&0x002bu16.to_be_bytes()); extensions.extend_from_slice(&0x0002u16.to_be_bytes()); // Length: 2 bytes extensions.extend_from_slice(&0x0304u16.to_be_bytes()); // TLS 1.3 let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, Some(&extensions)); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = reader.add_bytes(&record); assert!(result.is_ok()); if let Ok(Some(signature)) = result { 
assert_eq!(signature.version, huginn_net_tls::tls::TlsVersion::V1_3); } } #[test] fn test_invalid_tls_record() { let mut reader = TlsClientHelloReader::new(); // Invalid TLS record (not a handshake) let invalid_record = vec![0x17, 0x03, 0x03, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00]; let result = reader.add_bytes(&invalid_record); // Should return an error (no ClientHello found) assert!(result.is_err()); } #[test] fn test_grease_filtering() { let mut reader = TlsClientHelloReader::new(); // Create ClientHello with GREASE cipher suite let cipher_suites = vec![0x1301u16, 0x0a0au16, 0x1302u16]; // GREASE in middle let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = reader.add_bytes(&record); assert!(result.is_ok()); if let Ok(Some(sig)) = result { // GREASE should be filtered out assert_eq!(sig.cipher_suites.len(), 2); assert!(sig.cipher_suites.contains(&0x1301)); assert!(sig.cipher_suites.contains(&0x1302)); assert!(!sig.cipher_suites.contains(&0x0a0a)); } } #[test] fn test_client_hello_with_extensions() { let mut reader = TlsClientHelloReader::new(); let cipher_suites = vec![0x1301u16]; // Create extensions: SNI extension // Format according to RFC 6066: // Extension: [extension_type:16][extension_length:16][server_name_list] // server_name_list: [list_length:16][server_name_entry...] 
// server_name_entry: [name_type:8][hostname_length:16][hostname] let mut extensions = Vec::new(); // Extension: server_name (0x0000) extensions.extend_from_slice(&0x0000u16.to_be_bytes()); // Calculate lengths let hostname = b"example.com"; let hostname_len = hostname.len() as u16; let entry_len = 1u16 + 2u16 + hostname_len; // name_type(1) + hostname_length(2) + hostname let list_len = 2u16 + entry_len; // list_length(2) + entry let ext_data_len = list_len; // Extension data is just the server_name_list extensions.extend_from_slice(&ext_data_len.to_be_bytes()); // Extension data length extensions.extend_from_slice(&list_len.to_be_bytes()); // Server name list length extensions.push(0x00); // Name type: hostname (0x00) extensions.extend_from_slice(&hostname_len.to_be_bytes()); // Hostname length extensions.extend_from_slice(hostname); // Hostname let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, Some(&extensions)); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = reader.add_bytes(&record); // The parsing should succeed (even if extension parsing fails) // The important thing is that the reader handles ClientHello with extensions gracefully assert!(result.is_ok()); if let Ok(Some(sig)) = result { // Verify basic signature fields are present assert_eq!(sig.cipher_suites.len(), 1); assert!(sig.cipher_suites.contains(&0x1301)); } } #[test] fn test_buffer_capacity() { // Verify initial capacity is set (implementation detail, but good to test) // The buffer should have capacity for reasonable TLS records // We can't directly access buffer, but we can test it works with large records let mut reader2 = TlsClientHelloReader::new(); // Create a larger ClientHello let cipher_suites: Vec<u16> = (0..50).map(|i| 0x1301u16 + i).collect(); let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); let result = 
reader2.add_bytes(&record); assert!(result.is_ok()); // Should handle larger records without issues // The result may be Some or None depending on parsing success, both are valid let _signature = result.unwrap_or_default(); // Signature parsed or not, both are valid outcomes } #[test] fn test_exact_record_length() { let mut reader = TlsClientHelloReader::new(); // Create a record where we have exactly the right amount of data let cipher_suites = vec![0x1301u16]; let handshake = create_client_hello_handshake((0x03, 0x03), &cipher_suites, None); let record = create_tls_client_hello_record((0x03, 0x03), &handshake); // Add exactly the record length let result = reader.add_bytes(&record); assert!(result.is_ok()); // Should parse successfully if let Ok(value) = result { assert!(value.is_some()); } assert!(reader.signature_parsed()); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/golden_tests.rs
huginn-net-tls/tests/golden_tests.rs
use huginn_net_tls::{HuginnNetTls, TlsClientOutput, TlsVersion}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::Path; use std::sync::mpsc; use std::thread; #[derive(Debug, Deserialize, Serialize)] struct PcapSnapshot { pcap_file: String, pcap_path: String, expected_connections: usize, connections: Vec<ConnectionSnapshot>, } #[derive(Debug, Deserialize, Serialize)] struct ConnectionSnapshot { source: EndpointSnapshot, destination: EndpointSnapshot, tls: TlsSnapshot, } #[derive(Debug, Deserialize, Serialize)] struct EndpointSnapshot { ip: String, port: u16, } #[derive(Debug, Deserialize, Serialize)] struct TlsSnapshot { version: String, sni: String, alpn: String, cipher_suites_count: usize, extensions_count: usize, has_signature_algorithms: bool, has_elliptic_curves: bool, ja4: Ja4Snapshot, ja4_original: Ja4Snapshot, } #[derive(Debug, Deserialize, Serialize)] struct Ja4Snapshot { full: String, ja4_a: String, ja4_b: String, ja4_c: String, raw: String, } fn load_snapshot(pcap_file: &str) -> PcapSnapshot { let snapshot_path = format!("tests/snapshots/{pcap_file}.json"); let snapshot_content = fs::read_to_string(&snapshot_path) .unwrap_or_else(|_| panic!("Failed to read snapshot file: {snapshot_path}")); serde_json::from_str(&snapshot_content) .unwrap_or_else(|e| panic!("Failed to parse snapshot JSON: {e}")) } fn analyze_pcap_file(pcap_path: &str) -> Vec<TlsClientOutput> { assert!(Path::new(pcap_path).exists(), "PCAP file must exist: {pcap_path}"); let mut analyzer = HuginnNetTls::new(); let (sender, receiver) = mpsc::channel::<TlsClientOutput>(); let pcap_file = pcap_path.to_string(); let handle = thread::spawn(move || analyzer.analyze_pcap(&pcap_file, sender, None)); let mut results = Vec::new(); for tls_output in receiver { results.push(tls_output); } match handle.join() { Ok(Ok(())) => {} Ok(Err(e)) => { panic!("PCAP analysis failed: {e}"); } Err(e) => { panic!("Thread join failed: {e:?}"); } } results } fn assert_connection_matches_snapshot( 
actual: &TlsClientOutput, expected: &ConnectionSnapshot, connection_index: usize, ) { assert_eq!( actual.source.ip.to_string(), expected.source.ip, "Connection {connection_index}: Source IP mismatch" ); assert_eq!( actual.source.port, expected.source.port, "Connection {connection_index}: Source port mismatch" ); assert_eq!( actual.destination.ip.to_string(), expected.destination.ip, "Connection {connection_index}: Destination IP mismatch" ); assert_eq!( actual.destination.port, expected.destination.port, "Connection {connection_index}: Destination port mismatch" ); let expected_version = match expected.tls.version.as_str() { "V1_3" => TlsVersion::V1_3, "V1_2" => TlsVersion::V1_2, "V1_1" => TlsVersion::V1_1, "V1_0" => TlsVersion::V1_0, _ => panic!("Unknown TLS version: {}", expected.tls.version), }; assert_eq!( actual.sig.version, expected_version, "Connection {connection_index}: TLS version mismatch" ); assert_eq!( actual.sig.sni, Some(expected.tls.sni.clone()), "Connection {connection_index}: SNI mismatch" ); assert_eq!( actual.sig.alpn, Some(expected.tls.alpn.clone()), "Connection {connection_index}: ALPN mismatch" ); assert_eq!( actual.sig.cipher_suites.len(), expected.tls.cipher_suites_count, "Connection {connection_index}: Cipher suites count mismatch" ); assert_eq!( actual.sig.extensions.len(), expected.tls.extensions_count, "Connection {connection_index}: Extensions count mismatch" ); assert_eq!( !actual.sig.signature_algorithms.is_empty(), expected.tls.has_signature_algorithms, "Connection {connection_index}: Signature algorithms presence mismatch" ); // Elliptic curves presence assert_eq!( !actual.sig.elliptic_curves.is_empty(), expected.tls.has_elliptic_curves, "Connection {connection_index}: Elliptic curves presence mismatch" ); assert_eq!( actual.sig.ja4.full.to_string(), expected.tls.ja4.full, "Connection {connection_index}: JA4 full fingerprint mismatch" ); assert_eq!( actual.sig.ja4.ja4_a, expected.tls.ja4.ja4_a, "Connection {connection_index}: JA4_a 
mismatch" ); assert_eq!( actual.sig.ja4.ja4_b, expected.tls.ja4.ja4_b, "Connection {connection_index}: JA4_b mismatch" ); assert_eq!( actual.sig.ja4.ja4_c, expected.tls.ja4.ja4_c, "Connection {connection_index}: JA4_c mismatch" ); assert_eq!( actual.sig.ja4.raw.to_string(), expected.tls.ja4.raw, "Connection {connection_index}: JA4 raw mismatch" ); assert_eq!( actual.sig.ja4_original.full.to_string(), expected.tls.ja4_original.full, "Connection {connection_index}: JA4 original full fingerprint mismatch" ); assert_eq!( actual.sig.ja4_original.ja4_a, expected.tls.ja4_original.ja4_a, "Connection {connection_index}: JA4 original ja4_a mismatch" ); assert_eq!( actual.sig.ja4_original.ja4_b, expected.tls.ja4_original.ja4_b, "Connection {connection_index}: JA4 original ja4_b mismatch" ); assert_eq!( actual.sig.ja4_original.ja4_c, expected.tls.ja4_original.ja4_c, "Connection {connection_index}: JA4 original ja4_c mismatch" ); assert_eq!( actual.sig.ja4_original.raw.to_string(), expected.tls.ja4_original.raw, "Connection {connection_index}: JA4 original raw mismatch" ); } /// Golden test: compares PCAP analysis output against known-good JSON snapshots fn test_pcap_with_snapshot(pcap_file: &str) { let snapshot = load_snapshot(pcap_file); let results = analyze_pcap_file(&snapshot.pcap_path); assert_eq!( results.len(), snapshot.expected_connections, "Expected {} connections in {}, found {}", snapshot.expected_connections, pcap_file, results.len() ); for (i, (actual, expected)) in results.iter().zip(snapshot.connections.iter()).enumerate() { assert_connection_matches_snapshot(actual, expected, i); } } #[test] fn test_golden_pcap_snapshots() { let golden_test_cases = [ "tls12.pcap", "tls-alpn-h2.pcap", // IPv6 TLS 1.2 with NULL datalink format ]; for pcap_file in golden_test_cases { println!("Running golden test for: {pcap_file}"); test_pcap_with_snapshot(pcap_file); } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/parallel_tests.rs
huginn-net-tls/tests/parallel_tests.rs
use huginn_net_tls::parallel::{DispatchResult, PoolStats, WorkerPool, WorkerStats}; use huginn_net_tls::HuginnNetTlsError; use std::sync::mpsc; use std::sync::Arc; use std::thread; fn unwrap_worker_pool(result: Result<WorkerPool, HuginnNetTlsError>) -> WorkerPool { match result { Ok(pool) => pool, Err(e) => panic!("Failed to create WorkerPool: {e}"), } } #[test] fn test_worker_pool_rejects_zero_workers() { let (tx, _rx) = mpsc::channel(); let result = WorkerPool::new(0, 100, 32, 10, tx, None); assert!(result.is_err()); } #[test] fn test_worker_pool_creates_with_valid_workers() { let (tx, _rx) = mpsc::channel(); let result = WorkerPool::new(4, 100, 32, 10, tx, None); assert!(result.is_ok()); let pool = unwrap_worker_pool(result); assert_eq!(pool.num_workers.get(), 4); } #[test] fn test_round_robin_dispatch() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(3, 10, 32, 10, tx, None)); // Dispatch 9 packets (3 per worker) for _ in 0..9 { let result = pool.dispatch(vec![0u8; 100]); assert_eq!(result, DispatchResult::Queued); } let stats = pool.stats(); assert_eq!(stats.total_dispatched, 9); assert_eq!(stats.total_dropped, 0); // All workers should have received packets (round-robin) for worker_stat in &stats.workers { assert!(worker_stat.queue_size > 0 || worker_stat.dropped == 0); } } #[test] fn test_queue_overflow_handling() { let (tx, _rx) = mpsc::channel(); let queue_size = 5; let pool = unwrap_worker_pool(WorkerPool::new(2, queue_size, 32, 10, tx, None)); let mut queued = 0; let mut dropped = 0; // Try to dispatch many packets to overflow queues for _ in 0..100 { match pool.dispatch(vec![0u8; 100]) { DispatchResult::Queued => queued += 1, DispatchResult::Dropped => dropped += 1, } } // Should have some dropped packets due to queue overflow assert!(dropped > 0, "Expected some packets to be dropped"); assert!(queued > 0, "Expected some packets to be queued"); let stats = pool.stats(); assert_eq!(stats.total_dispatched, queued); 
assert_eq!(stats.total_dropped, dropped); } #[test] fn test_stats_accuracy() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(2, 100, 32, 10, tx, None)); // Dispatch some packets let dispatch_count = 10; for _ in 0..dispatch_count { pool.dispatch(vec![0u8; 100]); } let stats = pool.stats(); assert_eq!(stats.total_dispatched, dispatch_count); assert_eq!(stats.workers.len(), 2); } #[test] fn test_shutdown_stops_accepting_packets() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(2, 100, 32, 10, tx, None)); // Dispatch before shutdown should work let result = pool.dispatch(vec![0u8; 100]); assert_eq!(result, DispatchResult::Queued); // Shutdown the pool pool.shutdown(); // Dispatch after shutdown should return Dropped let result = pool.dispatch(vec![0u8; 100]); assert_eq!(result, DispatchResult::Dropped); } #[test] fn test_per_worker_dropped_count() { let (tx, _rx) = mpsc::channel(); let queue_size = 2; let pool = unwrap_worker_pool(WorkerPool::new(1, queue_size, 32, 10, tx, None)); // Fill the single worker's queue for _ in 0..10 { pool.dispatch(vec![0u8; 100]); } let stats = pool.stats(); // Should have dropped packets assert!(stats.total_dropped > 0); // The single worker should have the drops assert_eq!(stats.workers.len(), 1); assert_eq!(stats.workers[0].dropped, stats.total_dropped); } #[test] fn test_concurrent_dispatch() { let (tx, _rx) = mpsc::channel(); let pool = Arc::new(unwrap_worker_pool(WorkerPool::new(4, 100, 32, 10, tx, None))); let handles: Vec<_> = (0..4) .map(|_| { let pool_clone = Arc::clone(&pool); thread::spawn(move || { for _ in 0..25 { pool_clone.dispatch(vec![0u8; 100]); } }) }) .collect(); for handle in handles { if handle.join().is_err() { panic!("Thread panicked during concurrent dispatch test"); } } let stats = pool.stats(); // 4 threads * 25 dispatches = 100 total (some might be queued, some dropped) let total_processed = stats.total_dispatched + stats.total_dropped; 
assert_eq!(total_processed, 100); } #[test] fn test_worker_stats_display() { let worker = WorkerStats { id: 0, queue_size: 5, dropped: 10 }; let output = format!("{worker}"); assert!(output.contains("Worker 0")); assert!(output.contains("queue_size=5")); assert!(output.contains("dropped=10")); } #[test] fn test_pool_stats_display() { let stats = PoolStats { total_dispatched: 100, total_dropped: 5, workers: vec![ WorkerStats { id: 0, queue_size: 2, dropped: 3 }, WorkerStats { id: 1, queue_size: 1, dropped: 2 }, ], }; let output = format!("{stats}"); assert!(output.contains("dispatched: 100")); assert!(output.contains("dropped: 5")); assert!(output.contains("Worker 0")); assert!(output.contains("Worker 1")); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tls/tests/packet_parser.rs
huginn-net-tls/tests/packet_parser.rs
use huginn_net_tls::packet_parser::{ detect_datalink_format, parse_packet, DatalinkFormat, IpPacket, }; #[test] fn test_detect_null_datalink() { // NULL datalink: 4-byte header + IPv6 packet let null_packet = vec![ 0x1e, 0x00, 0x00, 0x00, // NULL header 0x60, 0x00, 0x00, 0x00, // IPv6 header start (version=6) 0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header (TCP), hop limit // Add minimal IPv6 addresses (32 bytes total) 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // src 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // dst ]; let format = detect_datalink_format(&null_packet); assert_eq!(format, Some(DatalinkFormat::Null)); } #[test] fn test_detect_raw_ipv4() { // Raw IPv4 packet (no Ethernet header) let raw_ipv4 = vec![ 0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28 0x00, 0x01, 0x40, 0x00, // ID=1, Flags=0x4000 (DF), Fragment Offset=0 0x40, 0x06, 0x00, 0x00, // TTL=64, Protocol=6 (TCP), Header Checksum=0 0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1 0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2 ]; let format = detect_datalink_format(&raw_ipv4); assert_eq!(format, Some(DatalinkFormat::RawIp)); } #[test] fn test_detect_raw_ipv6() { // Raw IPv6 packet (no Ethernet header) let raw_ipv6 = vec![ 0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0 0x00, 0x14, 0x06, 0x40, // Payload Length=20, Next Header=6 (TCP), Hop Limit=64 // IPv6 addresses (32 bytes) 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // src 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // dst ]; let format = detect_datalink_format(&raw_ipv6); assert_eq!(format, Some(DatalinkFormat::RawIp)); } #[test] fn test_detect_ethernet_ipv4() { // Ethernet frame with IPv4 let ethernet_ipv4 = vec![ // Ethernet header (14 bytes) 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, // 
Destination MAC 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, // Source MAC 0x08, 0x00, // EtherType: IPv4 // IPv4 packet 0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28 0x00, 0x01, 0x40, 0x00, // ID=1, Flags=0x4000 (DF), Fragment Offset=0 0x40, 0x06, 0x00, 0x00, // TTL=64, Protocol=6 (TCP), Header Checksum=0 0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1 0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2 ]; let format = detect_datalink_format(&ethernet_ipv4); assert_eq!(format, Some(DatalinkFormat::Ethernet)); } #[test] fn test_parse_null_datalink_packet() { // NULL datalink packet with IPv6 let null_packet = vec![ 0x1e, 0x00, 0x00, 0x00, // NULL header 0x60, 0x00, 0x00, 0x00, // IPv6 header start 0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header, hop limit // IPv6 addresses (32 bytes) 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]; let result = parse_packet(&null_packet); match result { IpPacket::Ipv6(ipv6) => { // Should have parsed IPv6 packet assert_eq!(ipv6.get_version(), 6); } _ => panic!("Expected IPv6 packet"), } } #[test] fn test_parse_raw_ipv4_packet() { let raw_ipv4 = vec![ 0x45, 0x00, 0x00, 0x1c, // IPv4 header 0x00, 0x01, 0x40, 0x00, 0x40, 0x06, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0x01, // Source IP 0xc0, 0xa8, 0x01, 0x02, // Dest IP ]; let result = parse_packet(&raw_ipv4); match result { IpPacket::Ipv4(ipv4) => { assert_eq!(ipv4.get_version(), 4); } _ => panic!("Expected IPv4 packet"), } } #[test] fn test_parse_ethernet_packet() { let ethernet_ipv4 = vec![ // Ethernet header 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, // Dest MAC 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, // Src MAC 0x08, 0x00, // EtherType: IPv4 // IPv4 packet 0x45, 0x00, 0x00, 0x1c, 0x00, 0x01, 0x40, 0x00, 0x40, 0x06, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0x01, 0xc0, 0xa8, 0x01, 0x02, ]; let result = parse_packet(&ethernet_ipv4); match result { 
IpPacket::Ipv4(ipv4) => { // Should have parsed IPv4 packet assert_eq!(ipv4.get_version(), 4); } _ => panic!("Expected IPv4 packet"), } } #[test] fn test_parse_invalid_packet() { let invalid_packet = vec![0x00, 0x01, 0x02]; // Too small let result = parse_packet(&invalid_packet); match result { IpPacket::None => {} // Expected _ => panic!("Expected None for invalid packet"), } } #[test] fn test_parse_unknown_ethernet_type() { let unknown_ethernet = vec![ // Ethernet header with unknown EtherType 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, // Dest MAC 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, // Src MAC 0xFF, 0xFF, // Unknown EtherType 0x45, 0x00, 0x00, 0x1c, // Would be IPv4 but wrong EtherType ]; let result = parse_packet(&unknown_ethernet); match result { IpPacket::None => {} // Expected - unknown EtherType _ => panic!("Expected None for unknown EtherType"), } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/observable.rs
huginn-net-tcp/src/observable.rs
use huginn_net_db::observable_signals::TcpObservation; #[derive(Debug, Clone)] pub struct ObservableTcp { /// Core matching data for fingerprinting pub matching: TcpObservation, } // Observable MTU signals pub struct ObservableMtu { pub value: u16, } // Observable Uptime signals #[derive(Debug, Clone)] pub struct ObservableUptime { pub days: u32, pub hours: u32, pub min: u32, pub up_mod_days: u32, pub freq: f64, }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/tcp_process.rs
huginn-net-tcp/src/tcp_process.rs
use crate::error::HuginnNetTcpError; use crate::ip_options::IpOptions; use crate::observable::{ObservableMtu, ObservableTcp, ObservableUptime}; use crate::tcp::{IpVersion, PayloadSize, Quirk, TcpOption, Ttl, WindowSize}; use crate::uptime::check_ts_tcp; use crate::uptime::{Connection, ConnectionKey, TcpTimestamp}; use crate::window_size::detect_win_multiplicator; use crate::{mtu, ttl}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::{ ipv4::{Ipv4Flags, Ipv4Packet}, ipv6::Ipv6Packet, tcp::{TcpFlags, TcpOptionNumbers::*, TcpOptionPacket, TcpPacket}, Packet, PacketSize, }; use std::convert::TryInto; use std::net::IpAddr; use ttl_cache::TtlCache; /// Congestion encountered const IP_TOS_CE: u8 = 0x01; /// ECN supported const IP_TOS_ECT: u8 = 0x02; /// Must be zero const IP4_MBZ: u8 = 0b0100; // Internal representation of a TCP package pub struct ObservableTCPPackage { pub tcp_request: Option<ObservableTcp>, pub tcp_response: Option<ObservableTcp>, pub mtu: Option<ObservableMtu>, pub client_uptime: Option<ObservableUptime>, pub server_uptime: Option<ObservableUptime>, } pub fn from_client(tcp_flags: u8) -> bool { use TcpFlags::*; tcp_flags & SYN != 0 && tcp_flags & ACK == 0 } pub fn from_server(tcp_flags: u8) -> bool { use TcpFlags::*; tcp_flags & SYN != 0 && tcp_flags & ACK != 0 } /// Determines if a packet is from the client side of a connection. /// /// This function uses a two-phase approach: /// 1. During TCP handshake: Uses SYN/SYN+ACK flags for definitive identification /// 2. 
After handshake: Uses port heuristics (ephemeral vs well-known ports) /// /// # Returns /// `true` if the packet is from the client, `false` if from the server /// /// # Port Heuristic /// - Ephemeral ports (>1024) typically indicate client-side /// - Well-known ports (≤1024) typically indicate server-side /// - A packet from high port to low port is likely from client pub fn is_packet_from_client(tcp_flags: u8, src_port: u16, dst_port: u16) -> bool { if from_client(tcp_flags) { // SYN packet (no ACK) is definitely from client true } else if from_server(tcp_flags) { // SYN+ACK packet is definitely from server false } else { src_port > 1024 && dst_port <= 1024 } } pub fn is_valid(tcp_flags: u8, tcp_type: u8) -> bool { use TcpFlags::*; !(((tcp_flags & SYN) == SYN && (tcp_flags & (FIN | RST)) != 0) || (tcp_flags & (FIN | RST)) == (FIN | RST) || tcp_type == 0) } pub fn process_tcp_ipv4( packet: &Ipv4Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, ) -> Result<ObservableTCPPackage, HuginnNetTcpError> { if packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return Err(HuginnNetTcpError::UnsupportedProtocol("IPv4".to_string())); } if packet.get_fragment_offset() > 0 || (packet.get_flags() & Ipv4Flags::MoreFragments) == Ipv4Flags::MoreFragments { return Err(HuginnNetTcpError::UnexpectedPackage("IPv4".to_string())); } let version = IpVersion::V4; let ttl_observed: u8 = packet.get_ttl(); let ttl: Ttl = ttl::calculate_ttl(ttl_observed); let olen: u8 = IpOptions::calculate_ipv4_length(packet); let mut quirks = vec![]; if (packet.get_ecn() & (IP_TOS_CE | IP_TOS_ECT)) != 0 { quirks.push(Quirk::Ecn); } if (packet.get_flags() & IP4_MBZ) != 0 { quirks.push(Quirk::MustBeZero); } if (packet.get_flags() & Ipv4Flags::DontFragment) != 0 { quirks.push(Quirk::Df); if packet.get_identification() != 0 { quirks.push(Quirk::NonZeroID); } } else if packet.get_identification() == 0 { quirks.push(Quirk::ZeroID); } let source_ip: IpAddr = 
IpAddr::V4(packet.get_source()); let destination_ip = IpAddr::V4(packet.get_destination()); let tcp_payload = packet.payload(); // Get a reference to the payload without moving `packet` let ip_package_header_length: u8 = packet.get_header_length(); TcpPacket::new(tcp_payload) .ok_or_else(|| HuginnNetTcpError::UnexpectedPackage("TCP packet too short".to_string())) .and_then(|tcp_packet| { visit_tcp( connection_tracker, &tcp_packet, version, ttl, ip_package_header_length, olen, quirks, source_ip, destination_ip, ) }) } pub fn process_tcp_ipv6( packet: &Ipv6Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, ) -> Result<ObservableTCPPackage, HuginnNetTcpError> { if packet.get_next_header() != IpNextHeaderProtocols::Tcp { return Err(HuginnNetTcpError::UnsupportedProtocol("IPv6".to_string())); } let version = IpVersion::V6; let ttl_observed: u8 = packet.get_hop_limit(); let ttl: Ttl = ttl::calculate_ttl(ttl_observed); let olen: u8 = IpOptions::calculate_ipv6_length(packet); let mut quirks = vec![]; if packet.get_flow_label() != 0 { quirks.push(Quirk::FlowID); } if (packet.get_traffic_class() & (IP_TOS_CE | IP_TOS_ECT)) != 0 { quirks.push(Quirk::Ecn); } let source_ip: IpAddr = IpAddr::V6(packet.get_source()); let destination_ip = IpAddr::V6(packet.get_destination()); let ip_package_header_length: u8 = 40; //IPv6 header is always 40 bytes TcpPacket::new(packet.payload()) .ok_or_else(|| HuginnNetTcpError::UnexpectedPackage("TCP packet too short".to_string())) .and_then(|tcp_packet| { visit_tcp( connection_tracker, &tcp_packet, version, ttl, ip_package_header_length, olen, quirks, source_ip, destination_ip, ) }) } #[allow(clippy::too_many_arguments)] fn visit_tcp( connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, tcp: &TcpPacket, version: IpVersion, ittl: Ttl, ip_package_header_length: u8, olen: u8, mut quirks: Vec<Quirk>, source_ip: IpAddr, destination_ip: IpAddr, ) -> Result<ObservableTCPPackage, HuginnNetTcpError> { use TcpFlags::*; let 
flags: u8 = tcp.get_flags(); let from_client: bool = from_client(flags); let tcp_type: u8 = flags & (SYN | ACK | FIN | RST); if !is_valid(flags, tcp_type) { return Err(HuginnNetTcpError::InvalidTcpFlags(flags)); } if (flags & (ECE | CWR)) != 0 { quirks.push(Quirk::Ecn); } if tcp.get_sequence() == 0 { quirks.push(Quirk::SeqNumZero); } if flags & ACK == ACK { if tcp.get_acknowledgement() == 0 { quirks.push(Quirk::AckNumZero); } } else if tcp.get_acknowledgement() != 0 && flags & RST == 0 { quirks.push(Quirk::AckNumNonZero); } if flags & URG == URG { quirks.push(Quirk::Urg); } else if tcp.get_urgent_ptr() != 0 { quirks.push(Quirk::NonZeroURG); } if flags & PSH == PSH { quirks.push(Quirk::Push); } let mut buf = tcp.get_options_raw(); let mut mss = None; let mut wscale = None; let mut olayout = vec![]; let mut client_uptime: Option<ObservableUptime> = None; let mut server_uptime: Option<ObservableUptime> = None; while let Some(opt) = TcpOptionPacket::new(buf) { buf = &buf[opt.packet_size().min(buf.len())..]; let data: &[u8] = opt.payload(); match opt.get_number() { EOL => { olayout.push(TcpOption::Eol(buf.len() as u8)); if buf.iter().any(|&b| b != 0) { quirks.push(Quirk::TrailinigNonZero); } } NOP => { olayout.push(TcpOption::Nop); } MSS => { olayout.push(TcpOption::Mss); if data.len() >= 2 { let mss_value: u16 = u16::from_be_bytes([data[0], data[1]]); mss = Some(mss_value); } } WSCALE => { olayout.push(TcpOption::Ws); wscale = Some(data[0]); if data[0] > 14 { quirks.push(Quirk::ExcessiveWindowScaling); } } SACK_PERMITTED => { olayout.push(TcpOption::Sok); } SACK => { olayout.push(TcpOption::Sack); } TIMESTAMPS => { olayout.push(TcpOption::TS); if data.len() >= 4 { let ts_val_bytes: [u8; 4] = data[..4].try_into().map_err(|_| { HuginnNetTcpError::Parse( "Failed to convert slice to array for timestamp value".to_string(), ) })?; if u32::from_be_bytes(ts_val_bytes) == 0 { quirks.push(Quirk::OwnTimestampZero); } } if data.len() >= 8 && tcp_type == SYN { let ts_peer_bytes: 
[u8; 4] = data[4..8].try_into().map_err(|_| { HuginnNetTcpError::Parse( "Failed to convert slice to array for peer timestamp value".to_string(), ) })?; if u32::from_be_bytes(ts_peer_bytes) != 0 { quirks.push(Quirk::PeerTimestampNonZero); } } if data.len() >= 8 { let ts_val_bytes: [u8; 4] = data[..4].try_into().map_err(|_| { HuginnNetTcpError::Parse( "Failed to convert slice to array for timestamp value".to_string(), ) })?; let ts_val: u32 = u32::from_be_bytes(ts_val_bytes); let connection: Connection = Connection { src_ip: source_ip, src_port: tcp.get_source(), dst_ip: destination_ip, dst_port: tcp.get_destination(), }; let is_from_client = is_packet_from_client(flags, tcp.get_source(), tcp.get_destination()); let (cli_uptime, srv_uptime) = check_ts_tcp(connection_tracker, &connection, is_from_client, ts_val); client_uptime = cli_uptime; server_uptime = srv_uptime; } } _ => { olayout.push(TcpOption::Unknown(opt.get_number().0)); } } } let mtu: Option<ObservableMtu> = match (mss, &version) { (Some(mss_value), IpVersion::V4) => { mtu::extract_from_ipv4(tcp, ip_package_header_length, mss_value) } (Some(mss_value), IpVersion::V6) => { mtu::extract_from_ipv6(tcp, ip_package_header_length, mss_value) } _ => None, }; let wsize: WindowSize = detect_win_multiplicator( tcp.get_window(), mss.unwrap_or(0), ip_package_header_length as u16, olayout.contains(&TcpOption::TS), &version, ); let tcp_signature: ObservableTcp = ObservableTcp { matching: huginn_net_db::observable_signals::TcpObservation { version, ittl, olen, mss, wsize, wscale, olayout, quirks, pclass: if tcp.payload().is_empty() { PayloadSize::Zero } else { PayloadSize::NonZero }, }, }; Ok(ObservableTCPPackage { tcp_request: if from_client { Some(tcp_signature.clone()) } else { None }, tcp_response: if !from_client { Some(tcp_signature) } else { None }, mtu: if from_client { mtu } else { None }, client_uptime, server_uptime, }) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/lib.rs
huginn-net-tcp/src/lib.rs
#![forbid(unsafe_code)] pub use huginn_net_db as db; pub use huginn_net_db::tcp; pub mod filter; pub mod ip_options; pub mod mtu; pub mod packet_hash; pub mod packet_parser; pub mod parallel; pub mod raw_filter; pub mod tcp_process; pub mod ttl; pub mod uptime; pub mod window_size; pub mod display; pub mod error; pub mod observable; pub mod output; pub mod process; pub mod signature_matcher; // Re-exports pub use error::*; pub use filter::*; pub use observable::*; pub use output::*; pub use parallel::{DispatchResult, PoolStats, WorkerPool, WorkerStats}; pub use process::*; pub use signature_matcher::*; pub use tcp_process::*; pub use uptime::{ calculate_uptime_improved, Connection, ConnectionKey, FrequencyState, TcpTimestamp, UptimeTracker, }; use crate::packet_parser::{parse_packet, IpPacket}; use pcap_file::pcap::PcapReader; use pnet::datalink::{self, Channel, Config}; use std::fs::File; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; use tracing::{debug, error}; use ttl_cache::TtlCache; /// Configuration for parallel processing. #[derive(Debug, Clone, Copy)] pub struct ParallelConfig { pub num_workers: usize, pub queue_size: usize, pub batch_size: usize, pub timeout_ms: u64, } /// A TCP-focused passive fingerprinting analyzer. /// /// The `HuginnNetTcp` struct handles TCP packet analysis for OS fingerprinting, /// MTU detection, and uptime calculation using p0f-style methodologies. /// /// Supports both sequential (single-threaded) and parallel (multi-threaded) processing modes. pub struct HuginnNetTcp { matcher: Option<Arc<db::Database>>, max_connections: usize, parallel_config: Option<ParallelConfig>, worker_pool: Option<Arc<WorkerPool>>, filter_config: Option<FilterConfig>, } impl HuginnNetTcp { /// Creates a new instance of `HuginnNetTcp` in sequential mode. 
/// /// # Parameters /// - `database`: Optional signature database for OS matching /// - `max_connections`: Maximum number of connections to track in the connection tracker /// /// # Returns /// A new `HuginnNetTcp` instance ready for sequential TCP analysis. pub fn new( database: Option<Arc<db::Database>>, max_connections: usize, ) -> Result<Self, HuginnNetTcpError> { Ok(Self { matcher: database, max_connections, parallel_config: None, worker_pool: None, filter_config: None, }) } /// Creates a new instance of `HuginnNetTcp` configured for parallel processing. /// /// Uses hash-based worker assignment to ensure packets from the same source IP /// always go to the same worker, maintaining state consistency. /// /// # Parameters /// - `database`: Optional signature database for OS matching /// - `max_connections`: Maximum number of connections to track per worker (typical: 1000) /// - `num_workers`: Number of worker threads (recommended: 2-4 on 8-core systems) /// - `queue_size`: Size of packet queue per worker (typical: 100-200) /// - `batch_size`: Maximum packets to process in one batch (typical: 16-64, recommended: 32) /// - `timeout_ms`: Worker receive timeout in milliseconds (typical: 5-50, recommended: 10) /// /// # Configuration Guide /// /// ## batch_size /// - **Low (8-16)**: Lower latency, more responsive, higher overhead /// - **Medium (32)**: Balanced throughput and latency *(recommended)* /// - **High (64-128)**: Maximum throughput, higher latency /// /// ## timeout_ms /// - **Low (5-10ms)**: Fast shutdown, slightly lower throughput *(recommended: 10)* /// - **Medium (20-50ms)**: Better throughput, slower shutdown /// - **High (100ms+)**: Maximum throughput, slow shutdown /// /// # Example /// ```rust,no_run /// use huginn_net_tcp::HuginnNetTcp; /// use huginn_net_db::Database; /// use std::sync::Arc; /// /// let db = Arc::new(Database::load_default().expect("Failed to load database")); /// /// // Balanced configuration (recommended) /// let tcp = 
HuginnNetTcp::with_config(Some(db.clone()), 1000, 4, 100, 32, 10); /// /// // Low latency /// let low_latency = HuginnNetTcp::with_config(Some(db.clone()), 1000, 2, 100, 8, 5); /// /// // High throughput /// let high_throughput = HuginnNetTcp::with_config(Some(db), 1000, 4, 200, 64, 20); /// ``` /// /// # Returns /// A new `HuginnNetTcp` instance configured for parallel processing. pub fn with_config( database: Option<Arc<db::Database>>, max_connections: usize, num_workers: usize, queue_size: usize, batch_size: usize, timeout_ms: u64, ) -> Result<Self, HuginnNetTcpError> { Ok(Self { matcher: database, max_connections, parallel_config: Some(ParallelConfig { num_workers, queue_size, batch_size, timeout_ms, }), worker_pool: None, filter_config: None, }) } /// Configure packet filtering (builder pattern) pub fn with_filter(mut self, config: FilterConfig) -> Self { self.filter_config = Some(config); self } /// Initializes the worker pool for parallel processing. /// /// Must be called before `analyze_network` or `analyze_pcap` when using parallel mode. /// /// # Parameters /// - `sender`: Channel to send TCP analysis results /// /// # Errors /// Returns error if called without parallel config or if worker pool creation fails. pub fn init_pool( &mut self, sender: Sender<TcpAnalysisResult>, ) -> Result<(), HuginnNetTcpError> { let config = self .parallel_config .ok_or(HuginnNetTcpError::Misconfiguration( "Parallel mode not configured. Use with_config() to enable parallel processing" .to_string(), ))?; // Clone Arc for sharing across threads (cheap, just increments ref count) let database_arc = self.matcher.as_ref().map(Arc::clone); let worker_pool = WorkerPool::new( config.num_workers, config.queue_size, config.batch_size, config.timeout_ms, sender, database_arc, self.max_connections, self.filter_config.clone(), )?; self.worker_pool = Some(Arc::new(worker_pool)); Ok(()) } /// Returns a reference to the worker pool. 
/// /// # Returns /// An `Option` containing an `Arc` to the `WorkerPool` if parallel mode is enabled. pub fn worker_pool(&self) -> Option<Arc<WorkerPool>> { self.worker_pool.as_ref().map(Arc::clone) } /// Returns current pool statistics (parallel mode only). /// /// # Returns /// `Some(PoolStats)` if in parallel mode, `None` otherwise. pub fn stats(&self) -> Option<PoolStats> { self.worker_pool.as_ref().map(|pool| pool.stats()) } fn process_with<F>( &mut self, packet_fn: F, sender: Sender<TcpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetTcpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetTcpError>>, { if self.parallel_config.is_some() { self.process_parallel(packet_fn, sender, cancel_signal) } else { self.process_sequential(packet_fn, sender, cancel_signal) } } fn process_sequential<F>( &mut self, mut packet_fn: F, sender: Sender<TcpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetTcpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetTcpError>>, { // Connection tracker for TCP analysis (sequential mode) let mut connection_tracker = TtlCache::new(self.max_connections); while let Some(packet_result) = packet_fn() { if let Some(ref cancel) = cancel_signal { if cancel.load(Ordering::Relaxed) { debug!("Cancellation signal received, stopping packet processing"); break; } } match packet_result { Ok(packet) => match self.process_packet(&packet, &mut connection_tracker) { Ok(result) => { if sender.send(result).is_err() { error!("Receiver dropped, stopping packet processing"); break; } } Err(tcp_error) => { debug!("Error processing packet: {tcp_error}"); } }, Err(e) => { error!("Failed to read packet: {e}"); } } } Ok(()) } fn process_parallel<F>( &mut self, mut packet_fn: F, _sender: Sender<TcpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetTcpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetTcpError>>, { let worker_pool = self .worker_pool 
.as_ref() .ok_or(HuginnNetTcpError::Misconfiguration( "Worker pool not initialized. Call init_pool() before processing".to_string(), ))?; while let Some(packet_result) = packet_fn() { if let Some(ref cancel) = cancel_signal { if cancel.load(Ordering::Relaxed) { debug!("Cancellation signal received, stopping packet processing"); break; } } match packet_result { Ok(packet) => { // Dispatch to worker pool using hash-based assignment worker_pool.dispatch(packet); } Err(e) => { error!("Failed to read packet: {e}"); } } } // Signal workers to finish worker_pool.shutdown(); Ok(()) } /// Analyzes network traffic from a live network interface for TCP packets. /// /// # Parameters /// - `interface_name`: The name of the network interface to capture from. /// - `sender`: A channel sender to send analysis results. /// - `cancel_signal`: Optional atomic boolean to signal cancellation. /// /// # Returns /// A `Result` indicating success or failure. pub fn analyze_network( &mut self, interface_name: &str, sender: Sender<TcpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetTcpError> { let interfaces = datalink::interfaces(); let interface = interfaces .into_iter() .find(|iface| iface.name == interface_name) .ok_or_else(|| { HuginnNetTcpError::Parse(format!( "Could not find network interface: {interface_name}" )) })?; debug!("Using network interface: {}", interface.name); let config = Config { promiscuous: true, ..Config::default() }; let (_tx, mut rx) = match datalink::channel(&interface, config) { Ok(Channel::Ethernet(tx, rx)) => (tx, rx), Ok(_) => return Err(HuginnNetTcpError::Parse("Unhandled channel type".to_string())), Err(e) => { return Err(HuginnNetTcpError::Parse(format!("Unable to create channel: {e}"))) } }; self.process_with( move || match rx.next() { Ok(packet) => Some(Ok(packet.to_vec())), Err(e) => { Some(Err(HuginnNetTcpError::Parse(format!("Error receiving packet: {e}")))) } }, sender, cancel_signal, ) } /// Analyzes TCP packets from 
a PCAP file. /// /// # Parameters /// - `pcap_path`: Path to the PCAP file to analyze. /// - `sender`: A channel sender to send analysis results. /// - `cancel_signal`: Optional atomic boolean to signal cancellation. /// /// # Returns /// A `Result` indicating success or failure. pub fn analyze_pcap( &mut self, pcap_path: &str, sender: Sender<TcpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetTcpError> { let file = File::open(pcap_path) .map_err(|e| HuginnNetTcpError::Parse(format!("Failed to open PCAP file: {e}")))?; let mut pcap_reader = PcapReader::new(file) .map_err(|e| HuginnNetTcpError::Parse(format!("Failed to create PCAP reader: {e}")))?; self.process_with( move || match pcap_reader.next_packet() { Some(Ok(packet)) => Some(Ok(packet.data.to_vec())), Some(Err(e)) => { Some(Err(HuginnNetTcpError::Parse(format!("Error reading PCAP packet: {e}")))) } None => None, }, sender, cancel_signal, ) } /// Processes a single packet and extracts TCP information if present. /// /// # Parameters /// - `packet`: The raw packet data. /// - `connection_tracker`: Mutable reference to connection tracker. /// /// # Returns /// A `Result` containing a `TcpAnalysisResult` or an error. 
fn process_packet( &self, packet: &[u8], connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, ) -> Result<TcpAnalysisResult, HuginnNetTcpError> { if let Some(ref filter) = self.filter_config { if !raw_filter::apply(packet, filter) { debug!("Filtered out packet before parsing"); return Ok(TcpAnalysisResult { syn: None, syn_ack: None, mtu: None, client_uptime: None, server_uptime: None, }); } } let matcher = self .matcher .as_ref() .map(|db| SignatureMatcher::new(db.as_ref())); match parse_packet(packet) { IpPacket::Ipv4(ipv4) => { process_ipv4_packet(&ipv4, connection_tracker, matcher.as_ref()) } IpPacket::Ipv6(ipv6) => { process_ipv6_packet(&ipv6, connection_tracker, matcher.as_ref()) } IpPacket::None => Ok(TcpAnalysisResult { syn: None, syn_ack: None, mtu: None, client_uptime: None, server_uptime: None, }), } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/process.rs
huginn-net-tcp/src/process.rs
use crate::error::HuginnNetTcpError; use crate::output::{ IpPort, MTUOutput, MTUQualityMatched, OSQualityMatched, OperativeSystem, SynAckTCPOutput, SynTCPOutput, UptimeOutput, }; use crate::{ tcp_process, ConnectionKey, SignatureMatcher, TcpAnalysisResult, TcpTimestamp, UptimeRole, }; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use pnet::packet::tcp::TcpPacket; use pnet::packet::Packet; use std::net::IpAddr; use ttl_cache::TtlCache; pub struct ObservablePackage { pub source: IpPort, pub destination: IpPort, pub tcp_result: TcpAnalysisResult, } /// Processes an IPv4 packet for TCP content. pub fn process_ipv4_packet( ipv4: &Ipv4Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, matcher: Option<&SignatureMatcher>, ) -> Result<TcpAnalysisResult, HuginnNetTcpError> { create_observable_package_ipv4(ipv4, connection_tracker, matcher).map(|pkg| pkg.tcp_result) } fn create_observable_package_ipv4( ipv4: &Ipv4Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, matcher: Option<&SignatureMatcher>, ) -> Result<ObservablePackage, HuginnNetTcpError> { let tcp = TcpPacket::new(ipv4.payload()) .ok_or_else(|| HuginnNetTcpError::Parse("Invalid TCP packet".to_string()))?; let source = IpPort { ip: IpAddr::V4(ipv4.get_source()), port: tcp.get_source() }; let destination = IpPort { ip: IpAddr::V4(ipv4.get_destination()), port: tcp.get_destination() }; let tcp_package = tcp_process::process_tcp_ipv4(ipv4, connection_tracker)?; let mut tcp_result = TcpAnalysisResult { syn: None, syn_ack: None, mtu: None, client_uptime: None, server_uptime: None, }; if let Some(tcp_request) = tcp_package.tcp_request { let os_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_tcp_request(&tcp_request) { OSQualityMatched { os: Some(OperativeSystem::from(label)), quality: crate::db::MatchQualityType::Matched(quality), } } else { OSQualityMatched { os: None, quality: 
crate::db::MatchQualityType::NotMatched } } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::Disabled } }; let syn_output = SynTCPOutput { source: IpPort::new(IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V4(ipv4.get_destination()), tcp.get_destination()), os_matched: os_quality, sig: tcp_request, }; tcp_result.syn = Some(syn_output); } if let Some(tcp_response) = tcp_package.tcp_response { let os_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_tcp_response(&tcp_response) { OSQualityMatched { os: Some(OperativeSystem::from(label)), quality: crate::db::MatchQualityType::Matched(quality), } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::NotMatched } } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::Disabled } }; let syn_ack_output = SynAckTCPOutput { source: IpPort::new(IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V4(ipv4.get_destination()), tcp.get_destination()), os_matched: os_quality, sig: tcp_response, }; tcp_result.syn_ack = Some(syn_ack_output); } if let Some(mtu) = tcp_package.mtu { let link_quality = if let Some(matcher) = matcher { if let Some((link, _)) = matcher.matching_by_mtu(&mtu.value) { MTUQualityMatched { link: Some(link.clone()), quality: crate::db::MatchQualityType::Matched(1.0), } } else { MTUQualityMatched { link: None, quality: crate::db::MatchQualityType::NotMatched } } } else { MTUQualityMatched { link: None, quality: crate::db::MatchQualityType::Disabled } }; let mtu_output = MTUOutput { source: IpPort::new(IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V4(ipv4.get_destination()), tcp.get_destination()), link: link_quality, mtu: mtu.value, }; tcp_result.mtu = Some(mtu_output); } if let Some(uptime) = tcp_package.client_uptime { let uptime_output = UptimeOutput { source: 
IpPort::new(IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V4(ipv4.get_destination()), tcp.get_destination()), role: UptimeRole::Client, days: uptime.days, hours: uptime.hours, min: uptime.min, up_mod_days: uptime.up_mod_days, freq: uptime.freq, }; tcp_result.client_uptime = Some(uptime_output); } if let Some(uptime) = tcp_package.server_uptime { let uptime_output = UptimeOutput { source: IpPort::new(IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V4(ipv4.get_destination()), tcp.get_destination()), role: UptimeRole::Server, days: uptime.days, hours: uptime.hours, min: uptime.min, up_mod_days: uptime.up_mod_days, freq: uptime.freq, }; tcp_result.server_uptime = Some(uptime_output); } Ok(ObservablePackage { source, destination, tcp_result }) } /// Processes an IPv6 packet for TCP content. pub fn process_ipv6_packet( ipv6: &Ipv6Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, matcher: Option<&SignatureMatcher>, ) -> Result<TcpAnalysisResult, HuginnNetTcpError> { create_observable_package_ipv6(ipv6, connection_tracker, matcher).map(|pkg| pkg.tcp_result) } fn create_observable_package_ipv6( ipv6: &Ipv6Packet, connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, matcher: Option<&SignatureMatcher>, ) -> Result<ObservablePackage, HuginnNetTcpError> { // Extract TCP info for source/destination ports let tcp = TcpPacket::new(ipv6.payload()) .ok_or_else(|| HuginnNetTcpError::Parse("Invalid TCP packet".to_string()))?; let source = IpPort { ip: IpAddr::V6(ipv6.get_source()), port: tcp.get_source() }; let destination = IpPort { ip: IpAddr::V6(ipv6.get_destination()), port: tcp.get_destination() }; let tcp_package = tcp_process::process_tcp_ipv6(ipv6, connection_tracker)?; let mut tcp_result = TcpAnalysisResult { syn: None, syn_ack: None, mtu: None, client_uptime: None, server_uptime: None, }; // Process TCP request (SYN) if let Some(tcp_request) = tcp_package.tcp_request { 
let os_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_tcp_request(&tcp_request) { OSQualityMatched { os: Some(OperativeSystem::from(label)), quality: crate::db::MatchQualityType::Matched(quality), } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::NotMatched } } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::Disabled } }; let syn_output = SynTCPOutput { source: IpPort::new(IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V6(ipv6.get_destination()), tcp.get_destination()), os_matched: os_quality, sig: tcp_request, }; tcp_result.syn = Some(syn_output); } // Process TCP response (SYN-ACK) if let Some(tcp_response) = tcp_package.tcp_response { let os_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_tcp_response(&tcp_response) { OSQualityMatched { os: Some(OperativeSystem::from(label)), quality: crate::db::MatchQualityType::Matched(quality), } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::NotMatched } } } else { OSQualityMatched { os: None, quality: crate::db::MatchQualityType::Disabled } }; let syn_ack_output = SynAckTCPOutput { source: IpPort::new(IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V6(ipv6.get_destination()), tcp.get_destination()), os_matched: os_quality, sig: tcp_response, }; tcp_result.syn_ack = Some(syn_ack_output); } // Process MTU if let Some(mtu) = tcp_package.mtu { let link_quality = if let Some(matcher) = matcher { if let Some((link, _)) = matcher.matching_by_mtu(&mtu.value) { MTUQualityMatched { link: Some(link.clone()), quality: crate::db::MatchQualityType::Matched(1.0), } } else { MTUQualityMatched { link: None, quality: crate::db::MatchQualityType::NotMatched } } } else { MTUQualityMatched { link: None, quality: crate::db::MatchQualityType::Disabled } }; let mtu_output = 
MTUOutput { source: IpPort::new(IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V6(ipv6.get_destination()), tcp.get_destination()), link: link_quality, mtu: mtu.value, }; tcp_result.mtu = Some(mtu_output); } // Process client uptime if let Some(uptime) = tcp_package.client_uptime { let uptime_output = UptimeOutput { source: IpPort::new(IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V6(ipv6.get_destination()), tcp.get_destination()), role: UptimeRole::Client, days: uptime.days, hours: uptime.hours, min: uptime.min, up_mod_days: uptime.up_mod_days, freq: uptime.freq, }; tcp_result.client_uptime = Some(uptime_output); } // Process server uptime if let Some(uptime) = tcp_package.server_uptime { let uptime_output = UptimeOutput { source: IpPort::new(IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new(IpAddr::V6(ipv6.get_destination()), tcp.get_destination()), role: UptimeRole::Server, days: uptime.days, hours: uptime.hours, min: uptime.min, up_mod_days: uptime.up_mod_days, freq: uptime.freq, }; tcp_result.server_uptime = Some(uptime_output); } Ok(ObservablePackage { source, destination, tcp_result }) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/mtu.rs
huginn-net-tcp/src/mtu.rs
use crate::observable::ObservableMtu; use pnet::packet::tcp::TcpFlags::SYN; use pnet::packet::tcp::TcpPacket; use tracing::debug; fn from_client(tcp: &TcpPacket) -> bool { tcp.get_flags() & SYN == SYN } pub fn extract_from_ipv4(tcp: &TcpPacket, ipv4_header_len: u8, mss: u16) -> Option<ObservableMtu> { if from_client(tcp) { let ip_header_len = (ipv4_header_len as u16).saturating_mul(4); // convert to bytes let mut tcp_header_len = (tcp.get_data_offset() as u16).saturating_mul(4); // convert to bytes if tcp_header_len > 20 { // If TCP header contains options tcp_header_len = tcp_header_len.saturating_sub(20); } let mtu_value = mss .saturating_add(ip_header_len) .saturating_add(tcp_header_len); debug!( "MTU ipv4 {} - mss: {} - ip_header_len: {} - tcp_header_len: {}", mtu_value, mss, ip_header_len, tcp_header_len ); Some(ObservableMtu { value: mtu_value }) } else { None } } pub fn extract_from_ipv6(tcp: &TcpPacket, ipv6_header_len: u8, mss: u16) -> Option<ObservableMtu> { if from_client(tcp) { let ip_header_len = ipv6_header_len as u16; // ipv6_header_len is in bytes already let mut tcp_header_len = (tcp.get_data_offset() as u16).saturating_mul(4); // convert to bytes if tcp_header_len > 20 { // If TCP header contains options tcp_header_len = tcp_header_len.saturating_sub(20); } let mtu_value = mss .saturating_add(ip_header_len) .saturating_add(tcp_header_len); debug!( "MTU ipv6 {} - mss: {} - ip_header_len: {} - tcp_header_len: {}", mtu_value, mss, ip_header_len, tcp_header_len ); Some(ObservableMtu { value: mtu_value }) } else { None } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/filter.rs
huginn-net-tcp/src/filter.rs
use pnet::ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; /// Filter mode: Allow (allowlist) or Deny (denylist) #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum FilterMode { /// Allow only matching packets (allowlist mode) #[default] Allow, /// Deny matching packets (denylist mode) Deny, } /// Port filter configuration /// /// Filters packets based on TCP source and/or destination ports. /// Supports individual ports, ranges, and lists. /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// // Single port /// let filter = PortFilter::new().destination(443); /// /// // Multiple ports /// let filter = PortFilter::new().destination_list(vec![80, 443, 8080]); /// /// // Port range /// let filter = PortFilter::new().destination_range(8000..9000); /// ``` #[derive(Debug, Clone, Default)] pub struct PortFilter { /// Source ports to match pub source_ports: Vec<u16>, /// Destination ports to match pub destination_ports: Vec<u16>, /// Source port ranges (inclusive) pub source_ranges: Vec<(u16, u16)>, /// Destination port ranges (inclusive) pub destination_ranges: Vec<(u16, u16)>, /// Match ANY port (source OR destination)? 
pub match_any: bool, } impl PortFilter { /// Create a new empty port filter pub fn new() -> Self { Self::default() } /// Add a destination port /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().destination(443); /// ``` pub fn destination(mut self, port: u16) -> Self { self.destination_ports.push(port); self } /// Add a source port /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().source(12345); /// ``` pub fn source(mut self, port: u16) -> Self { self.source_ports.push(port); self } /// Add a destination port range (inclusive) /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().destination_range(8000..9000); /// // Matches ports 8000 through 8999 /// ``` pub fn destination_range(mut self, range: std::ops::Range<u16>) -> Self { self.destination_ranges .push((range.start, range.end.saturating_sub(1))); self } /// Add a source port range (inclusive) /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().source_range(10000..20000); /// // Matches ports 10000 through 19999 /// ``` pub fn source_range(mut self, range: std::ops::Range<u16>) -> Self { self.source_ranges .push((range.start, range.end.saturating_sub(1))); self } /// Add multiple destination ports /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().destination_list(vec![80, 443, 8080, 8443]); /// ``` pub fn destination_list(mut self, ports: Vec<u16>) -> Self { self.destination_ports.extend(ports); self } /// Add multiple source ports /// /// # Examples /// /// ```rust /// use huginn_net_tcp::PortFilter; /// /// let filter = PortFilter::new().source_list(vec![12345, 54321, 9999]); /// ``` pub fn source_list(mut self, ports: Vec<u16>) -> Self { self.source_ports.extend(ports); self } /// Match if ANY port matches (source OR 
destination) /// /// By default, all specified filters must match. With `match_any()`, /// the packet passes if either source OR destination matches. pub fn any_port(mut self) -> Self { self.match_any = true; self } /// Check if packet matches port filter /// /// # Returns /// /// `true` if the packet matches the filter criteria pub fn matches(&self, src_port: u16, dst_port: u16) -> bool { if self.match_any { let all_ports: Vec<u16> = self .source_ports .iter() .chain(self.destination_ports.iter()) .copied() .collect(); let all_ranges: Vec<(u16, u16)> = self .source_ranges .iter() .chain(self.destination_ranges.iter()) .copied() .collect(); let port_match = all_ports.contains(&src_port) || all_ports.contains(&dst_port) || all_ranges .iter() .any(|(start, end)| src_port >= *start && src_port <= *end) || all_ranges .iter() .any(|(start, end)| dst_port >= *start && dst_port <= *end); port_match } else { let src_match = self.source_ports.contains(&src_port) || self .source_ranges .iter() .any(|(start, end)| src_port >= *start && src_port <= *end); let dst_match = self.destination_ports.contains(&dst_port) || self .destination_ranges .iter() .any(|(start, end)| dst_port >= *start && dst_port <= *end); let src_ok = self.source_ports.is_empty() && self.source_ranges.is_empty() || src_match; let dst_ok = self.destination_ports.is_empty() && self.destination_ranges.is_empty() || dst_match; src_ok && dst_ok } } } /// IP address filter configuration /// /// Filters packets based on specific IPv4 or IPv6 addresses. /// /// # Examples /// /// ```rust /// use huginn_net_tcp::IpFilter; /// /// let filter = IpFilter::new() /// .allow("8.8.8.8") /// .unwrap() /// .allow("2001:4860:4860::8888") /// .unwrap(); /// ``` #[derive(Debug, Clone, Default)] pub struct IpFilter { /// IPv4 addresses to match pub ipv4_addresses: Vec<Ipv4Addr>, /// IPv6 addresses to match pub ipv6_addresses: Vec<Ipv6Addr>, /// Check source, destination, or both? 
pub check_source: bool, pub check_destination: bool, } impl IpFilter { /// Create a new IP filter that checks both source and destination by default pub fn new() -> Self { Self { check_source: true, check_destination: true, ..Default::default() } } /// Add an IP address (auto-detects IPv4/IPv6) /// /// # Errors /// /// Returns an error if the IP address string is invalid /// /// # Examples /// /// ```rust /// use huginn_net_tcp::IpFilter; /// /// let filter = IpFilter::new() /// .allow("192.168.1.1").unwrap() /// .allow("2001:db8::1").unwrap(); /// ``` pub fn allow(mut self, ip: &str) -> Result<Self, String> { let addr: IpAddr = ip.parse().map_err(|e| format!("Invalid IP: {e}"))?; match addr { IpAddr::V4(v4) => self.ipv4_addresses.push(v4), IpAddr::V6(v6) => self.ipv6_addresses.push(v6), } Ok(self) } /// Add multiple IP addresses /// /// # Errors /// /// Returns an error if any IP address string is invalid /// /// # Examples /// /// ```rust /// use huginn_net_tcp::IpFilter; /// /// let filter = IpFilter::new() /// .allow_list(vec!["8.8.8.8", "1.1.1.1", "2001:4860:4860::8888"]) /// .unwrap(); /// ``` pub fn allow_list(mut self, ips: Vec<&str>) -> Result<Self, String> { for ip in ips { self = self.allow(ip)?; } Ok(self) } /// Only check source addresses /// /// By default, both source and destination are checked. pub fn source_only(mut self) -> Self { self.check_source = true; self.check_destination = false; self } /// Only check destination addresses /// /// By default, both source and destination are checked. 
pub fn destination_only(mut self) -> Self { self.check_source = false; self.check_destination = true; self } /// Check if packet matches IP filter /// /// # Returns /// /// `true` if either source or destination IP matches (if enabled) pub fn matches(&self, src_ip: &IpAddr, dst_ip: &IpAddr) -> bool { let src_match = if self.check_source { match src_ip { IpAddr::V4(v4) => self.ipv4_addresses.contains(v4), IpAddr::V6(v6) => self.ipv6_addresses.contains(v6), } } else { false }; let dst_match = if self.check_destination { match dst_ip { IpAddr::V4(v4) => self.ipv4_addresses.contains(v4), IpAddr::V6(v6) => self.ipv6_addresses.contains(v6), } } else { false }; src_match || dst_match } } /// Subnet filter configuration (CIDR notation) /// /// Filters packets based on subnet membership using CIDR notation. /// Supports both IPv4 and IPv6 subnets. /// /// # Examples /// /// ```rust /// use huginn_net_tcp::SubnetFilter; /// /// // Allow only private networks /// let filter = SubnetFilter::new() /// .allow("192.168.0.0/16").unwrap() /// .allow("10.0.0.0/8").unwrap(); /// /// // IPv6 subnet /// let filter = SubnetFilter::new() /// .allow("2001:db8::/32").unwrap(); /// ``` #[derive(Debug, Clone, Default)] pub struct SubnetFilter { /// IPv4 subnets to match pub ipv4_subnets: Vec<Ipv4Network>, /// IPv6 subnets to match pub ipv6_subnets: Vec<Ipv6Network>, /// Check source, destination, or both? 
pub check_source: bool, pub check_destination: bool, } impl SubnetFilter { /// Create a new subnet filter that checks both source and destination by default pub fn new() -> Self { Self { check_source: true, check_destination: true, ..Default::default() } } /// Add a subnet in CIDR notation /// /// # Errors /// /// Returns an error if the CIDR notation is invalid /// /// # Examples /// /// ```rust /// use huginn_net_tcp::SubnetFilter; /// /// let filter = SubnetFilter::new() /// .allow("192.168.1.0/24").unwrap(); /// ``` pub fn allow(mut self, cidr: &str) -> Result<Self, String> { let network: IpNetwork = cidr.parse().map_err(|e| format!("Invalid CIDR: {e}"))?; match network { IpNetwork::V4(v4) => self.ipv4_subnets.push(v4), IpNetwork::V6(v6) => self.ipv6_subnets.push(v6), } Ok(self) } /// Add multiple subnets /// /// # Errors /// /// Returns an error if any CIDR notation is invalid /// /// # Examples /// /// ```rust /// use huginn_net_tcp::SubnetFilter; /// /// let filter = SubnetFilter::new() /// .allow_list(vec!["192.168.0.0/16", "10.0.0.0/8", "172.16.0.0/12"]) /// .unwrap(); /// ``` pub fn allow_list(mut self, cidrs: Vec<&str>) -> Result<Self, String> { for cidr in cidrs { self = self.allow(cidr)?; } Ok(self) } /// Only check source addresses /// /// By default, both source and destination are checked. pub fn source_only(mut self) -> Self { self.check_source = true; self.check_destination = false; self } /// Only check destination addresses /// /// By default, both source and destination are checked. 
pub fn destination_only(mut self) -> Self {
    self.check_source = false;
    self.check_destination = true;
    self
}

/// Check if packet matches subnet filter
///
/// # Returns
///
/// `true` if either source or destination IP is in any of the subnets (if enabled)
pub fn matches(&self, src_ip: &IpAddr, dst_ip: &IpAddr) -> bool {
    // Source side is only consulted when check_source is enabled.
    let src_match = if self.check_source {
        match src_ip {
            IpAddr::V4(v4) => self.ipv4_subnets.iter().any(|net| net.contains(*v4)),
            IpAddr::V6(v6) => self.ipv6_subnets.iter().any(|net| net.contains(*v6)),
        }
    } else {
        false
    };
    // Destination side is only consulted when check_destination is enabled.
    let dst_match = if self.check_destination {
        match dst_ip {
            IpAddr::V4(v4) => self.ipv4_subnets.iter().any(|net| net.contains(*v4)),
            IpAddr::V6(v6) => self.ipv6_subnets.iter().any(|net| net.contains(*v6)),
        }
    } else {
        false
    };
    // A hit on either enabled side counts as a match.
    src_match || dst_match
}
}

/// Combined filter configuration
///
/// Combines port, IP, and subnet filters with a filter mode (Allow/Deny).
/// All enabled filters must pass for a packet to be processed.
///
/// # Examples
///
/// ```rust
/// use huginn_net_tcp::{FilterConfig, FilterMode, PortFilter, SubnetFilter};
///
/// let filter = FilterConfig::new()
///     .mode(FilterMode::Allow)
///     .with_port_filter(PortFilter::new().destination(443))
///     .with_subnet_filter(
///         SubnetFilter::new()
///             .allow("192.168.0.0/16")
///             .unwrap()
///     );
/// ```
#[derive(Debug, Clone, Default)]
pub struct FilterConfig {
    // Optional port-based filter (source/destination ports).
    pub port_filter: Option<PortFilter>,
    // Optional exact-IP filter.
    pub ip_filter: Option<IpFilter>,
    // Optional CIDR subnet filter.
    pub subnet_filter: Option<SubnetFilter>,
    // Allowlist vs denylist semantics applied to the filters above.
    pub mode: FilterMode,
}

impl FilterConfig {
    /// Create a new empty filter configuration
    pub fn new() -> Self {
        Self::default()
    }

    /// Set filter mode (Allow/Deny)
    ///
    /// # Examples
    ///
    /// ```
    /// use huginn_net_tcp::{FilterConfig, FilterMode};
    ///
    /// // Allowlist mode (default) - only matching packets pass
    /// let filter = FilterConfig::new().mode(FilterMode::Allow);
    ///
    /// // Denylist mode - matching packets are blocked
    /// let filter = FilterConfig::new().mode(FilterMode::Deny);
    /// ```
    pub fn mode(mut self, mode: FilterMode) -> Self {
        self.mode = mode;
        self
    }

    /// Add port filter
    ///
    /// # Examples
    ///
    /// ```rust
    /// use huginn_net_tcp::{FilterConfig, PortFilter};
    ///
    /// let filter = FilterConfig::new()
    ///     .with_port_filter(PortFilter::new().destination(443));
    /// ```
    pub fn with_port_filter(mut self, filter: PortFilter) -> Self {
        self.port_filter = Some(filter);
        self
    }

    /// Add IP filter
    ///
    /// # Examples
    ///
    /// ```rust
    /// use huginn_net_tcp::{FilterConfig, IpFilter};
    ///
    /// let filter = FilterConfig::new()
    ///     .with_ip_filter(
    ///         IpFilter::new()
    ///             .allow("8.8.8.8")
    ///             .unwrap()
    ///     );
    /// ```
    pub fn with_ip_filter(mut self, filter: IpFilter) -> Self {
        self.ip_filter = Some(filter);
        self
    }

    /// Add subnet filter
    ///
    /// # Examples
    ///
    /// ```rust
    /// use huginn_net_tcp::{FilterConfig, SubnetFilter};
    ///
    /// let filter = FilterConfig::new()
    ///     .with_subnet_filter(
    ///         SubnetFilter::new()
    ///             .allow("192.168.0.0/16")
    ///             .unwrap()
    ///     );
    /// ```
    pub fn with_subnet_filter(mut self, filter: SubnetFilter) -> Self {
        self.subnet_filter = Some(filter);
        self
    }

    /// Check if packet should be processed based on filters (userspace filtering)
    ///
    /// This method performs filtering in userspace after packets reach the application.
    /// It extracts IP addresses and ports from packet headers and applies the configured
    /// filters (port, IP, subnet) according to the filter mode (Allow/Deny).
    ///
    /// # Returns
    ///
    /// - `true`: Packet passes all filters (should be processed)
    /// - `false`: Packet blocked by filters (should be dropped)
    ///
    /// # Logic
    ///
    /// - If no filters are configured, all packets pass
    /// - In Allow mode: packet must match ALL configured filters
    /// - In Deny mode: packet must NOT match ALL configured filters
    pub fn should_process(
        &self,
        src_ip: &IpAddr,
        dst_ip: &IpAddr,
        src_port: u16,
        dst_port: u16,
    ) -> bool {
        // No filters configured: everything passes.
        if self.port_filter.is_none() && self.ip_filter.is_none() && self.subnet_filter.is_none() {
            return true;
        }
        match self.mode {
            FilterMode::Allow => {
                // Allowlist: the first configured filter that fails rejects the packet.
                if let Some(ref filter) = self.port_filter {
                    if !filter.matches(src_port, dst_port) {
                        return false;
                    }
                }
                if let Some(ref filter) = self.ip_filter {
                    if !filter.matches(src_ip, dst_ip) {
                        return false;
                    }
                }
                if let Some(ref filter) = self.subnet_filter {
                    if !filter.matches(src_ip, dst_ip) {
                        return false;
                    }
                }
                true
            }
            FilterMode::Deny => {
                // Denylist: blocked only when every configured filter matches.
                let mut all_match = true;
                if let Some(ref filter) = self.port_filter {
                    all_match = all_match && filter.matches(src_port, dst_port);
                }
                if let Some(ref filter) = self.ip_filter {
                    all_match = all_match && filter.matches(src_ip, dst_ip);
                }
                if let Some(ref filter) = self.subnet_filter {
                    all_match = all_match && filter.matches(src_ip, dst_ip);
                }
                !all_match
            }
        }
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/packet_hash.rs
huginn-net-tcp/src/packet_hash.rs
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Computes a stable hash of a packet's source IP for worker assignment.
///
/// Packets from the same source IP always hash to the same value, so they can
/// be routed to the same worker and keep per-connection state consistent.
/// Accepts both Ethernet-framed and raw IP packets, IPv4 or IPv6.
///
/// Malformed, truncated, or unknown-version packets fall back to hashing the
/// entire packet contents: per-source affinity is lost for that packet, but no
/// input is ever rejected or causes a panic.
pub fn hash_source_ip(packet: &[u8]) -> usize {
    // Detect an Ethernet header by its EtherType field: IPv4 (0x0800) or IPv6 (0x86DD).
    let has_ethernet = packet.len() > 14
        && matches!((packet[12], packet[13]), (0x08, 0x00) | (0x86, 0xDD));
    let offset: usize = if has_ethernet { 14 } else { 0 };

    // Require at least a minimal (20-byte) IP header beyond the link layer.
    if packet.len() < offset.saturating_add(20) {
        return digest(packet);
    }

    let ip = &packet[offset..];
    match (ip[0] >> 4) & 0x0F {
        // IPv4: source address occupies bytes 12..16 of the IP header.
        4 if ip.len() >= 16 => digest(&ip[12..16]),
        // IPv6: source address occupies bytes 8..24 of the IP header.
        6 if ip.len() >= 24 => digest(&ip[8..24]),
        // Unknown version or header too short for its address field:
        // hash the whole packet instead of dropping it.
        _ => digest(packet),
    }
}

/// Hashes an arbitrary byte slice with the standard library's default hasher.
fn digest(bytes: &[u8]) -> usize {
    let mut hasher = DefaultHasher::new();
    bytes.hash(&mut hasher);
    hasher.finish() as usize
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/signature_matcher.rs
huginn-net-tcp/src/signature_matcher.rs
use crate::db::{db_matching_trait::FingerprintDb, Database, Label};
use crate::observable::ObservableTcp;

/// Matches observed TCP signatures against a fingerprint database.
pub struct SignatureMatcher<'a> {
    database: &'a Database,
}

impl<'a> SignatureMatcher<'a> {
    /// Creates a matcher backed by the given database.
    pub fn new(database: &'a Database) -> Self {
        Self { database }
    }

    /// Finds the best database match for a client (SYN) signature.
    pub fn matching_by_tcp_request(
        &self,
        signature: &ObservableTcp,
    ) -> Option<(&'a Label, &'a crate::tcp::Signature, f32)> {
        self.database
            .tcp_request
            .find_best_match(&signature.matching)
    }

    /// Finds the best database match for a server (SYN+ACK) signature.
    pub fn matching_by_tcp_response(
        &self,
        signature: &ObservableTcp,
    ) -> Option<(&'a Label, &'a crate::tcp::Signature, f32)> {
        self.database
            .tcp_response
            .find_best_match(&signature.matching)
    }

    /// Returns the first link type whose MTU list contains the observed value.
    pub fn matching_by_mtu(&self, mtu: &u16) -> Option<(&'a String, &'a u16)> {
        self.database.mtu.iter().find_map(|(link, db_mtus)| {
            db_mtus
                .iter()
                .find(|db_mtu| *db_mtu == mtu)
                .map(|matched| (link, matched))
        })
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/ip_options.rs
huginn-net-tcp/src/ip_options.rs
use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; pub struct IpOptions; use pnet::packet::Packet; /// Utility struct for handling IP header options and extension headers. /// Provides methods to calculate the length of optional headers in both IPv4 and IPv6 packets. impl IpOptions { pub fn calculate_ipv4_length(packet: &Ipv4Packet) -> u8 { // IHL (Internet Header Length) is in 32-bit words // Subtract minimum header length (20 bytes = 5 words) let ihl = packet.get_header_length(); let options_length: u8 = if ihl > 5 { // convert words to bytes ihl.saturating_sub(5).saturating_mul(4) } else { 0 // No options: standard header only }; options_length } pub fn calculate_ipv6_length(packet: &Ipv6Packet) -> u8 { // Most packets will be direct TCP if packet.get_next_header() == IpNextHeaderProtocols::Tcp { return 0; } let payload = packet.payload(); if payload.is_empty() { return 0; } let len = match packet.get_next_header() { IpNextHeaderProtocols::Ipv6Frag => 8, _ => { if payload.len() >= 2 { let header_len = payload[1] as usize; header_len .checked_add(1) .and_then(|sum| sum.checked_mul(8)) .unwrap_or(0) } else { 0 } } }; len as u8 } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/error.rs
huginn-net-tcp/src/error.rs
use thiserror::Error;

/// Errors produced by the TCP analysis pipeline.
#[derive(Error, Debug)]
pub enum HuginnNetTcpError {
    /// Raw bytes could not be parsed into the expected packet structure.
    #[error("Parse error: {0}")]
    Parse(String),
    /// The packet carries a protocol this crate does not analyze.
    #[error("Unsupported protocol: {0}")]
    UnsupportedProtocol(String),
    /// The TCP flags byte is not a valid/expected combination.
    #[error("Invalid TCP flags: {0}")]
    InvalidTcpFlags(u8),
    /// A packet arrived that does not fit the expected analysis flow.
    #[error("Invalid package: {0}")]
    UnexpectedPackage(String),
    /// Invalid user-supplied configuration (e.g. a zero worker count).
    #[error("Misconfiguration: {0}")]
    Misconfiguration(String),
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/parallel.rs
huginn-net-tcp/src/parallel.rs
use crate::error::HuginnNetTcpError; use crate::filter::FilterConfig; use crate::output::TcpAnalysisResult; use crate::packet_hash; use crate::packet_parser::{parse_packet, IpPacket}; use crate::process::{process_ipv4_packet, process_ipv6_packet}; use crate::raw_filter; use crate::signature_matcher::SignatureMatcher; use crossbeam_channel::{bounded, Sender, TrySendError}; use std::num::NonZeroUsize; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; use ttl_cache::TtlCache; /// Worker configuration parameters. #[derive(Debug, Clone, Copy)] struct WorkerConfig { batch_size: usize, timeout_ms: u64, max_connections: usize, } /// Result of packet dispatch to worker queue. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum DispatchResult { /// Packet successfully queued for processing Queued, /// Packet dropped (queue full or pool shutdown) Dropped, } /// Statistics for a single worker. #[derive(Debug, Clone)] pub struct WorkerStats { /// Worker ID pub id: usize, /// Current queue size pub queue_size: usize, /// Number of packets dropped by this worker pub dropped: u64, } impl std::fmt::Display for WorkerStats { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Worker {}: queue_size={}, dropped={}", self.id, self.queue_size, self.dropped ) } } /// Statistics for the entire worker pool. #[derive(Debug, Clone)] pub struct PoolStats { /// Total packets dispatched pub total_dispatched: u64, /// Total packets dropped pub total_dropped: u64, /// Per-worker statistics pub workers: Vec<WorkerStats>, } impl std::fmt::Display for PoolStats { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { writeln!( f, "TCP Pool Stats - packets dispatched: {}, packets dropped: {}", self.total_dispatched, self.total_dropped )?; for worker in &self.workers { writeln!(f, " {worker}")?; } Ok(()) } } /// Worker pool for parallel TCP processing with hash-based dispatch. 
pub struct WorkerPool { _workers: Vec<thread::JoinHandle<()>>, packet_senders: Arc<Vec<Sender<Vec<u8>>>>, result_sender: Arc<Mutex<Option<std::sync::mpsc::Sender<TcpAnalysisResult>>>>, shutdown_flag: Arc<AtomicBool>, pub num_workers: NonZeroUsize, dispatched_count: AtomicU64, dropped_count: AtomicU64, worker_dropped: Vec<AtomicU64>, pub batch_size: usize, pub timeout_ms: u64, } impl WorkerPool { /// Creates a new worker pool for parallel TCP processing. /// /// # Parameters /// - `num_workers`: Number of worker threads /// - `queue_size`: Size of each worker's packet queue /// - `batch_size`: Number of packets to process before yielding /// - `timeout_ms`: Timeout in milliseconds for blocking receive /// - `result_sender`: Channel to send TCP analysis results /// - `database`: Optional database for OS fingerprinting (wrapped in Arc for thread sharing) /// - `max_connections`: Maximum connections to track per worker /// - `filter_config`: Optional filter configuration for packet filtering /// /// # Errors /// Returns error if `num_workers` is 0 or thread creation fails. 
#[allow(clippy::too_many_arguments)] pub fn new( num_workers: usize, queue_size: usize, batch_size: usize, timeout_ms: u64, result_sender: std::sync::mpsc::Sender<TcpAnalysisResult>, database: Option<Arc<crate::db::Database>>, max_connections: usize, filter_config: Option<FilterConfig>, ) -> Result<Self, HuginnNetTcpError> { let num_workers = NonZeroUsize::new(num_workers).ok_or( HuginnNetTcpError::Misconfiguration("Worker count must be greater than 0".to_string()), )?; let mut workers = Vec::new(); let mut packet_senders = Vec::new(); let mut worker_dropped = Vec::new(); let shutdown_flag = Arc::new(AtomicBool::new(false)); for worker_id in 0..num_workers.get() { let (tx, rx) = bounded::<Vec<u8>>(queue_size); packet_senders.push(tx); let result_sender_clone = result_sender.clone(); let dropped_counter = Arc::new(AtomicU64::new(0)); worker_dropped.push(Arc::clone(&dropped_counter)); let shutdown_flag_clone = Arc::clone(&shutdown_flag); let worker_database = database.as_ref().map(Arc::clone); let worker_filter = filter_config.clone(); let handle = thread::Builder::new() .name(format!("tcp-worker-{worker_id}")) .spawn(move || { Self::worker_loop( worker_id, rx, result_sender_clone, worker_database, shutdown_flag_clone, WorkerConfig { batch_size, timeout_ms, max_connections }, worker_filter, ); }) .map_err(|e| { HuginnNetTcpError::Misconfiguration(format!( "Failed to spawn worker thread: {e}" )) })?; workers.push(handle); } let worker_dropped_plain: Vec<AtomicU64> = worker_dropped .iter() .map(|arc| AtomicU64::new(arc.load(Ordering::Relaxed))) .collect(); Ok(Self { _workers: workers, packet_senders: Arc::new(packet_senders), result_sender: Arc::new(Mutex::new(Some(result_sender))), shutdown_flag, num_workers, dispatched_count: AtomicU64::new(0), dropped_count: AtomicU64::new(0), worker_dropped: worker_dropped_plain, batch_size, timeout_ms, }) } fn worker_loop( worker_id: usize, rx: crossbeam_channel::Receiver<Vec<u8>>, result_sender: 
std::sync::mpsc::Sender<TcpAnalysisResult>, database: Option<Arc<crate::db::Database>>, shutdown_flag: Arc<AtomicBool>, config: WorkerConfig, filter_config: Option<FilterConfig>, ) { use crossbeam_channel::RecvTimeoutError; use std::time::Duration; tracing::debug!("TCP worker {worker_id} starting"); // Each worker creates its own matcher from the shared database let matcher = database .as_ref() .map(|db| SignatureMatcher::new(db.as_ref())); // Each worker maintains its own connection tracker (state isolation) let mut connection_tracker = TtlCache::new(config.max_connections); let timeout = Duration::from_millis(config.timeout_ms); loop { if shutdown_flag.load(Ordering::Relaxed) { tracing::debug!("TCP worker {worker_id} received shutdown signal"); break; } // Blocking receive for first packet in batch let first_packet = match rx.recv_timeout(timeout) { Ok(packet) => packet, Err(RecvTimeoutError::Timeout) => { if shutdown_flag.load(Ordering::Relaxed) { tracing::debug!( "TCP worker {worker_id} received shutdown signal during timeout" ); break; } continue; } Err(RecvTimeoutError::Disconnected) => { tracing::debug!("TCP worker {worker_id}: channel disconnected"); break; } }; // Process first packet if !Self::process_packet( &first_packet, &mut connection_tracker, matcher.as_ref(), &result_sender, filter_config.as_ref(), ) { tracing::debug!("TCP worker {worker_id}: result channel closed"); break; } // Try to collect more packets for batch processing (non-blocking) for _ in 1..config.batch_size { match rx.try_recv() { Ok(packet) => { if !Self::process_packet( &packet, &mut connection_tracker, matcher.as_ref(), &result_sender, filter_config.as_ref(), ) { tracing::debug!("TCP worker {worker_id}: result channel closed"); return; } } Err(_) => break, // No more packets available, continue to next batch } } } tracing::debug!("TCP worker {worker_id} exiting"); } /// Processes a single packet and sends the result. 
/// Returns `false` if the result channel is closed (signal to exit). fn process_packet( packet: &[u8], connection_tracker: &mut TtlCache< crate::uptime::ConnectionKey, crate::uptime::TcpTimestamp, >, matcher: Option<&SignatureMatcher>, result_sender: &std::sync::mpsc::Sender<TcpAnalysisResult>, filter: Option<&FilterConfig>, ) -> bool { if let Some(filter_cfg) = filter { if !raw_filter::apply(packet, filter_cfg) { tracing::debug!("Filtered out packet before parsing"); return true; } } let result = match parse_packet(packet) { IpPacket::Ipv4(ipv4) => process_ipv4_packet(&ipv4, connection_tracker, matcher), IpPacket::Ipv6(ipv6) => process_ipv6_packet(&ipv6, connection_tracker, matcher), IpPacket::None => Ok(TcpAnalysisResult { syn: None, syn_ack: None, mtu: None, client_uptime: None, server_uptime: None, }), }; match result { Ok(analysis_result) => result_sender.send(analysis_result).is_ok(), Err(_e) => { tracing::debug!("Error processing packet: {_e}"); true // Continue processing despite error } } } /// Dispatches a packet to the appropriate worker based on source IP hash. /// /// Uses hash-based assignment to ensure packets from the same source IP /// always go to the same worker, maintaining state consistency. 
pub fn dispatch(&self, packet: Vec<u8>) -> DispatchResult { // Check if pool is shutting down if self.shutdown_flag.load(Ordering::Relaxed) { return DispatchResult::Dropped; } // Extract source IP for hashing let source_ip_hash = packet_hash::hash_source_ip(&packet); // NonZeroUsize guarantees num_workers.get() > 0 let worker_id = source_ip_hash .checked_rem(self.num_workers.get()) .unwrap_or(0); match self.packet_senders[worker_id].try_send(packet) { Ok(()) => { self.dispatched_count.fetch_add(1, Ordering::Relaxed); DispatchResult::Queued } Err(TrySendError::Full(_)) | Err(TrySendError::Disconnected(_)) => { self.dropped_count.fetch_add(1, Ordering::Relaxed); self.worker_dropped[worker_id].fetch_add(1, Ordering::Relaxed); DispatchResult::Dropped } } } pub fn stats(&self) -> PoolStats { let mut workers = Vec::new(); for (id, sender) in self.packet_senders.iter().enumerate() { workers.push(WorkerStats { id, queue_size: sender.len(), dropped: self.worker_dropped[id].load(Ordering::Relaxed), }); } PoolStats { total_dispatched: self.dispatched_count.load(Ordering::Relaxed), total_dropped: self.dropped_count.load(Ordering::Relaxed), workers, } } /// Initiates graceful shutdown of the worker pool. pub fn shutdown(&self) { self.shutdown_flag.store(true, Ordering::Relaxed); // Drop result sender to signal workers if let Ok(mut sender) = self.result_sender.lock() { *sender = None; } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/display.rs
huginn-net-tcp/src/display.rs
use crate::observable::ObservableTcp;
use core::fmt;
use huginn_net_db::tcp::{IpVersion, PayloadSize, Quirk, TcpOption, Ttl, WindowSize};
use std::fmt::Formatter;

/// Shared contract for rendering a TCP signature in its p0f-style text form.
trait TcpDisplayFormat {
    fn get_version(&self) -> IpVersion;
    fn get_ittl(&self) -> Ttl;
    fn get_olen(&self) -> u8;
    fn get_mss(&self) -> Option<u16>;
    fn get_wsize(&self) -> WindowSize;
    fn get_wscale(&self) -> Option<u8>;
    fn get_olayout(&self) -> &[TcpOption];
    fn get_quirks(&self) -> &[Quirk];
    fn get_pclass(&self) -> PayloadSize;

    /// Renders `ver:ittl:olen:mss:wsize,wscale:olayout:quirks:pclass`,
    /// with `*` standing in for the absent optional fields.
    fn format_tcp_display(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}:{}:{}:",
            self.get_version(),
            self.get_ittl(),
            self.get_olen()
        )?;
        match self.get_mss() {
            Some(mss) => write!(f, "{mss}")?,
            None => f.write_str("*")?,
        }
        write!(f, ":{},", self.get_wsize())?;
        match self.get_wscale() {
            Some(scale) => write!(f, "{scale}")?,
            None => f.write_str("*")?,
        }
        f.write_str(":")?;
        // Comma-separated option layout.
        let mut first = true;
        for option in self.get_olayout() {
            if !first {
                f.write_str(",")?;
            }
            first = false;
            write!(f, "{option}")?;
        }
        f.write_str(":")?;
        // Comma-separated quirks.
        let mut first = true;
        for quirk in self.get_quirks() {
            if !first {
                f.write_str(",")?;
            }
            first = false;
            write!(f, "{quirk}")?;
        }
        write!(f, ":{}", self.get_pclass())
    }
}

impl TcpDisplayFormat for ObservableTcp {
    fn get_version(&self) -> IpVersion {
        self.matching.version
    }
    fn get_ittl(&self) -> Ttl {
        self.matching.ittl.clone()
    }
    fn get_olen(&self) -> u8 {
        self.matching.olen
    }
    fn get_mss(&self) -> Option<u16> {
        self.matching.mss
    }
    fn get_wsize(&self) -> WindowSize {
        self.matching.wsize.clone()
    }
    fn get_wscale(&self) -> Option<u8> {
        self.matching.wscale
    }
    fn get_olayout(&self) -> &[TcpOption] {
        &self.matching.olayout
    }
    fn get_quirks(&self) -> &[Quirk] {
        &self.matching.quirks
    }
    fn get_pclass(&self) -> PayloadSize {
        self.matching.pclass
    }
}

impl fmt::Display for ObservableTcp {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        self.format_tcp_display(f)
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/raw_filter.rs
huginn-net-tcp/src/raw_filter.rs
use crate::filter::FilterConfig; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tracing::debug; /// Apply raw filter check on raw packet bytes /// /// Extracts only IPs and ports without creating full packet structures. /// This is much faster than parsing the entire packet first. /// /// # Returns /// /// - `true`: Packet should be processed (passed filter or no filter) /// - `false`: Packet should be dropped (failed filter) pub fn apply(packet: &[u8], filter: &FilterConfig) -> bool { if let Some((src_ip, dst_ip, src_port, dst_port)) = extract_quick_info(packet) { filter.should_process(&src_ip, &dst_ip, src_port, dst_port) } else { debug!("Could not extract quick info from packet"); true } } /// Extract IPs and ports without full parsing /// /// Tries multiple datalink formats (Ethernet, Raw IP, NULL) to find IP header. /// Only extracts the minimum fields needed for filtering. fn extract_quick_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // Try Ethernet (most common) if let Some(info) = try_ethernet(packet) { return Some(info); } // Try Raw IP if let Some(info) = try_raw_ip(packet) { return Some(info); } // Try NULL/Loopback if let Some(info) = try_null_datalink(packet) { return Some(info); } None } /// Try to extract from Ethernet frame fn try_ethernet(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { if packet.len() < 14 { return None; } // EtherType at offset 12-13 let ethertype = u16::from_be_bytes([packet[12], packet[13]]); match ethertype { 0x0800 => extract_ipv4_info(&packet[14..]), // IPv4 0x86DD => extract_ipv6_info(&packet[14..]), // IPv6 _ => None, } } /// Try to extract from Raw IP fn try_raw_ip(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { if packet.is_empty() { return None; } // Check IP version (first 4 bits) let version = packet[0] >> 4; match version { 4 => extract_ipv4_info(packet), 6 => extract_ipv6_info(packet), _ => None, } } /// Try to extract from NULL/Loopback datalink fn try_null_datalink(packet: &[u8]) -> 
Option<(IpAddr, IpAddr, u16, u16)> { if packet.len() < 4 { return None; } // NULL datalink has 4-byte header with address family // AF_INET = 2, AF_INET6 = 30 (on most systems) let family = u32::from_ne_bytes([packet[0], packet[1], packet[2], packet[3]]); match family { 2 => extract_ipv4_info(&packet[4..]), // AF_INET 30 | 28 => extract_ipv6_info(&packet[4..]), // AF_INET6 (varies by OS) _ => None, } } /// Extract IPv4 src/dst IPs and TCP ports (minimal parsing) fn extract_ipv4_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // IPv4 header minimum: 20 bytes if packet.len() < 20 { return None; } // Check protocol (offset 9): must be TCP (6) if packet[9] != 6 { return None; } // Extract source IP (offset 12-15) let src_ip = IpAddr::V4(Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15])); // Extract destination IP (offset 16-19) let dst_ip = IpAddr::V4(Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19])); // Get IP header length (first 4 bits of byte 0, in 32-bit words) let ihl = (packet[0] & 0x0F) as usize; let ip_header_len = ihl.saturating_mul(4); // TCP header starts after IP header let tcp_offset = ip_header_len; if packet.len() < tcp_offset.saturating_add(4) { return None; } // Extract TCP ports (first 4 bytes of TCP header) let src_port = u16::from_be_bytes([packet[tcp_offset], packet[tcp_offset.saturating_add(1)]]); let dst_port = u16::from_be_bytes([ packet[tcp_offset.saturating_add(2)], packet[tcp_offset.saturating_add(3)], ]); Some((src_ip, dst_ip, src_port, dst_port)) } /// Extract IPv6 src/dst IPs and TCP ports (minimal parsing) fn extract_ipv6_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // IPv6 header: 40 bytes minimum if packet.len() < 40 { return None; } // Check next header (offset 6): must be TCP (6) if packet[6] != 6 { return None; } // Extract source IP (offset 8-23) let src_ip = IpAddr::V6(Ipv6Addr::new( u16::from_be_bytes([packet[8], packet[9]]), u16::from_be_bytes([packet[10], packet[11]]), 
u16::from_be_bytes([packet[12], packet[13]]), u16::from_be_bytes([packet[14], packet[15]]), u16::from_be_bytes([packet[16], packet[17]]), u16::from_be_bytes([packet[18], packet[19]]), u16::from_be_bytes([packet[20], packet[21]]), u16::from_be_bytes([packet[22], packet[23]]), )); // Extract destination IP (offset 24-39) let dst_ip = IpAddr::V6(Ipv6Addr::new( u16::from_be_bytes([packet[24], packet[25]]), u16::from_be_bytes([packet[26], packet[27]]), u16::from_be_bytes([packet[28], packet[29]]), u16::from_be_bytes([packet[30], packet[31]]), u16::from_be_bytes([packet[32], packet[33]]), u16::from_be_bytes([packet[34], packet[35]]), u16::from_be_bytes([packet[36], packet[37]]), u16::from_be_bytes([packet[38], packet[39]]), )); // TCP header starts at offset 40 (IPv6 header is fixed 40 bytes) if packet.len() < 44 { return None; } // Extract TCP ports let src_port = u16::from_be_bytes([packet[40], packet[41]]); let dst_port = u16::from_be_bytes([packet[42], packet[43]]); Some((src_ip, dst_ip, src_port, dst_port)) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/ttl.rs
huginn-net-tcp/src/ttl.rs
use crate::tcp::Ttl; pub fn guess_distance(ttl: u8) -> u8 { if ttl > 128 { 255u8.saturating_sub(ttl) } else if ttl > 64 { 128u8.saturating_sub(ttl) } else if ttl > 32 { 64u8.saturating_sub(ttl) } else { 32u8.saturating_sub(ttl) } } const MAX_HOPS_ACCEPTABLE: u8 = 30; /// Calculate TTL using the Known TTL values from common operating systems /// TTL_WINDOWS: u8 = 128; // Windows typically uses 128 /// TTL_LINUX: u8 = 64; // Linux typically uses 64 /// TTL_OSX: u8 = 64; // macOS typically uses 64 /// TTL_IOS: u8 = 64; // iOS typically uses 64 /// TTL_ANDROID: u8 = 64; // Android typically uses 64 /// TTL_FREEBSD: u8 = 255; // FreeBSD typically uses 255 /// /// How calculate the ttl: /// 1. Guess the distance from the observed ttl /// 2. Determine the likely initial ttl based on the observed ttl range /// 3. If ttl_observed is 0, return Ttl::Bad(0) /// 4. If the distance is reasonable (e.g., <= MAX_HOPS_ACCEPTABLE hops), consider it a valid distance calculation /// 5. If the ttl doesn't match common patterns, classify it as Ttl::Value (raw ttl) pub fn calculate_ttl(ttl_observed: u8) -> Ttl { if ttl_observed == 0 { return Ttl::Bad(ttl_observed); } let distance = guess_distance(ttl_observed); if distance <= MAX_HOPS_ACCEPTABLE { Ttl::Distance(ttl_observed, distance) } else { Ttl::Value(ttl_observed) } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/window_size.rs
huginn-net-tcp/src/window_size.rs
use crate::tcp::{IpVersion, WindowSize};

/// Detects window size patterns following p0f's logic
///
/// Classifies `window_size` as a multiple of MSS, a power-of-two modulo,
/// a multiple of an MTU-derived value, or (when nothing matches) the raw
/// value. The checks run in a fixed priority order: MSS multiples first,
/// then modulo patterns, then MTU multiples, then MSS+header combinations.
pub fn detect_win_multiplicator(
    window_size: u16,
    mss: u16,
    total_header: u16,
    has_ts: bool,
    ip_ver: &IpVersion,
) -> WindowSize {
    const MIN_TCP4: u16 = 40; // 20 IP + 20 TCP
    const MIN_TCP6: u16 = 60; // 40 IP + 20 TCP
    const ETH_MTU: u16 = 1500; // Standard Ethernet MTU
    const TS_SIZE: u16 = 12; // TCP Timestamp option size in bytes (8 bytes for timestamps + 2 for kind/length + 2 padding)
    const MAX_MULTIPLIER: u16 = 255; // Maximum value for u8 multiplier (used in Mss and Mtu variants)

    // If there's no window or MSS is too small, return direct value
    if window_size == 0 || mss < 100 {
        return WindowSize::Value(window_size);
    }

    // 1. First check MSS multiples
    // The macro early-returns from the enclosing function on a match.
    macro_rules! check_mss_div {
        ($div:expr) => {
            if $div != 0 && window_size % $div == 0 {
                let multiplier = window_size / $div;
                if multiplier <= MAX_MULTIPLIER {
                    return WindowSize::Mss(multiplier as u8);
                }
            }
        };
    }

    // 1.1 Check basic MSS and timestamp-adjusted MSS
    if mss > 0 {
        check_mss_div!(mss);
        if has_ts && mss > TS_SIZE {
            check_mss_div!(mss.saturating_sub(TS_SIZE));
        }
    }

    // 2. Check common modulo patterns (after MSS multiples)
    // These are typical values used by different operating systems
    // Iterate in reverse order to find the largest modulo that divides window_size
    let modulos = [256, 512, 1024, 2048, 4096];
    for &modulo in modulos.iter().rev() {
        if window_size.checked_rem(modulo) == Some(0) {
            return WindowSize::Mod(modulo);
        }
    }

    // 3. Check MTU multiples
    // Same early-return shape as check_mss_div!, but yields the Mtu variant.
    macro_rules! check_mtu_div {
        ($div:expr) => {
            if $div != 0 && window_size % $div == 0 {
                let multiplier = window_size / $div;
                if multiplier <= MAX_MULTIPLIER {
                    return WindowSize::Mtu(multiplier as u8);
                }
            }
        };
    }

    // Standard Ethernet MTU
    check_mtu_div!(ETH_MTU);

    // MTU adjusted for IPv4/IPv6 (payload size after minimal headers)
    match ip_ver {
        IpVersion::V4 => {
            check_mtu_div!(ETH_MTU - MIN_TCP4);
            if has_ts {
                check_mtu_div!(ETH_MTU - MIN_TCP4 - TS_SIZE);
            }
        }
        IpVersion::V6 => {
            check_mtu_div!(ETH_MTU - MIN_TCP6);
            if has_ts {
                check_mtu_div!(ETH_MTU - MIN_TCP6 - TS_SIZE);
            }
        }
        IpVersion::Any => {}
    }

    // 4. Check special MTU cases (MSS plus observed or minimal header sizes)
    if mss > 0 {
        if total_header > 0 {
            check_mtu_div!(mss.saturating_add(total_header));
        } else {
            match ip_ver {
                IpVersion::V4 => check_mtu_div!(mss.saturating_add(MIN_TCP4)),
                IpVersion::V6 => check_mtu_div!(mss.saturating_add(MIN_TCP6)),
                _ => {}
            }
        }
    }

    // If no pattern is found, return direct value
    WindowSize::Value(window_size)
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/output.rs
huginn-net-tcp/src/output.rs
use crate::observable::ObservableTcp; use huginn_net_db::tcp::Ttl; use huginn_net_db::{Label, MatchQualityType, Type}; use std::fmt; use std::fmt::Formatter; /// Represents the output from TCP analysis. /// /// This struct contains various optional outputs that can be derived /// from analyzing TCP packets. #[derive(Debug)] pub struct TcpAnalysisResult { /// Information derived from SYN packets. pub syn: Option<SynTCPOutput>, /// Information derived from SYN-ACK packets. pub syn_ack: Option<SynAckTCPOutput>, /// Information about the Maximum Transmission Unit (MTU). pub mtu: Option<MTUOutput>, /// Information about the client system uptime. pub client_uptime: Option<UptimeOutput>, /// Information about the server system uptime. pub server_uptime: Option<UptimeOutput>, } #[derive(Debug, Clone, PartialEq)] pub struct IpPort { pub ip: std::net::IpAddr, pub port: u16, } impl IpPort { pub fn new(ip: std::net::IpAddr, port: u16) -> Self { Self { ip, port } } } /// Represents an operative system. /// /// This struct contains the name, family, variant, and kind of operative system. /// Examples: /// - name: "Linux", family: "unix", variant: "2.2.x-3.x", kind: Type::Specified /// - name: "Windows", family: "win", variant: "NT kernel 6.x", kind: Type::Specified /// - name: "iOS", family: "unix", variant: "iPhone or iPad", kind: Type::Specified #[derive(Debug)] pub struct OperativeSystem { pub name: String, pub family: Option<String>, pub variant: Option<String>, pub kind: Type, } impl From<&Label> for OperativeSystem { fn from(label: &Label) -> Self { OperativeSystem { name: label.name.clone(), family: label.class.clone(), variant: label.flavor.clone(), kind: label.ty.clone(), } } } /// The operative system with the highest quality that matches the packet. Quality is a value between 0.0 and 1.0. 1.0 is a perfect match. 
#[derive(Debug)] pub struct OSQualityMatched { pub os: Option<OperativeSystem>, pub quality: MatchQualityType, } /// Holds information derived from analyzing a TCP SYN packet (client initiation). /// /// This structure contains details about the client system based on its SYN packet, /// including the identified OS/application label and the raw TCP signature. #[derive(Debug)] pub struct SynTCPOutput { /// The source IP address and port of the client sending the SYN. pub source: IpPort, /// The destination IP address and port of the server receiving the SYN. pub destination: IpPort, /// The operative system with the highest quality that matches the SYN packet. pub os_matched: OSQualityMatched, /// The raw TCP signature extracted from the SYN packet. pub sig: ObservableTcp, } impl fmt::Display for SynTCPOutput { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, "[TCP SYN] {}:{} → {}:{}\n\ OS: {}\n\ Dist: {}\n\ Params: {}\n\ Sig: {}\n", self.source.ip, self.source.port, self.destination.ip, self.destination.port, self.os_matched.os.as_ref().map_or("???".to_string(), |os| { format!( "{}/{}/{}", os.name, os.family.as_deref().unwrap_or("???"), os.variant.as_deref().unwrap_or("??") ) }), match self.sig.matching.ittl { Ttl::Distance(_, distance) => distance, Ttl::Bad(value) => value, Ttl::Value(value) => value, Ttl::Guess(value) => value, }, self.os_matched .os .as_ref() .map_or("none".to_string(), |os| os.kind.to_string()), self.sig, ) } } /// Holds information derived from analyzing a TCP SYN+ACK packet (server response). /// /// This structure contains details about the server system based on its SYN+ACK packet, /// including the identified OS/application label and the raw TCP signature. #[derive(Debug)] pub struct SynAckTCPOutput { /// The source IP address and port of the server sending the SYN+ACK. pub source: IpPort, /// The destination IP address and port of the client receiving the SYN+ACK. 
pub destination: IpPort, /// The operative system with the highest quality that matches the SYN+ACK packet. pub os_matched: OSQualityMatched, /// The raw TCP signature extracted from the SYN+ACK packet. pub sig: ObservableTcp, } impl fmt::Display for SynAckTCPOutput { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, "[TCP SYN+ACK] {}:{} → {}:{}\n\ OS: {}\n\ Dist: {}\n\ Params: {}\n\ Sig: {}\n", self.source.ip, self.source.port, self.destination.ip, self.destination.port, self.os_matched.os.as_ref().map_or("???".to_string(), |os| { format!( "{}/{}/{}", os.name, os.family.as_deref().unwrap_or("???"), os.variant.as_deref().unwrap_or("??") ) }), match self.sig.matching.ittl { Ttl::Distance(_, distance) => distance, Ttl::Bad(value) => value, Ttl::Value(value) => value, Ttl::Guess(value) => value, }, self.os_matched .os .as_ref() .map_or("none".to_string(), |os| os.kind.to_string()), self.sig, ) } } #[derive(Debug)] pub struct MTUQualityMatched { pub link: Option<String>, pub quality: MatchQualityType, } /// Holds information about the estimated Maximum Transmission Unit (MTU) of a link. /// /// This structure contains the source and destination addresses, an estimation /// of the link type based on common MTU values, and the calculated raw MTU value. #[derive(Debug)] pub struct MTUOutput { /// The source IP address and port (usually the client). pub source: IpPort, /// The destination IP address and port (usually the server). pub destination: IpPort, /// An estimated link type (e.g., "Ethernet", "PPPoE") based on the calculated MTU. pub link: MTUQualityMatched, /// The calculated Maximum Transmission Unit (MTU) value in bytes. 
pub mtu: u16, } impl fmt::Display for MTUOutput { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!( f, "[TCP MTU] {}:{} → {}:{}\n\ Link: {}\n\ MTU: {}\n", self.source.ip, self.source.port, self.destination.ip, self.destination.port, self.link .link .as_ref() .map_or("???".to_string(), |link| link.clone()), self.mtu, ) } } /// Represents the role of the host in the connection for uptime tracking. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UptimeRole { Client, Server, } impl fmt::Display for UptimeRole { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { UptimeRole::Client => write!(f, "client"), UptimeRole::Server => write!(f, "server"), } } } /// Holds uptime information derived from TCP timestamp analysis. /// /// This structure contains the estimated uptime components (days, hours, minutes), /// the timestamp clock's wraparound period (`up_mod_days`), and the calculated /// clock frequency (`freq`). Note that the days/hours/minutes calculation based /// on the timestamp value might be approximate. #[derive(Debug)] pub struct UptimeOutput { /// The source IP address and port of the connection. pub source: IpPort, /// The destination IP address and port of the connection. pub destination: IpPort, /// The role of the host (client or server). pub role: UptimeRole, /// Estimated uptime in days, derived from the TCP timestamp value. Potentially approximate. pub days: u32, /// Estimated uptime component in hours. Potentially approximate. pub hours: u32, /// Estimated uptime component in minutes. Potentially approximate. pub min: u32, /// The calculated period in days after which the timestamp counter wraps around (2^32 ticks). pub up_mod_days: u32, /// The calculated frequency of the remote system's timestamp clock in Hertz (Hz). 
pub freq: f64, } impl fmt::Display for UptimeOutput { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let role_str = match self.role { UptimeRole::Client => "Client", UptimeRole::Server => "Server", }; write!( f, "[TCP Uptime - {}] {}:{} → {}:{}\n\ Uptime: {} days, {} hrs, {} min (modulo {} days)\n\ Freq: {:.2} Hz\n", role_str, self.source.ip, self.source.port, self.destination.ip, self.destination.port, self.days, self.hours, self.min, self.up_mod_days, self.freq, ) } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/packet_parser.rs
huginn-net-tcp/src/packet_parser.rs
/// Packet parsing utilities for different network packet formats /// /// This module provides unified parsing for various network packet formats /// from both live network capture and PCAP files: /// - Ethernet frames (most common in network interfaces) /// - Raw IP packets (tunnels, loopback interfaces) /// - NULL datalink packets (specialized capture tools) /// - Future packet formats can be added here use pnet::packet::ethernet::{EtherTypes, EthernetPacket}; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use tracing::debug; /// Represents the result of IP packet parsing #[derive(Debug)] pub enum IpPacket<'a> { Ipv4(Ipv4Packet<'a>), Ipv6(Ipv6Packet<'a>), None, } /// Datalink format types supported #[derive(Debug, Clone, Copy, PartialEq)] pub enum DatalinkFormat { /// Standard Ethernet frame (14-byte header) Ethernet, /// Raw IP packet (no datalink header) RawIp, /// NULL datalink with 4-byte header (0x1e 0x00 ...) Null, } /// Parse a network packet using multiple format strategies /// /// Tries different parsing strategies in order of likelihood: /// 1. Ethernet (most common in network interfaces and PCAPs) /// 2. Raw IP (tunnels, loopback interfaces, some PCAPs) /// 3. NULL datalink (specialized capture tools) /// /// Works with packets from both live network capture and PCAP files. 
/// /// # Arguments /// * `packet` - Raw packet bytes from network interface or PCAP file /// /// # Returns /// * `IpPacket` - The parsed IP packet or None if no valid format found pub fn parse_packet(packet: &[u8]) -> IpPacket<'_> { // Strategy 1: Try Ethernet first (most common) if let Some(parsed) = try_ethernet_format(packet) { return parsed; } // Strategy 2: Try Raw IP (no Ethernet header) if let Some(parsed) = try_raw_ip_format(packet) { return parsed; } // Strategy 3: Try NULL datalink (skip 4-byte header) if let Some(parsed) = try_null_datalink_format(packet) { return parsed; } IpPacket::None } /// Try parsing as Ethernet frame fn try_ethernet_format(packet: &[u8]) -> Option<IpPacket<'_>> { // Ethernet header is 14 bytes: [6B dst][6B src][2B ethertype] if packet.len() < 14 { return None; } let ethernet = EthernetPacket::new(packet)?; let ip_data = &packet[14..]; // Skip 14-byte Ethernet header match ethernet.get_ethertype() { EtherTypes::Ipv4 => { if let Some(ipv4) = Ipv4Packet::new(ip_data) { debug!("Parsed Ethernet IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } EtherTypes::Ipv6 => { if let Some(ipv6) = Ipv6Packet::new(ip_data) { debug!("Parsed Ethernet IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Try parsing as Raw IP (no datalink header) fn try_raw_ip_format(packet: &[u8]) -> Option<IpPacket<'_>> { if packet.len() < 20 { return None; } // Check IP version in first 4 bits let version = (packet[0] & 0xF0) >> 4; match version { 4 => { if let Some(ipv4) = Ipv4Packet::new(packet) { debug!("Parsed Raw IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } 6 => { if let Some(ipv6) = Ipv6Packet::new(packet) { debug!("Parsed Raw IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Try parsing as NULL datalink format (4-byte header) fn try_null_datalink_format(packet: &[u8]) -> Option<IpPacket<'_>> { // Check for NULL datalink signature and minimum size if packet.len() < 24 || packet[0] != 0x1e || packet[1] 
!= 0x00 { return None; } let ip_data = &packet[4..]; // Skip 4-byte NULL header let version = (ip_data[0] & 0xF0) >> 4; match version { 4 => { if let Some(ipv4) = Ipv4Packet::new(ip_data) { debug!("Parsed NULL datalink IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } 6 => { if let Some(ipv6) = Ipv6Packet::new(ip_data) { debug!("Parsed NULL datalink IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Detect the datalink format of a packet without full parsing /// /// Useful for statistics or format validation pub fn detect_datalink_format(packet: &[u8]) -> Option<DatalinkFormat> { // Check NULL datalink first (most specific signature) if packet.len() >= 24 && packet[0] == 0x1e && packet[1] == 0x00 { let ip_data = &packet[4..]; let version = (ip_data[0] & 0xF0) >> 4; if version == 4 || version == 6 { return Some(DatalinkFormat::Null); } } // Check Raw IP (check if it starts with valid IP version) if packet.len() >= 20 { let version = (packet[0] & 0xF0) >> 4; if version == 4 || version == 6 { // Additional validation for IPv4 if version == 4 { let ihl = (packet[0] & 0x0F).saturating_mul(4); if ihl >= 20 && packet.len() >= usize::from(ihl) { return Some(DatalinkFormat::RawIp); } } // Additional validation for IPv6 else if version == 6 && packet.len() >= 40 { return Some(DatalinkFormat::RawIp); } } } // Check Ethernet (least specific - needs valid EtherType) if packet.len() >= 14 { if let Some(ethernet) = EthernetPacket::new(packet) { let ethertype = ethernet.get_ethertype(); // Only consider it Ethernet if it has a valid IP EtherType if ethertype == EtherTypes::Ipv4 || ethertype == EtherTypes::Ipv6 { let ip_data = &packet[14..]; if !ip_data.is_empty() { let version = (ip_data[0] & 0xF0) >> 4; if (ethertype == EtherTypes::Ipv4 && version == 4) || (ethertype == EtherTypes::Ipv6 && version == 6) { return Some(DatalinkFormat::Ethernet); } } } } } None }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/src/uptime.rs
huginn-net-tcp/src/uptime.rs
use crate::observable::ObservableUptime;
use std::net::IpAddr;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::debug;
use ttl_cache::TtlCache;

// p0f-style constants for uptime calculation
const MIN_TWAIT: u64 = 25; // Minimum time interval (ms) - p0f value
const MAX_TWAIT: u64 = 600000; // Maximum time interval (ms) - 10 minutes
const MIN_TS_DIFF: u32 = 5; // Minimum timestamp difference (ticks) - p0f value
const TSTAMP_GRACE: u64 = 100; // Tolerance for timestamps going backward (ms) - p0f value
const MAX_FINAL_HZ: f64 = 1500.0; // Maximum frequency (Hz)
const MIN_FINAL_HZ: f64 = 1.0; // Minimum frequency (Hz)
const GUESS_HZ_1K: f64 = 1000.0; // Common frequency guess: 1000 Hz
const GUESS_HZ_100: f64 = 100.0; // Common frequency guess: 100 Hz
const GUESS_TOLERANCE: f64 = 0.10; // Tolerance for frequency guessing

// Connection tracking cache TTL
const CONNECTION_CACHE_TTL_SECS: u64 = 30; // Time-to-live for cached connection data (seconds)

/// A TCP connection 4-tuple (source/destination address and port) used as
/// part of the per-endpoint tracking key.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Connection {
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
}

/// Connection tracking key that includes direction.
/// This ensures client and server timestamps are tracked separately.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct ConnectionKey {
    /// The connection tuple
    pub connection: Connection,
    /// True if this is tracking the client (src in original packet)
    pub is_client: bool,
}

/// TCP timestamp information for a single packet
#[derive(Debug, Clone)]
pub struct TcpTimestamp {
    /// Timestamp value (TSval) from TCP option
    pub ts_val: u32,
    /// Time when packet was received (ms since epoch)
    pub recv_time_ms: u64,
    /// Flag to indicate if frequency calculation failed (p0f equivalent: FrequencyState::Invalid)
    pub is_bad_frequency: bool,
}

impl TcpTimestamp {
    /// Build a timestamp sample from an explicit TSval and receive time.
    pub fn new(ts_val: u32, recv_time_ms: u64) -> Self {
        Self { ts_val, recv_time_ms, is_bad_frequency: false }
    }

    /// Build a timestamp sample stamped with the current wall-clock time
    /// (falls back to 0 ms if the system clock is before the Unix epoch).
    pub fn now(ts_val: u32) -> Self {
        Self { ts_val, recv_time_ms: get_unix_time_ms().unwrap_or(0), is_bad_frequency: false }
    }

    /// Create a marker timestamp indicating bad frequency (p0f equivalent: FrequencyState::Invalid)
    pub fn bad_frequency_marker() -> Self {
        Self { ts_val: 0, recv_time_ms: 0, is_bad_frequency: true }
    }
}

/// Represents the state of a frequency calculation
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FrequencyState {
    /// Frequency not yet calculated (waiting for second packet)
    NotCalculated,
    /// Frequency calculation failed or is invalid
    Invalid,
    /// Valid frequency in Hz
    Valid(u32),
}

impl FrequencyState {
    /// Returns `true` if the frequency state is valid
    pub fn is_valid(&self) -> bool {
        matches!(self, FrequencyState::Valid(_))
    }

    /// Returns `true` if the frequency state is invalid
    pub fn is_invalid(&self) -> bool {
        matches!(self, FrequencyState::Invalid)
    }

    /// Returns the frequency in Hz when the state is `Valid`, `None` otherwise.
    pub fn value(&self) -> Option<u32> {
        match self {
            FrequencyState::Valid(freq) => Some(*freq),
            _ => None,
        }
    }
}

/// Uptime tracking information for a host
#[derive(Debug, Clone)]
pub struct UptimeTracker {
    /// Last SYN timestamp (for client tracking)
    pub last_syn: Option<TcpTimestamp>,
    /// Last SYN+ACK timestamp (for server tracking)
    pub last_syn_ack: Option<TcpTimestamp>,
    /// Client frequency state
    pub cli_freq: FrequencyState,
    /// Server frequency state
    pub srv_freq: FrequencyState,
    /// Last calculated uptime information
    pub last_uptime: Option<ObservableUptime>,
}

impl UptimeTracker {
    /// Create a tracker with no samples and both frequencies not yet calculated.
    pub fn new() -> Self {
        Self {
            last_syn: None,
            last_syn_ack: None,
            cli_freq: FrequencyState::NotCalculated,
            srv_freq: FrequencyState::NotCalculated,
            last_uptime: None,
        }
    }

    /// Mark client frequency as bad/invalid
    pub fn mark_client_frequency_bad(&mut self) {
        self.cli_freq = FrequencyState::Invalid;
    }

    /// Mark server frequency as bad/invalid
    pub fn mark_server_frequency_bad(&mut self) {
        self.srv_freq = FrequencyState::Invalid;
    }

    /// Check if client frequency is valid
    pub fn has_valid_client_frequency(&self) -> bool {
        self.cli_freq.is_valid()
    }

    /// Check if server frequency is valid
    pub fn has_valid_server_frequency(&self) -> bool {
        self.srv_freq.is_valid()
    }
}

impl Default for UptimeTracker {
    fn default() -> Self {
        Self::new()
    }
}

/// Current wall-clock time in milliseconds since the Unix epoch, or `None`
/// if the system clock reports a time before the epoch.
fn get_unix_time_ms() -> Option<u64> {
    let now = SystemTime::now();
    now.duration_since(UNIX_EPOCH)
        .ok()
        .map(|duration| duration.as_millis() as u64)
}

/// Snap `raw_freq` to `base_guess` when it is close to an integer multiple of
/// it: the raw value is divided by the rounded multiplier and accepted if the
/// normalized frequency falls within `base_guess * tolerance` of `base_guess`.
/// Returns `None` when the guess does not fit (or inputs are degenerate).
fn guess_frequency(raw_freq: f64, base_guess: f64, tolerance: f64) -> Option<f64> {
    if raw_freq <= 0.0 || base_guess <= 0.0 || !raw_freq.is_finite() {
        return None;
    }
    let multiplier = (raw_freq / base_guess).round();
    if multiplier <= 0.0 {
        return None;
    }
    let normalized = raw_freq / multiplier;
    if (normalized - base_guess).abs() <= base_guess * tolerance {
        Some(base_guess)
    } else {
        None
    }
}

/// Smart frequency rounding following p0f methodology
/// This function rounds frequencies to common OS values based on ranges
fn round_frequency_p0f_style(freq: f64) -> u32 {
    // Truncate toward zero; the ranges below then pick a rounding granularity.
    let freq = freq as u32;
    match freq {
        0 => 1, // Special case: 0 Hz -> 1 Hz
        1..=10 => freq, // No rounding for very low frequencies
        11..=50 => freq.saturating_add(3).saturating_div(5).saturating_mul(5), // Round to multiples of 5: 11->10, 13->15, 18->20
        51..=100 => freq.saturating_add(7).saturating_div(10).saturating_mul(10), // Round to multiples of 10: 51->50, 55->60, 99->100
        101..=500 => freq
            .saturating_add(33)
            .saturating_div(50)
            .saturating_mul(50), // Round to multiples of 50: 101->100, 125->150, 248->250
        _ => freq
            .saturating_add(67)
            .saturating_div(100)
            .saturating_mul(100), // Round to multiples of 100: 501->500, 650->700, 997->1000
    }
}

/// Calculate frequency between two timestamps with p0f-style backward handling
///
/// Returns the raw timestamp-clock frequency in Hz, or an `Err` with a
/// human-readable reason when the sample pair cannot yield a sane frequency.
fn calculate_frequency_p0f_style(
    current: &TcpTimestamp,
    reference: &TcpTimestamp,
) -> Result<f64, String> {
    // Calculate time and timestamp differences
    let ms_diff = current.recv_time_ms.saturating_sub(reference.recv_time_ms);
    let ts_diff = current.ts_val.wrapping_sub(reference.ts_val);

    // Validate time interval
    if ms_diff < MIN_TWAIT {
        return Err(format!("Time interval too short: {ms_diff}ms < {MIN_TWAIT}ms"));
    }
    if ms_diff > MAX_TWAIT {
        return Err(format!("Time interval too long: {ms_diff}ms > {MAX_TWAIT}ms"));
    }

    // First, detect if timestamp went backward (p0f-style detection)
    // If the wrapped difference exceeds its bitwise complement, the raw
    // TSval moved backwards (the subtraction wrapped around u32).
    let is_backward = ts_diff > !ts_diff;

    if is_backward {
        // Timestamp went backward
        let inverted_diff = !ts_diff;

        // Validate minimum timestamp difference for backward movement
        if inverted_diff < MIN_TS_DIFF {
            return Err(format!(
                "Backward timestamp difference too small: {inverted_diff} ticks < {MIN_TS_DIFF} ticks (MIN_TS_DIFF)"
            ));
        }

        // p0f validation: reject if within grace period AND backward amount is too large
        // Formula: (~ts_diff) / 1000 < MAX_TSCALE / TSTAMP_GRACE
        // This rejects backwards movements that would imply unreasonably high frequencies
        if ms_diff < TSTAMP_GRACE {
            let max_backward_ticks = (MAX_FINAL_HZ / TSTAMP_GRACE as f64) * 1000.0;
            if (inverted_diff as f64) > max_backward_ticks {
                return Err(format!(
                    "Backward timestamp too large within grace period: {inverted_diff} ticks > {max_backward_ticks:.0} max"
                ));
            }
        }
    } else {
        // Forward movement - validate minimum difference
        if ts_diff < MIN_TS_DIFF {
            return Err(format!(
                "Timestamp difference too small: {ts_diff} ticks < {MIN_TS_DIFF} ticks (MIN_TS_DIFF)"
            ));
        }
    }

    // Calculate frequency with backward timestamp handling
    let effective_ms_diff = ms_diff.max(1);
    let raw_freq = if ts_diff > !ts_diff {
        // Timestamp went backward - use inverted difference
        let inverted_diff = !ts_diff;
        (inverted_diff as f64 * 1000.0) / (effective_ms_diff as f64)
    } else {
        // Normal forward progression
        (ts_diff as f64 * 1000.0) / (effective_ms_diff as f64)
    };

    // Validate frequency range
    if !(MIN_FINAL_HZ..=MAX_FINAL_HZ).contains(&raw_freq) {
        return Err(format!(
            "Frequency out of valid range: {raw_freq:.2} Hz (valid: {MIN_FINAL_HZ}-{MAX_FINAL_HZ} Hz), ms_diff={ms_diff}, ts_diff={ts_diff}"
        ));
    }

    Ok(raw_freq)
}

/// New improved uptime calculation function using UptimeTracker
///
/// NOTE(review): only the client-side fields (`last_syn`, `cli_freq`) of the
/// tracker are read/updated here; server-side tracking presumably happens
/// elsewhere — confirm against callers.
pub fn calculate_uptime_improved(
    tracker: &mut UptimeTracker,
    ts_val: u32,
    from_client: bool,
) -> Option<ObservableUptime> {
    let current_ts = TcpTimestamp::now(ts_val);

    if from_client {
        // Store SYN timestamp for client
        tracker.last_syn = Some(current_ts);
        return None; // SYN packets don't calculate uptime
    } else {
        // This is a server response (SYN+ACK or ACK)
        // Try to calculate uptime using stored SYN timestamp
        if let Some(ref syn_ts) = tracker.last_syn {
            // Check if we already have a valid client frequency
            if let Some(freq_hz) = tracker.cli_freq.value() {
                // Use existing frequency to calculate uptime
                let uptime_info = calculate_uptime_from_frequency(ts_val, freq_hz as f64);
                tracker.last_uptime = Some(uptime_info.clone());
                return Some(uptime_info);
            }

            // Check if client frequency is marked as bad
            if tracker.cli_freq.is_invalid() {
                return None;
            }

            // Try to calculate new frequency
            match calculate_frequency_p0f_style(&current_ts, syn_ts) {
                Ok(raw_freq) => {
                    // Apply intelligent rounding
                    let final_freq = if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_1K, GUESS_TOLERANCE)
                    {
                        freq
                    } else if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_100, GUESS_TOLERANCE)
                    {
                        freq
                    } else {
                        round_frequency_p0f_style(raw_freq) as f64
                    };

                    // Store the calculated frequency
                    tracker.cli_freq = FrequencyState::Valid(final_freq as u32);

                    // Calculate uptime
                    let uptime_info = calculate_uptime_from_frequency(ts_val, final_freq);
                    tracker.last_uptime = Some(uptime_info.clone());
                    return Some(uptime_info);
                }
                Err(_) => {
                    // Mark frequency as bad to avoid repeated attempts
                    tracker.mark_client_frequency_bad();
                    return None;
                }
            }
        }
    }

    None
}

/// Calculate uptime from timestamp and known frequency
fn calculate_uptime_from_frequency(ts_val: u32, freq_hz: f64) -> ObservableUptime {
    // Uptime is simply TSval divided by the clock frequency; the split into
    // days/hours/minutes truncates, so the result is approximate.
    let uptime_seconds = ts_val as f64 / freq_hz;
    let days = (uptime_seconds / (24.0 * 3600.0)) as u32;
    let hours = ((uptime_seconds % (24.0 * 3600.0)) / 3600.0) as u32;
    let minutes = ((uptime_seconds % 3600.0) / 60.0) as u32;

    // Calculate wrap-around period
    let up_mod_days = (u32::MAX as f64 / (freq_hz * 60.0 * 60.0 * 24.0)) as u32;

    ObservableUptime { days, hours, min: minutes, up_mod_days, freq: freq_hz }
}

/// Track per-endpoint TCP timestamps and derive uptime once two samples from
/// the same endpoint are available in the cache.
///
/// Returns `(client_uptime, server_uptime)`; at most one side is `Some` per
/// call, selected by `from_client`. On a failed frequency calculation the
/// endpoint is poisoned with a bad-frequency marker so it is not retried
/// until the cache entry expires.
pub fn check_ts_tcp(
    connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>,
    connection: &Connection,
    from_client: bool,
    ts_val: u32,
) -> (Option<ObservableUptime>, Option<ObservableUptime>) {
    // Create a key that identifies this endpoint's timestamps
    // Client and server timestamps are tracked separately
    let tracking_key = ConnectionKey { connection: connection.clone(), is_client: from_client };

    // Create TcpTimestamp for current packet
    let current_ts = TcpTimestamp::now(ts_val);

    if from_client {
        // This is a client packet (SYN or ACK)
        // Check if we have a previous timestamp from the same client
        if let Some(reference_ts) = connection_tracker.get(&tracking_key) {
            // p0f: if frequency is marked as bad (equivalent to FrequencyState::Invalid), don't recalculate
            if reference_ts.is_bad_frequency {
                debug!(
                    "Client frequency already marked as bad for {}:{}, skipping",
                    connection.src_ip, connection.src_port
                );
                return (None, None);
            }

            // We have a previous packet from this client, calculate uptime
            debug!(
                "Client packet: {}:{} -> {}:{}, ref_ts={}, current_ts={}, diff={}",
                connection.src_ip,
                connection.src_port,
                connection.dst_ip,
                connection.dst_port,
                reference_ts.ts_val,
                ts_val,
                ts_val.wrapping_sub(reference_ts.ts_val)
            );

            match calculate_frequency_p0f_style(&current_ts, reference_ts) {
                Ok(raw_freq) => {
                    // Apply intelligent rounding
                    let final_freq = if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_1K, GUESS_TOLERANCE)
                    {
                        freq
                    } else if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_100, GUESS_TOLERANCE)
                    {
                        freq
                    } else {
                        round_frequency_p0f_style(raw_freq) as f64
                    };

                    // Calculate uptime using the improved method
                    let uptime_info = calculate_uptime_from_frequency(ts_val, final_freq);

                    debug!(
                        "CLIENT Uptime detected: {:.2} Hz -> {} Hz, {} days {} hrs {} min (mod {} days)",
                        raw_freq,
                        final_freq,
                        uptime_info.days,
                        uptime_info.hours,
                        uptime_info.min,
                        uptime_info.up_mod_days
                    );

                    return (Some(uptime_info), None);
                }
                Err(error) => {
                    debug!("Client uptime calculation failed: {}", error);
                    // p0f: Mark frequency as bad to avoid recalculation (equivalent to FrequencyState::Invalid)
                    // Store a bad frequency marker
                    connection_tracker.insert(
                        tracking_key.clone(),
                        TcpTimestamp::bad_frequency_marker(),
                        Duration::new(CONNECTION_CACHE_TTL_SECS, 0),
                    );
                }
            }
        } else {
            // First packet from this client, store it
            debug!(
                "Storing first client packet: {}:{} -> {}:{}, ts_val={}",
                connection.src_ip,
                connection.src_port,
                connection.dst_ip,
                connection.dst_port,
                ts_val
            );
            connection_tracker.insert(
                tracking_key,
                current_ts,
                Duration::new(CONNECTION_CACHE_TTL_SECS, 0),
            );
        }
    } else {
        // This is a server packet (SYN+ACK or ACK)
        // Check if we have a previous timestamp from the same server
        if let Some(reference_ts) = connection_tracker.get(&tracking_key) {
            // p0f: if frequency is marked as bad (equivalent to FrequencyState::Invalid), don't recalculate
            if reference_ts.is_bad_frequency {
                debug!(
                    "Server frequency already marked as bad for {}:{}, skipping",
                    connection.src_ip, connection.src_port
                );
                return (None, None);
            }

            // We have a previous packet from this server, calculate uptime
            debug!(
                "Server packet: {}:{} -> {}:{}, ref_ts={}, current_ts={}, diff={}",
                connection.src_ip,
                connection.src_port,
                connection.dst_ip,
                connection.dst_port,
                reference_ts.ts_val,
                ts_val,
                ts_val.wrapping_sub(reference_ts.ts_val)
            );

            match calculate_frequency_p0f_style(&current_ts, reference_ts) {
                Ok(raw_freq) => {
                    // Apply intelligent rounding
                    let final_freq = if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_1K, GUESS_TOLERANCE)
                    {
                        freq
                    } else if let Some(freq) =
                        guess_frequency(raw_freq, GUESS_HZ_100, GUESS_TOLERANCE)
                    {
                        freq
                    } else {
                        round_frequency_p0f_style(raw_freq) as f64
                    };

                    // Calculate uptime using the improved method
                    let uptime_info = calculate_uptime_from_frequency(ts_val, final_freq);

                    debug!(
                        "SERVER Uptime detected: {:.2} Hz -> {} Hz, {} days {} hrs {} min (mod {} days)",
                        raw_freq,
                        final_freq,
                        uptime_info.days,
                        uptime_info.hours,
                        uptime_info.min,
                        uptime_info.up_mod_days
                    );

                    return (None, Some(uptime_info));
                }
                Err(error) => {
                    debug!("Server uptime calculation failed: {}", error);
                    // p0f: Mark frequency as bad to avoid recalculation
                    // Store a bad frequency marker
                    connection_tracker.insert(
                        tracking_key.clone(),
                        TcpTimestamp::bad_frequency_marker(),
                        Duration::new(CONNECTION_CACHE_TTL_SECS, 0),
                    );
                }
            }
        } else {
            // First packet from this server, store it
            debug!(
                "Storing first server packet: {}:{} -> {}:{}, ts_val={}",
                connection.src_ip,
                connection.src_port,
                connection.dst_ip,
                connection.dst_port,
                ts_val
            );
            connection_tracker.insert(
                tracking_key,
                current_ts,
                Duration::new(CONNECTION_CACHE_TTL_SECS, 0),
            );
        }
    }

    (None, None)
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/raw_filter_tests.rs
huginn-net-tcp/tests/raw_filter_tests.rs
use std::net::IpAddr; use huginn_net_tcp::filter::{FilterConfig, FilterMode, IpFilter, PortFilter}; /// Helper to create an IPv4 TCP packet (minimal: IP header + TCP ports) fn create_ipv4_tcp_packet( src_ip: [u8; 4], dst_ip: [u8; 4], src_port: u16, dst_port: u16, ) -> Vec<u8> { let mut packet = vec![0u8; 40]; packet[0] = 0x45; // IPv4, IHL=5 packet[9] = 6; // TCP protocol packet[12..16].copy_from_slice(&src_ip); packet[16..20].copy_from_slice(&dst_ip); packet[20..22].copy_from_slice(&src_port.to_be_bytes()); packet[22..24].copy_from_slice(&dst_port.to_be_bytes()); packet } /// Helper to create an Ethernet frame with IPv4 TCP packet fn create_ethernet_ipv4_tcp_packet( src_ip: [u8; 4], dst_ip: [u8; 4], src_port: u16, dst_port: u16, ) -> Vec<u8> { let mut packet = vec![0u8; 54]; packet[12..14].copy_from_slice(&[0x08, 0x00]); // IPv4 EtherType packet[14] = 0x45; // IPv4, IHL=5 packet[23] = 6; // TCP protocol packet[26..30].copy_from_slice(&src_ip); packet[30..34].copy_from_slice(&dst_ip); packet[34..36].copy_from_slice(&src_port.to_be_bytes()); packet[36..38].copy_from_slice(&dst_port.to_be_bytes()); packet } #[test] fn test_raw_filter_ipv4_raw_packet() { let packet = create_ipv4_tcp_packet([192, 168, 1, 100], [8, 8, 8, 8], 12345, 443); assert_eq!(packet.len(), 40); assert_eq!(packet[0], 0x45); // IPv4, IHL=5 assert_eq!(packet[9], 6); // TCP protocol } #[test] fn test_raw_filter_ethernet_frame() { let packet = create_ethernet_ipv4_tcp_packet([192, 168, 1, 100], [8, 8, 8, 8], 12345, 443); assert_eq!(packet.len(), 54); assert_eq!(&packet[12..14], &[0x08, 0x00]); // IPv4 EtherType assert_eq!(packet[14], 0x45); // IPv4, IHL=5 assert_eq!(packet[23], 6); // TCP protocol } #[test] fn test_raw_filter_allows_matching_destination_port() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" 
.parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_blocks_non_matching_destination_port() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 80)); } #[test] fn test_raw_filter_allows_matching_source_ip() { let filter = FilterConfig::new().mode(FilterMode::Allow).with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_blocks_non_matching_source_ip() { let filter = FilterConfig::new().mode(FilterMode::Allow).with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_combined_filters() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)) .with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); 
assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 80)); let wrong_src: IpAddr = "10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&wrong_src, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_deny_mode() { let filter = FilterConfig::new() .mode(FilterMode::Deny) .with_port_filter(PortFilter::new().destination(22)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 22)); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_no_filter_allows_all() { let filter = FilterConfig::new(); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); assert!(filter.should_process(&src_ip, &dst_ip, 54321, 80)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/tcp_process.rs
huginn-net-tcp/tests/tcp_process.rs
use huginn_net_tcp::tcp_process::{from_client, from_server, is_valid}; use pnet::packet::tcp::TcpFlags; #[test] fn test_from_client() { assert!(from_client(TcpFlags::SYN)); assert!(!from_client(TcpFlags::SYN | TcpFlags::ACK)); assert!(!from_client(TcpFlags::ACK)); } #[test] fn test_from_server() { assert!(from_server(TcpFlags::SYN | TcpFlags::ACK)); assert!(!from_server(TcpFlags::SYN)); assert!(!from_server(TcpFlags::ACK)); assert!(!from_server(TcpFlags::RST)); } #[test] fn test_is_valid() { assert!(is_valid(TcpFlags::SYN, TcpFlags::SYN)); assert!(!is_valid(TcpFlags::SYN | TcpFlags::FIN, TcpFlags::SYN)); assert!(!is_valid(TcpFlags::SYN | TcpFlags::RST, TcpFlags::SYN)); assert!(!is_valid(TcpFlags::FIN | TcpFlags::RST, TcpFlags::FIN | TcpFlags::RST)); assert!(!is_valid(TcpFlags::SYN, 0)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/filter_tests.rs
huginn-net-tcp/tests/filter_tests.rs
use huginn_net_tcp::{FilterConfig, FilterMode, IpFilter, PortFilter, SubnetFilter}; use std::net::IpAddr; #[test] fn test_port_filter_destination() { let filter = PortFilter::new().destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_source() { let filter = PortFilter::new().source(12345); assert!(filter.matches(12345, 80)); assert!(!filter.matches(54321, 80)); } #[test] fn test_port_filter_list() { let filter = PortFilter::new().destination_list(vec![80, 443, 8080]); assert!(filter.matches(12345, 80)); assert!(filter.matches(12345, 443)); assert!(filter.matches(12345, 8080)); assert!(!filter.matches(12345, 22)); } #[test] fn test_port_filter_range() { let filter = PortFilter::new().destination_range(8000..9000); assert!(filter.matches(12345, 8000)); assert!(filter.matches(12345, 8500)); assert!(filter.matches(12345, 8999)); assert!(!filter.matches(12345, 9000)); assert!(!filter.matches(12345, 7999)); } #[test] fn test_port_filter_any_port() { let filter = PortFilter::new().destination(443).any_port(); assert!(filter.matches(12345, 443)); assert!(filter.matches(443, 80)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_combined() { let filter = PortFilter::new().source(12345).destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); assert!(!filter.matches(54321, 443)); } #[test] fn test_ip_filter_v4() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches( &ip_other, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_ip_filter_v6() { let 
filter = IpFilter::new() .allow("2001:db8::1") .unwrap_or_else(|e| panic!("Invalid IPv6 address: {e}")); let ip_match: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_other: IpAddr = "2001:db8::2" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches( &ip_other, &"2001:db8::3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_ip_filter_source_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .source_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches(&ip_other, &ip_match)); } #[test] fn test_ip_filter_destination_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .destination_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches(&ip_match, &ip_other)); } #[test] fn test_subnet_filter_v4() { let filter = SubnetFilter::new() .allow("192.168.1.0/24") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "192.168.2.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_subnet_filter_v6() { let filter = SubnetFilter::new() .allow("2001:db8::/32") 
.unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_out: IpAddr = "2001:db9::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"2001:dba::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_subnet_filter_multiple() { let filter = SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")); let ip1: IpAddr = "10.1.2.3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "172.16.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip3: IpAddr = "192.168.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip1, &ip_out)); assert!(filter.matches(&ip2, &ip_out)); assert!(filter.matches(&ip3, &ip_out)); assert!(!filter.matches( &ip_out, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_combined_filter_allow() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)) .with_subnet_filter( SubnetFilter::new() .allow("192.168.0.0/16") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")), ); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_in, &ip_out, 12345, 443)); assert!(!filter.should_process(&ip_in, &ip_out, 12345, 80)); assert!(!filter.should_process( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_combined_filter_deny() { let filter = FilterConfig::new() 
.mode(FilterMode::Deny) .with_subnet_filter( SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")), ); let ip_private: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_public: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(!filter.should_process(&ip_private, &ip_public, 12345, 443)); assert!(filter.should_process(&ip_public, &ip_public, 12345, 443)); } #[test] fn test_no_filters() { let filter = FilterConfig::new(); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(filter.should_process(&ip2, &ip1, 80, 12345)); } #[test] fn test_port_only_filter() { let filter = FilterConfig::new().with_port_filter(PortFilter::new().destination(443)); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(!filter.should_process(&ip1, &ip2, 12345, 80)); } #[test] fn test_ip_only_filter() { let filter = FilterConfig::new().with_ip_filter( IpFilter::new() .allow("8.8.8.8") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")), ); let ip_match: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_match, &ip_other, 12345, 443)); assert!(filter.should_process(&ip_other, &ip_match, 12345, 443)); assert!(!filter.should_process( &ip_other, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_invalid_ip() { let result = 
IpFilter::new().allow("not-an-ip"); assert!(result.is_err()); } #[test] fn test_invalid_cidr() { let result = SubnetFilter::new().allow("192.168.1.0/99"); assert!(result.is_err()); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/endianness.rs
huginn-net-tcp/tests/endianness.rs
#[test] fn test_tcp_timestamp_endianness() { // Simulate TCP timestamp option data in big-endian format // Option 8 (TIMESTAMPS), length 10, TSval=0x12345678, TSecr=0x87654321 let tcp_option_data = [ 8, 10, // Kind=8, Length=10 0x12, 0x34, 0x56, 0x78, // TSval = 0x12345678 (big-endian) 0x87, 0x65, 0x43, 0x21, // TSecr = 0x87654321 (big-endian) ]; // Extract TSval using big-endian (correct according to RFC 1323) let ts_val_bytes: [u8; 4] = [tcp_option_data[2], tcp_option_data[3], tcp_option_data[4], tcp_option_data[5]]; let ts_val_be = u32::from_be_bytes(ts_val_bytes); // Extract TSval using native-endian (incorrect) let ts_val_ne = u32::from_ne_bytes(ts_val_bytes); // On little-endian systems, these should be different println!("Big-endian TSval: 0x{ts_val_be:08x} ({ts_val_be})",); println!("Native-endian TSval: 0x{ts_val_ne:08x} ({ts_val_ne})"); // The correct value should be 0x12345678 assert_eq!(ts_val_be, 0x12345678); // On little-endian systems, native-endian would give 0x78563412 #[cfg(target_endian = "little")] { assert_eq!(ts_val_ne, 0x78563412); assert_ne!( ts_val_be, ts_val_ne, "Big-endian and native-endian should be different on little-endian systems" ); } // On big-endian systems, they would be equal #[cfg(target_endian = "big")] { assert_eq!( ts_val_be, ts_val_ne, "Big-endian and native-endian should be equal on big-endian systems" ); } } #[test] fn test_timestamp_zero_detection() { // Test to verify that zero timestamp detection works correctly let zero_timestamp = [ 8, 10, // Kind=8, Length=10 0x00, 0x00, 0x00, 0x00, // TSval = 0 (big-endian) 0x00, 0x00, 0x00, 0x00, // TSecr = 0 (big-endian) ]; let ts_val_bytes: [u8; 4] = [zero_timestamp[2], zero_timestamp[3], zero_timestamp[4], zero_timestamp[5]]; let ts_val = u32::from_be_bytes(ts_val_bytes); assert_eq!(ts_val, 0, "Zero timestamp should be detected correctly"); } #[test] fn test_realistic_timestamp_values() { // Test with realistic timestamp values // Simulate a Linux system with 1000 Hz that has 
been up for ~1 hour // 1 hour = 3600 seconds = 3,600,000 ticks at 1000 Hz let realistic_timestamp = [ 8, 10, // Kind=8, Length=10 0x00, 0x36, 0xEE, 0x80, // TSval = 3,600,000 (big-endian) 0x00, 0x00, 0x00, 0x00, // TSecr = 0 (big-endian) ]; let ts_val_bytes: [u8; 4] = [ realistic_timestamp[2], realistic_timestamp[3], realistic_timestamp[4], realistic_timestamp[5], ]; let ts_val = u32::from_be_bytes(ts_val_bytes); assert_eq!(ts_val, 3_600_000); // Calculate estimated uptime at 1000 Hz let estimated_uptime_seconds = ts_val / 1000; let estimated_uptime_hours = estimated_uptime_seconds / 3600; assert_eq!(estimated_uptime_hours, 1, "Should estimate ~1 hour of uptime"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/frequency_rounding.rs
huginn-net-tcp/tests/frequency_rounding.rs
//! Tests for p0f-style intelligent frequency rounding #[test] fn test_p0f_rounding_algorithm_behavior() { // Range 0: Special case // 0 Hz -> 1 Hz // Range 1-10: No rounding // 1 Hz -> 1 Hz, 5 Hz -> 5 Hz, 10 Hz -> 10 Hz // Range 11-50: Round to multiples of 5 // Formula: (freq + 3) / 5 * 5 // Examples: 11 -> (11+3)/5*5 = 14/5*5 = 2*5 = 10 // 13 -> (13+3)/5*5 = 16/5*5 = 3*5 = 15 // 18 -> (18+3)/5*5 = 21/5*5 = 4*5 = 20 let test_cases_range_11_50 = [ (11, 10), // (11+3)/5*5 = 14/5*5 = 2*5 = 10 (12, 15), // (12+3)/5*5 = 15/5*5 = 3*5 = 15 (13, 15), // (13+3)/5*5 = 16/5*5 = 3*5 = 15 (17, 20), // (17+3)/5*5 = 20/5*5 = 4*5 = 20 (18, 20), // (18+3)/5*5 = 21/5*5 = 4*5 = 20 (22, 25), // (22+3)/5*5 = 25/5*5 = 5*5 = 25 (48, 50), // (48+3)/5*5 = 51/5*5 = 10*5 = 50 ]; for (input, expected) in test_cases_range_11_50 { let calculated = (input + 3) / 5 * 5; assert_eq!(calculated, expected, "Range 11-50: {input} Hz should round to {expected} Hz"); } // Range 51-100: Round to multiples of 10 // Formula: (freq + 7) / 10 * 10 let test_cases_range_51_100 = [ (51, 50), // (51+7)/10*10 = 58/10*10 = 5*10 = 50 (55, 60), // (55+7)/10*10 = 62/10*10 = 6*10 = 60 (64, 70), // (64+7)/10*10 = 71/10*10 = 7*10 = 70 (95, 100), // (95+7)/10*10 = 102/10*10 = 10*10 = 100 (99, 100), // (99+7)/10*10 = 106/10*10 = 10*10 = 100 ]; for (input, expected) in test_cases_range_51_100 { let calculated = (input + 7) / 10 * 10; assert_eq!(calculated, expected, "Range 51-100: {input} Hz should round to {expected} Hz"); } // Range 101-500: Round to multiples of 50 // Formula: (freq + 33) / 50 * 50 let test_cases_range_101_500 = [ (101, 100), // (101+33)/50*50 = 134/50*50 = 2*50 = 100 (125, 150), // (125+33)/50*50 = 158/50*50 = 3*50 = 150 (248, 250), // (248+33)/50*50 = 281/50*50 = 5*50 = 250 (275, 300), // (275+33)/50*50 = 308/50*50 = 6*50 = 300 (499, 500), // (499+33)/50*50 = 532/50*50 = 10*50 = 500 ]; for (input, expected) in test_cases_range_101_500 { let calculated = (input + 33) / 50 * 50; assert_eq!(calculated, 
expected, "Range 101-500: {input} Hz should round to {expected} Hz"); } // Range >500: Round to multiples of 100 // Formula: (freq + 67) / 100 * 100 let test_cases_range_above_500 = [ (501, 500), // (501+67)/100*100 = 568/100*100 = 5*100 = 500 (650, 700), // (650+67)/100*100 = 717/100*100 = 7*100 = 700 (997, 1000), // (997+67)/100*100 = 1064/100*100 = 10*100 = 1000 (1050, 1100), // (1050+67)/100*100 = 1117/100*100 = 11*100 = 1100 ]; for (input, expected) in test_cases_range_above_500 { let calculated = (input + 67) / 100 * 100; assert_eq!(calculated, expected, "Range >500: {input} Hz should round to {expected} Hz"); } } #[test] fn test_common_os_frequencies() { let common_frequencies = [ // Linux frequencies (100.0, "Linux 2.4 default"), (250.0, "Linux 2.6+ CONFIG_HZ=250"), (1000.0, "Linux desktop CONFIG_HZ=1000"), // FreeBSD/OpenBSD (100.0, "FreeBSD/OpenBSD default"), // Windows (1000.0, "Windows 7/8/10/11"), // macOS (100.0, "macOS traditional"), // Embedded systems (10.0, "Embedded low-power"), (50.0, "Embedded moderate"), ]; for (freq, description) in common_frequencies { // Test that these frequencies are in valid range assert!( (1.0..=1500.0).contains(&freq), "{description} frequency {freq} Hz should be in valid range" ); // Test rounding behavior for slightly off values let slightly_off = freq * 1.05; // 5% higher assert!( slightly_off <= 1500.0, "Slightly off {description} frequency {slightly_off} Hz should still be processable" ); } } #[test] fn test_edge_cases() { // Boundary values for each range let boundary_tests = [ // Range boundaries (10, "Upper bound of no-rounding range"), (11, "Lower bound of 5-multiple range"), (50, "Upper bound of 5-multiple range"), (51, "Lower bound of 10-multiple range"), (100, "Upper bound of 10-multiple range"), (101, "Lower bound of 50-multiple range"), (500, "Upper bound of 50-multiple range"), (501, "Lower bound of 100-multiple range"), ]; for (freq, description) in boundary_tests { // Just verify these are reasonable 
values assert!( (1..=1500).contains(&freq), "Boundary case {description} ({freq} Hz) should be in valid range" ); } } #[test] fn test_rounding_precision() { // Simulate network jitter affecting frequency calculation let base_frequencies = [100.0, 250.0, 1000.0]; let jitter_percentages = [0.02, 0.05, 0.08]; // 2%, 5%, 8% jitter for base_freq in base_frequencies { for jitter in jitter_percentages { let jittered_up = base_freq * (1.0 + jitter); let jittered_down = base_freq * (1.0 - jitter); // Both jittered values should be in valid range let jitter_pct = jitter * 100.0; assert!( (1.0..=1500.0).contains(&jittered_up), "Jittered frequency {jittered_up} Hz ({jitter_pct}% up from {base_freq}) should be valid" ); assert!( (1.0..=1500.0).contains(&jittered_down), "Jittered frequency {jittered_down} Hz ({jitter_pct}% down from {base_freq}) should be valid" ); } } } #[test] fn test_frequency_families() { // 1000 Hz family (with 10% tolerance) let hz_1000_family = [ 900.0, // 10% below 950.0, // 5% below 1000.0, // Exact 1050.0, // 5% above 1100.0, // 10% above ]; for freq in hz_1000_family { let tolerance = 0.10; let diff_ratio = (freq - 1000.0_f64).abs() / 1000.0; // Should be recognized as 1000 Hz family if within tolerance assert!(diff_ratio <= tolerance, "{freq} Hz should be within 10% of 1000 Hz family"); } // 100 Hz family (with 10% tolerance) let hz_100_family = [ 90.0, // 10% below 95.0, // 5% below 100.0, // Exact 105.0, // 5% above 110.0, // 10% above ]; for freq in hz_100_family { let tolerance = 0.10; let diff_ratio = (freq - 100.0_f64).abs() / 100.0; // Should be recognized as 100 Hz family if within tolerance assert!(diff_ratio <= tolerance, "{freq} Hz should be within 10% of 100 Hz family"); } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/packet_hash.rs
huginn-net-tcp/tests/packet_hash.rs
use huginn_net_tcp::packet_hash::*; #[test] fn test_hash_ipv4_source_ip() { // Ethernet + IPv4 packet let mut packet = vec![0u8; 34]; // Ethernet header packet[12] = 0x08; // IPv4 EtherType packet[13] = 0x00; // IPv4 header (starts at byte 14) packet[14] = 0x45; // Version 4, IHL 5 // Source IP: 192.168.1.1 packet[26] = 192; packet[27] = 168; packet[28] = 1; packet[29] = 1; // Dest IP: 10.0.0.1 packet[30] = 10; packet[31] = 0; packet[32] = 0; packet[33] = 1; let hash1 = hash_source_ip(&packet); let hash2 = hash_source_ip(&packet); // Same packet should always produce same hash assert_eq!(hash1, hash2); } #[test] fn test_hash_ipv4_different_sources() { let mut packet1 = vec![0u8; 34]; let mut packet2 = vec![0u8; 34]; // Setup both as valid IPv4 packets for packet in [&mut packet1, &mut packet2] { packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; } // Packet 1: 192.168.1.1 packet1[26..30].copy_from_slice(&[192, 168, 1, 1]); packet1[30..34].copy_from_slice(&[10, 0, 0, 1]); // Packet 2: 192.168.1.2 (different source IP) packet2[26..30].copy_from_slice(&[192, 168, 1, 2]); packet2[30..34].copy_from_slice(&[10, 0, 0, 1]); let hash1 = hash_source_ip(&packet1); let hash2 = hash_source_ip(&packet2); // Different source IPs should produce different hashes // (not guaranteed, but very likely with good hash function) assert_ne!(hash1, hash2); } #[test] fn test_hash_ipv6_source_ip() { let mut packet = vec![0u8; 54]; // Ethernet header packet[12] = 0x86; // IPv6 EtherType packet[13] = 0xDD; // IPv6 header (starts at byte 14) packet[14] = 0x60; // Version 6 // Source IP (16 bytes starting at byte 22) packet[22..38].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, ]); // Dest IP (16 bytes) packet[38..54].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]); let hash1 = hash_source_ip(&packet); let hash2 = hash_source_ip(&packet); 
assert_eq!(hash1, hash2); } #[test] fn test_hash_ipv6_different_sources() { let mut packet1 = vec![0u8; 54]; let mut packet2 = vec![0u8; 54]; // Setup both as valid IPv6 packets for packet in [&mut packet1, &mut packet2] { packet[12] = 0x86; packet[13] = 0xDD; packet[14] = 0x60; } // Packet 1: 2001:db8::1 packet1[22..38].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, ]); // Packet 2: 2001:db8::2 (different source IP) packet2[22..38].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]); let hash1 = hash_source_ip(&packet1); let hash2 = hash_source_ip(&packet2); // Different source IPs should produce different hashes assert_ne!(hash1, hash2); } #[test] fn test_hash_fallback_on_short_packet() { let short_packet = vec![0u8; 10]; // Should not panic, should use fallback hash let hash = hash_source_ip(&short_packet); // Verify hash is consistent assert_eq!(hash, hash_source_ip(&short_packet)); } #[test] fn test_hash_fallback_on_invalid_ip_version() { let mut packet = vec![0u8; 40]; packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x75; // Invalid IP version (7) // Should not panic, should use fallback hash let hash = hash_source_ip(&packet); // Verify hash is consistent assert_eq!(hash, hash_source_ip(&packet)); } #[test] fn test_hash_raw_ip_packet() { // Packet without Ethernet header (raw IP) let mut packet = vec![0u8; 20]; packet[0] = 0x45; // IPv4 version 4, IHL 5 // Source IP: 192.168.1.1 packet[12] = 192; packet[13] = 168; packet[14] = 1; packet[15] = 1; let hash1 = hash_source_ip(&packet); let hash2 = hash_source_ip(&packet); assert_eq!(hash1, hash2); } #[test] fn test_hash_consistency_same_source() { let mut packet1 = vec![0u8; 40]; let mut packet2 = vec![0u8; 60]; // Different total length // Both packets from same source for packet in [&mut packet1, &mut packet2] { packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; 
packet[26..30].copy_from_slice(&[192, 168, 1, 1]); } let hash1 = hash_source_ip(&packet1); let hash2 = hash_source_ip(&packet2); // Same source IP should produce same hash regardless of packet length assert_eq!(hash1, hash2); } #[test] fn test_hash_worker_distribution() { // Test that hashing distributes packets across workers let mut packets = Vec::new(); for i in 0..10 { let mut packet = vec![0u8; 34]; packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; // Different source IPs packet[26] = 192; packet[27] = 168; packet[28] = 1; packet[29] = i; packets.push(packet); } let num_workers = 4; let mut worker_counts = vec![0; num_workers]; for packet in &packets { let hash = hash_source_ip(packet); let worker_id = hash % num_workers; worker_counts[worker_id] += 1; } // Verify all workers got at least one packet (with high probability) // This is probabilistic, but with 10 packets and 4 workers, it's very likely let used_workers = worker_counts.iter().filter(|&&count| count > 0).count(); assert!(used_workers >= 2, "Expected at least 2 workers to be used, got {used_workers}"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/signature_matcher.rs
huginn-net-tcp/tests/signature_matcher.rs
use huginn_net_db::observable_signals::TcpObservation; use huginn_net_db::tcp::{IpVersion, PayloadSize, Quirk, TcpOption, Ttl, WindowSize}; use huginn_net_db::{Database, Type}; use huginn_net_tcp::{ObservableTcp, SignatureMatcher}; #[test] fn matching_linux_by_tcp_request() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; //sig: 4:58+6:0:1452:mss*44,7:mss,sok,ts,nop,ws:df,id+:0 let linux_signature = ObservableTcp { matching: TcpObservation { version: IpVersion::V4, ittl: Ttl::Distance(58, 6), olen: 0, mss: Some(1452), wsize: WindowSize::Mss(44), wscale: Some(7), olayout: vec![ TcpOption::Mss, TcpOption::Sok, TcpOption::TS, TcpOption::Nop, TcpOption::Ws, ], quirks: vec![Quirk::Df, Quirk::NonZeroID], pclass: PayloadSize::Zero, }, }; let matcher = SignatureMatcher::new(&db); if let Some((label, _matched_db_sig, quality)) = matcher.matching_by_tcp_request(&linux_signature) { assert_eq!(label.name, "Linux"); assert_eq!(label.class, Some("unix".to_string())); assert_eq!(label.flavor, Some("2.2.x-3.x".to_string())); assert_eq!(label.ty, Type::Generic); assert_eq!(quality, 1.0); } else { panic!("No match found"); } } #[test] fn matching_android_by_tcp_request() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; //sig: "4:64+0:0:1460:65535,8:mss,sok,ts,nop,ws:df,id+:0" let android_signature = ObservableTcp { matching: TcpObservation { version: IpVersion::V4, ittl: Ttl::Value(64), olen: 0, mss: Some(1460), wsize: WindowSize::Value(65535), wscale: Some(8), olayout: vec![ TcpOption::Mss, TcpOption::Sok, TcpOption::TS, TcpOption::Nop, TcpOption::Ws, ], quirks: vec![Quirk::Df, Quirk::NonZeroID], pclass: PayloadSize::Zero, }, }; //sig: "4:57+7:0:1460:65535,8:mss,sok,ts,nop,ws:df,id+:0" let android_signature_with_distance = ObservableTcp { matching: TcpObservation { version: IpVersion::V4, ittl: Ttl::Distance(57, 7), olen: 0, mss: 
Some(1460), wsize: WindowSize::Value(65535), wscale: Some(8), olayout: vec![ TcpOption::Mss, TcpOption::Sok, TcpOption::TS, TcpOption::Nop, TcpOption::Ws, ], quirks: vec![Quirk::Df, Quirk::NonZeroID], pclass: PayloadSize::Zero, }, }; let matcher = SignatureMatcher::new(&db); if let Some((label, _matched_db_sig, quality)) = matcher.matching_by_tcp_request(&android_signature) { assert_eq!(label.name, "Linux"); assert_eq!(label.class, Some("unix".to_string())); assert_eq!(label.flavor, Some("Android".to_string())); assert_eq!(label.ty, Type::Specified); assert_eq!(quality, 1.0); } else { panic!("No match found"); } if let Some((label, _matched_db_sig, quality)) = matcher.matching_by_tcp_request(&android_signature_with_distance) { assert_eq!(label.name, "Linux"); assert_eq!(label.class, Some("unix".to_string())); assert_eq!(label.flavor, Some("Android".to_string())); assert_eq!(label.ty, Type::Specified); assert_eq!(quality, 1.0); } else { panic!("No match found"); } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/golden_tests.rs
huginn-net-tcp/tests/golden_tests.rs
use huginn_net_db::Database; use huginn_net_tcp::{HuginnNetTcp, HuginnNetTcpError, TcpAnalysisResult}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::Path; use std::sync::mpsc; #[derive(Debug, Deserialize, Serialize)] struct PcapSnapshot { pcap_file: String, pcap_path: String, expected_connections: usize, connections: Vec<ConnectionSnapshot>, } #[derive(Debug, Deserialize, Serialize)] struct ConnectionSnapshot { source: EndpointSnapshot, destination: EndpointSnapshot, tcp_analysis: TcpAnalysisSnapshot, } #[derive(Debug, Deserialize, Serialize)] struct EndpointSnapshot { ip: String, port: u16, } #[derive(Debug, Deserialize, Serialize)] struct TcpAnalysisSnapshot { syn: Option<SynSnapshot>, syn_ack: Option<SynAckSnapshot>, mtu: Option<MtuSnapshot>, client_uptime: Option<UptimeSnapshot>, server_uptime: Option<UptimeSnapshot>, } #[derive(Debug, Deserialize, Serialize)] struct SynSnapshot { os_name: Option<String>, os_family: Option<String>, os_variant: Option<String>, quality: String, raw_signature: String, } #[derive(Debug, Deserialize, Serialize)] struct SynAckSnapshot { os_name: Option<String>, os_family: Option<String>, os_variant: Option<String>, quality: String, raw_signature: String, } #[derive(Debug, Deserialize, Serialize)] struct MtuSnapshot { link_type: String, raw_mtu: u16, } #[derive(Debug, Deserialize, Serialize)] struct UptimeSnapshot { uptime_days: u32, uptime_hours: u32, uptime_minutes: u32, modulo_days: u32, raw_frequency: f64, } fn load_snapshot(pcap_file: &str) -> PcapSnapshot { let snapshot_path = format!("tests/snapshots/{pcap_file}.json"); let snapshot_content = fs::read_to_string(&snapshot_path) .unwrap_or_else(|_| panic!("Failed to read snapshot file: {snapshot_path}")); serde_json::from_str(&snapshot_content) .unwrap_or_else(|e| panic!("Failed to parse snapshot JSON: {e}")) } fn analyze_pcap_file(pcap_path: &str) -> Result<Vec<TcpAnalysisResult>, HuginnNetTcpError> { assert!(Path::new(pcap_path).exists(), "PCAP file must 
exist: {pcap_path}"); // Load the default database for TCP analysis let db = Database::load_default() .map_err(|e| HuginnNetTcpError::Parse(format!("Failed to load database: {e}")))?; let mut analyzer = HuginnNetTcp::new(Some(std::sync::Arc::new(db)), 1000)?; let (sender, receiver) = mpsc::channel::<TcpAnalysisResult>(); let pcap_file_str = pcap_path.to_string(); // Run analysis in the same thread to avoid lifetime issues analyzer.analyze_pcap(&pcap_file_str, sender, None)?; let mut results = Vec::new(); for tcp_output in receiver { // Only include results that have meaningful TCP data if has_meaningful_tcp_data(&tcp_output) { results.push(tcp_output); } } Ok(results) } /// Check if a TCP analysis result has meaningful data for golden tests /// Includes SYN, SYN-ACK, MTU, and uptime packets. fn has_meaningful_tcp_data(result: &TcpAnalysisResult) -> bool { result.syn.is_some() || result.syn_ack.is_some() || result.mtu.is_some() || result.client_uptime.is_some() || result.server_uptime.is_some() } fn assert_connection_matches_snapshot( actual: &TcpAnalysisResult, expected: &ConnectionSnapshot, connection_index: usize, ) { // Check SYN data if present if let (Some(actual_syn), Some(expected_syn)) = (&actual.syn, &expected.tcp_analysis.syn) { assert_eq!( actual_syn.source.ip.to_string(), expected.source.ip, "Connection {connection_index}: SYN source IP mismatch" ); assert_eq!( actual_syn.source.port, expected.source.port, "Connection {connection_index}: SYN source port mismatch" ); assert_eq!( actual_syn.destination.ip.to_string(), expected.destination.ip, "Connection {connection_index}: SYN destination IP mismatch" ); assert_eq!( actual_syn.destination.port, expected.destination.port, "Connection {connection_index}: SYN destination port mismatch" ); // Check OS matching if let Some(expected_os_name) = &expected_syn.os_name { assert!( actual_syn.os_matched.os.is_some(), "Connection {connection_index}: Expected OS match but found none" ); if let Some(actual_os) = 
&actual_syn.os_matched.os { assert_eq!( actual_os.name, *expected_os_name, "Connection {connection_index}: SYN OS name mismatch" ); } } assert_eq!( format!("{:?}", actual_syn.os_matched.quality), expected_syn.quality, "Connection {connection_index}: SYN quality mismatch" ); assert_eq!( actual_syn.sig.to_string(), expected_syn.raw_signature, "Connection {connection_index}: SYN raw signature mismatch" ); } // Check SYN-ACK data if present if let (Some(actual_syn_ack), Some(expected_syn_ack)) = (&actual.syn_ack, &expected.tcp_analysis.syn_ack) { assert_eq!( actual_syn_ack.source.ip.to_string(), expected.source.ip, "Connection {connection_index}: SYN-ACK source IP mismatch" ); assert_eq!( actual_syn_ack.source.port, expected.source.port, "Connection {connection_index}: SYN-ACK source port mismatch" ); // Check OS matching if let Some(expected_os_name) = &expected_syn_ack.os_name { assert!( actual_syn_ack.os_matched.os.is_some(), "Connection {connection_index}: Expected SYN-ACK OS match but found none" ); if let Some(actual_os) = &actual_syn_ack.os_matched.os { assert_eq!( actual_os.name, *expected_os_name, "Connection {connection_index}: SYN-ACK OS name mismatch" ); } } assert_eq!( format!("{:?}", actual_syn_ack.os_matched.quality), expected_syn_ack.quality, "Connection {connection_index}: SYN-ACK quality mismatch" ); } // Check MTU data if present if let (Some(actual_mtu), Some(expected_mtu)) = (&actual.mtu, &expected.tcp_analysis.mtu) { assert_eq!( actual_mtu.mtu, expected_mtu.raw_mtu, "Connection {connection_index}: MTU raw value mismatch" ); } // Check client uptime data if present if let (Some(actual_uptime), Some(expected_uptime)) = (&actual.client_uptime, &expected.tcp_analysis.client_uptime) { assert_eq!( actual_uptime.freq, expected_uptime.raw_frequency, "Connection {connection_index}: Client uptime raw frequency mismatch" ); assert_eq!( actual_uptime.days, expected_uptime.uptime_days, "Connection {connection_index}: Client uptime days mismatch" ); } // Check 
server uptime data if present if let (Some(actual_uptime), Some(expected_uptime)) = (&actual.server_uptime, &expected.tcp_analysis.server_uptime) { assert_eq!( actual_uptime.freq, expected_uptime.raw_frequency, "Connection {connection_index}: Server uptime raw frequency mismatch" ); assert_eq!( actual_uptime.days, expected_uptime.uptime_days, "Connection {connection_index}: Server uptime days mismatch" ); } } /// Golden test: compares PCAP analysis output against known-good JSON snapshots fn test_pcap_with_snapshot(pcap_file: &str) { let snapshot = load_snapshot(pcap_file); let results = analyze_pcap_file(&snapshot.pcap_path) .unwrap_or_else(|e| panic!("Failed to analyze PCAP file: {e}")); assert_eq!( results.len(), snapshot.expected_connections, "Expected {} connections in {}, found {}", snapshot.expected_connections, pcap_file, results.len() ); for (i, (actual, expected)) in results.iter().zip(snapshot.connections.iter()).enumerate() { assert_connection_matches_snapshot(actual, expected, i); } } #[test] fn test_golden_tcp_snapshots() { let golden_test_cases = [ "macos_tcp_flags.pcap", // Add more PCAP files here as golden tests: // "linux_syn.pcap", // "windows_tcp.pcap", ]; for pcap_file in golden_test_cases { println!("Running golden test for: {pcap_file}"); test_pcap_with_snapshot(pcap_file); } } #[test] fn test_macos_tcp_flags_pcap_snapshot() { test_pcap_with_snapshot("macos_tcp_flags.pcap"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/ip_options.rs
huginn-net-tcp/tests/ip_options.rs
use huginn_net_tcp::ip_options::IpOptions; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; #[test] fn test_ipv4_options_length() { let mut data = vec![0u8; 24]; data[0] = 0x46; // Version 4, IHL 6 let packet_opt = Ipv4Packet::new(&data); assert!(packet_opt.is_some(), "Failed to create IPv4 packet"); let packet = match packet_opt { Some(pkt) => pkt, None => panic!("Should not fail after assert"), }; assert_eq!(IpOptions::calculate_ipv4_length(&packet), 4); } #[test] fn test_ipv6_direct_tcp() { let mut data = vec![0u8; 40]; data[0] = 0x60; // Version 6 data[6] = IpNextHeaderProtocols::Tcp.0; // Next Header = TCP let packet_opt = Ipv6Packet::new(&data); assert!(packet_opt.is_some(), "Failed to create IPv6 packet"); let packet = match packet_opt { Some(pkt) => pkt, None => panic!("Should not fail after assert"), }; assert_eq!(IpOptions::calculate_ipv6_length(&packet), 0); } #[test] fn test_ipv6_fragment() { let mut data = vec![0u8; 48]; data[0] = 0x60; // Version 6 data[6] = IpNextHeaderProtocols::Ipv6Frag.0; // Next Header = Fragment data[4] = 0; // Length high byte data[5] = 8; // Length low byte data[40] = IpNextHeaderProtocols::Tcp.0; // Next Header = TCP let packet_opt = Ipv6Packet::new(&data); assert!(packet_opt.is_some(), "Failed to create IPv6 fragment packet"); let packet = match packet_opt { Some(pkt) => pkt, None => panic!("Should not fail after assert"), }; assert_eq!(IpOptions::calculate_ipv6_length(&packet), 8); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/backward_timestamps.rs
huginn-net-tcp/tests/backward_timestamps.rs
//! Tests for backward timestamp detection and handling use huginn_net_tcp::uptime::{check_ts_tcp, Connection}; use std::net::{IpAddr, Ipv4Addr}; use std::time::Duration; use ttl_cache::TtlCache; #[test] fn test_timestamp_wraparound_detection() { // Test detection of timestamp wraparound (32-bit overflow) let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100)), src_port: 12345, dst_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 200)), dst_port: 80, }; // Simulate a timestamp near the 32-bit limit let near_max_timestamp = u32::MAX - 1000; // Close to overflow // Store SYN data with timestamp near maximum let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, near_max_timestamp); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Wait enough time to avoid MIN_TWAIT validation std::thread::sleep(Duration::from_millis(50)); // Simulate timestamp after wraparound (small value after overflow) let after_wraparound_timestamp = 1000; // Small value after overflow let (client_uptime, server_uptime) = check_ts_tcp( &mut connection_tracker, &server_connection, false, after_wraparound_timestamp, ); // The algorithm should detect this as a backward timestamp and handle it // It might return None due to other validations, but it should not panic or fail // The key is that it processes the wraparound case without errors println!("Wraparound test result: client={client_uptime:?}, server={server_uptime:?}"); } #[test] fn test_small_backward_movement_within_grace() { // Test small backward movements within the grace period (100ms) let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), 
src_port: 54321, dst_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)), dst_port: 443, }; // Store SYN data let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, 10000); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Wait a very short time (within grace period) std::thread::sleep(Duration::from_millis(30)); // Simulate a small backward movement (packet reordering) let backward_timestamp = 9998; // 2 ticks backward let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, backward_timestamp); // Should be rejected due to small backward movement within grace period assert!( client_uptime.is_none() && server_uptime.is_none(), "Small backward movement within grace period should be rejected" ); } #[test] fn test_large_backward_movement() { // Test large backward movements (likely NAT/load balancer change) let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(172, 16, 1, 1)), src_port: 8080, dst_ip: IpAddr::V4(Ipv4Addr::new(172, 16, 1, 2)), dst_port: 9090, }; // Store SYN data with high timestamp let high_timestamp = 1_000_000; let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, high_timestamp); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Wait sufficient time std::thread::sleep(Duration::from_millis(200)); // Simulate large backward movement (different server behind NAT) let low_timestamp = 100_000; // Much lower timestamp let (client_uptime, 
server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, low_timestamp); // The algorithm should detect this as a large backward movement // and attempt to calculate frequency using the inverted difference println!( "Large backward movement test result: client={client_uptime:?}, server={server_uptime:?}" ); } #[test] fn test_normal_forward_progression() { // Test normal forward timestamp progression (baseline) let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(203, 0, 113, 1)), src_port: 12345, dst_ip: IpAddr::V4(Ipv4Addr::new(203, 0, 113, 2)), dst_port: 80, }; // Store SYN data let syn_timestamp = 500_000; let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, syn_timestamp); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Wait sufficient time std::thread::sleep(Duration::from_millis(100)); // Normal forward progression let forward_timestamp = 500_100; // 100 ticks forward let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, forward_timestamp); // This should be processed normally (though may fail other validations) println!("Normal forward progression test result: client={client_uptime:?}, server={server_uptime:?}"); } #[test] fn test_wraparound_calculation_logic() { // Test the mathematical logic for wraparound detection // p0f uses: is_backward = (ts_diff > !ts_diff) // This happens when ts_diff is in the upper half of u32 range (> u32::MAX/2) println!("=== Testing Wraparound Detection Logic ==="); let threshold = u32::MAX / 2; println!("Threshold (u32::MAX/2): {threshold}\n"); // Case 1: Normal forward progression let ts_ref1 = 1000u32; let ts_cur1 = 2000u32; let diff1 
= ts_cur1.wrapping_sub(ts_ref1); let inv1 = !diff1; let is_backward1 = diff1 > inv1; println!("Case 1 - Forward progression:"); println!( " ref={ts_ref1}, cur={ts_cur1}, diff={diff1}, !diff={inv1}, is_backward={is_backward1}" ); assert!(!is_backward1, "Forward progression should NOT be detected as backward"); assert!(diff1 <= threshold, "Forward progression: diff should be <= threshold"); // Case 2: Small backward movement let ts_ref2 = 2000u32; let ts_cur2 = 1000u32; let diff2 = ts_cur2.wrapping_sub(ts_ref2); let inv2 = !diff2; let is_backward2 = diff2 > inv2; println!("Case 2 - Small backward movement:"); println!( " ref={ts_ref2}, cur={ts_cur2}, diff={diff2}, !diff={inv2}, is_backward={is_backward2}" ); assert!(is_backward2, "Small backward movement SHOULD be detected as backward"); assert!(diff2 > threshold, "Small backward: diff should be > threshold"); // Case 3: Large backward movement (what p0f calls "wraparound") let ts_ref3 = 100u32; let ts_cur3 = u32::MAX - 100; // Very large number let diff3 = ts_cur3.wrapping_sub(ts_ref3); let inv3 = !diff3; let is_backward3 = diff3 > inv3; println!("Case 3 - Large backward movement:"); println!( " ref={ts_ref3}, cur={ts_cur3}, diff={diff3}, !diff={inv3}, is_backward={is_backward3}" ); assert!(is_backward3, "Large backward movement SHOULD be detected as backward"); assert!(diff3 > threshold, "Large backward: diff should be > threshold"); // Case 4: True overflow scenario (timestamp counter wrapped around) let ts_ref4 = u32::MAX - 100; let ts_cur4 = 100u32; let diff4 = ts_cur4.wrapping_sub(ts_ref4); let inv4 = !diff4; let is_backward4 = diff4 > inv4; println!("Case 4 - True overflow (legitimate wraparound):"); println!( " ref={ts_ref4}, cur={ts_cur4}, diff={diff4}, !diff={inv4}, is_backward={is_backward4}" ); assert!( !is_backward4, "True overflow should NOT be detected as backward (small positive result)" ); assert!(diff4 <= threshold, "True overflow: diff should be <= threshold"); println!("\n✓ All wraparound 
detection cases validated correctly"); } #[test] fn test_frequency_calculation_with_wraparound() { // Test frequency calculation logic for wraparound scenarios // Simulate a realistic backward timestamp scenario // This represents a case where timestamp went backward significantly let ts_reference = 1_000_000u32; // High reference timestamp let ts_current = 100_000u32; // Much lower current timestamp (backward jump) let time_diff_ms = 100; // 100ms elapsed let ts_diff = ts_current.wrapping_sub(ts_reference); let inverted_diff = !ts_diff; // Check if this is detected as backward movement if ts_diff > inverted_diff { // This should be detected as wraparound let effective_diff = inverted_diff; let frequency = (effective_diff as f64 * 1000.0) / time_diff_ms as f64; println!("Backward movement detected:"); println!(" ts_reference: {ts_reference}"); println!(" ts_current: {ts_current}"); println!(" ts_diff: {ts_diff}"); println!(" inverted_diff: {inverted_diff}"); println!(" effective_diff: {effective_diff}"); println!(" calculated frequency: {frequency:.2} Hz"); // The frequency should be reasonable for the inverted difference assert!( frequency > 0.0 && frequency < 50_000_000.0, "Calculated frequency {frequency} Hz should be reasonable" ); } else { // This is actually the expected case for this scenario // Let's calculate what we expect println!("Forward progression detected (this might be correct):"); println!(" ts_reference: {ts_reference}"); println!(" ts_current: {ts_current}"); println!(" ts_diff: {ts_diff}"); println!(" inverted_diff: {inverted_diff}"); } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/parallel_tests.rs
huginn-net-tcp/tests/parallel_tests.rs
use huginn_net_tcp::parallel::{DispatchResult, PoolStats, WorkerPool, WorkerStats}; use huginn_net_tcp::HuginnNetTcpError; use std::sync::mpsc; use std::sync::Arc; use std::thread; fn unwrap_worker_pool(result: Result<WorkerPool, HuginnNetTcpError>) -> WorkerPool { match result { Ok(pool) => pool, Err(e) => panic!("Failed to create WorkerPool: {e}"), } } /// Helper to create a minimal Ethernet + IPv4 + TCP packet with a specific source IP fn create_ipv4_packet(src_ip: [u8; 4]) -> Vec<u8> { let mut packet = vec![0u8; 54]; // Ethernet (14) + IPv4 (20) + TCP (20) // Ethernet header packet[12] = 0x08; // EtherType IPv4 packet[13] = 0x00; // IPv4 header (starts at offset 14) packet[14] = 0x45; // Version 4, IHL 5 packet[23] = 0x06; // Protocol TCP // Source IP (offset 26-29) packet[26..30].copy_from_slice(&src_ip); // Destination IP (offset 30-33) packet[30..34].copy_from_slice(&[10, 0, 0, 2]); packet } #[test] fn test_worker_pool_rejects_zero_workers() { let (tx, _rx) = mpsc::channel(); let result = WorkerPool::new(0, 100, 32, 10, tx, None, 1000, None); assert!(result.is_err()); } #[test] fn test_worker_pool_creates_with_valid_workers() { let (tx, _rx) = mpsc::channel(); let result = WorkerPool::new(4, 100, 32, 10, tx, None, 1000, None); assert!(result.is_ok()); let pool = unwrap_worker_pool(result); assert_eq!(pool.num_workers.get(), 4); } #[test] fn test_hash_based_dispatch_consistency() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(4, 10, 32, 10, tx, None, 1000, None)); // Create packets with the same source IP let src_ip = [192, 168, 1, 100]; let packets: Vec<_> = (0..10).map(|_| create_ipv4_packet(src_ip)).collect(); // Dispatch all packets for packet in packets { pool.dispatch(packet); } let stats = pool.stats(); // All packets should go to the same worker (hash-based routing) let workers_with_packets = stats .workers .iter() .filter(|w| w.queue_size > 0 || w.dropped > 0) .count(); assert_eq!( workers_with_packets, 1, "Expected 
all packets from same source IP to go to same worker" ); } #[test] fn test_different_ips_distributed() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(4, 10, 32, 10, tx, None, 1000, None)); // Create packets with different source IPs let src_ips = vec![ [192, 168, 1, 1], [192, 168, 1, 2], [192, 168, 1, 3], [192, 168, 1, 4], [10, 0, 0, 1], [10, 0, 0, 2], [10, 0, 0, 3], [10, 0, 0, 4], ]; for src_ip in src_ips { pool.dispatch(create_ipv4_packet(src_ip)); } let stats = pool.stats(); // Packets should be distributed across workers let workers_with_packets = stats.workers.iter().filter(|w| w.queue_size > 0).count(); assert!( workers_with_packets > 1, "Expected packets from different IPs to be distributed" ); } #[test] fn test_queue_overflow_handling() { let (tx, _rx) = mpsc::channel(); let queue_size = 5; let pool = unwrap_worker_pool(WorkerPool::new(2, queue_size, 32, 10, tx, None, 1000, None)); let mut queued = 0; let mut dropped = 0; // Try to dispatch many packets to overflow queues // Use same IP so they all go to same worker let src_ip = [192, 168, 1, 100]; for _ in 0..100 { match pool.dispatch(create_ipv4_packet(src_ip)) { DispatchResult::Queued => queued += 1, DispatchResult::Dropped => dropped += 1, } } // Should have some dropped packets due to queue overflow assert!(dropped > 0, "Expected some packets to be dropped"); assert!(queued > 0, "Expected some packets to be queued"); let stats = pool.stats(); assert_eq!(stats.total_dispatched, queued); assert_eq!(stats.total_dropped, dropped); } #[test] fn test_stats_accuracy() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(2, 100, 32, 10, tx, None, 1000, None)); // Dispatch some packets let dispatch_count = 10; for i in 0..dispatch_count { pool.dispatch(create_ipv4_packet([192, 168, 1, i as u8])); } let stats = pool.stats(); assert_eq!(stats.total_dispatched, dispatch_count); assert_eq!(stats.workers.len(), 2); } #[test] fn 
test_shutdown_stops_accepting_packets() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(2, 100, 32, 10, tx, None, 1000, None)); // Dispatch before shutdown should work let result = pool.dispatch(create_ipv4_packet([192, 168, 1, 1])); assert_eq!(result, DispatchResult::Queued); // Shutdown the pool pool.shutdown(); // Dispatch after shutdown should return Dropped let result = pool.dispatch(create_ipv4_packet([192, 168, 1, 1])); assert_eq!(result, DispatchResult::Dropped); } #[test] fn test_per_worker_dropped_count() { let (tx, _rx) = mpsc::channel(); let queue_size = 2; let pool = unwrap_worker_pool(WorkerPool::new(1, queue_size, 32, 10, tx, None, 1000, None)); // Fill the single worker's queue (same source IP) let src_ip = [192, 168, 1, 100]; for _ in 0..10 { pool.dispatch(create_ipv4_packet(src_ip)); } let stats = pool.stats(); // Should have dropped packets assert!(stats.total_dropped > 0); // The single worker should have the drops assert_eq!(stats.workers.len(), 1); assert_eq!(stats.workers[0].dropped, stats.total_dropped); } #[test] fn test_concurrent_dispatch() { let (tx, _rx) = mpsc::channel(); let pool = Arc::new(unwrap_worker_pool(WorkerPool::new(4, 100, 32, 10, tx, None, 1000, None))); let handles: Vec<_> = (0..4) .map(|thread_id| { let pool_clone = Arc::clone(&pool); thread::spawn(move || { for i in 0..25 { let src_ip = [192, 168, thread_id, i]; pool_clone.dispatch(create_ipv4_packet(src_ip)); } }) }) .collect(); for handle in handles { if handle.join().is_err() { panic!("Thread panicked during concurrent dispatch test"); } } let stats = pool.stats(); // 4 threads * 25 dispatches = 100 total (some might be queued, some dropped) let total_processed = stats.total_dispatched + stats.total_dropped; assert_eq!(total_processed, 100); } #[test] fn test_worker_stats_display() { let worker = WorkerStats { id: 0, queue_size: 5, dropped: 10 }; let output = format!("{worker}"); assert!(output.contains("Worker 0")); 
assert!(output.contains("queue_size=5")); assert!(output.contains("dropped=10")); } #[test] fn test_pool_stats_display() { let stats = PoolStats { total_dispatched: 100, total_dropped: 5, workers: vec![ WorkerStats { id: 0, queue_size: 2, dropped: 3 }, WorkerStats { id: 1, queue_size: 1, dropped: 2 }, ], }; let output = format!("{stats}"); assert!(output.contains("dispatched: 100")); assert!(output.contains("dropped: 5")); assert!(output.contains("Worker 0")); assert!(output.contains("Worker 1")); } #[test] fn test_state_isolation() { let (tx, _rx) = mpsc::channel(); let pool = unwrap_worker_pool(WorkerPool::new(3, 100, 32, 10, tx, None, 1000, None)); // Dispatch packets from 3 different IPs (should go to different workers) let ips = [[10, 0, 0, 1], [10, 0, 0, 2], [10, 0, 0, 3]]; for ip in &ips { // Send multiple packets from each IP for _ in 0..5 { pool.dispatch(create_ipv4_packet(*ip)); } } let stats = pool.stats(); assert_eq!(stats.total_dispatched, 15); // Verify workers have received packets let active_workers = stats.workers.iter().filter(|w| w.queue_size > 0).count(); assert!(active_workers > 0, "Expected at least one worker to have packets"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/uptime_validations.rs
huginn-net-tcp/tests/uptime_validations.rs
//! Tests for uptime calculation validations use huginn_net_tcp::uptime::{check_ts_tcp, Connection}; use std::net::{IpAddr, Ipv4Addr}; use std::time::Duration; use ttl_cache::TtlCache; #[test] fn test_min_twait_validation() { // Test that intervals shorter than MIN_TWAIT (25ms) are rejected let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), src_port: 12345, dst_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 2)), dst_port: 80, }; // First, simulate storing SYN data (from_client = true) let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, 1000); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); // Now simulate a very quick response (< 25ms) - should be rejected // We can't easily control the timing in this test, but we can verify the logic // by checking that very small timestamp differences are rejected let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Try with a very small timestamp difference (< 5 ticks) let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, 1003); // Only 3 ticks difference assert!( client_uptime.is_none() && server_uptime.is_none(), "Should reject timestamp differences < MIN_TS_DIFF (5 ticks)" ); } #[test] fn test_min_ts_diff_validation() { // Test that timestamp differences smaller than MIN_TS_DIFF (5 ticks) are rejected let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)), src_port: 54321, dst_ip: IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)), dst_port: 443, }; // Store SYN data let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, 5000); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet 
should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Test various small timestamp differences let test_cases = [ (5000, "Same timestamp should be rejected"), (5001, "1 tick difference should be rejected"), (5002, "2 tick difference should be rejected"), (5003, "3 tick difference should be rejected"), (5004, "4 tick difference should be rejected"), ]; for (ts_val, description) in test_cases { // Reset the connection tracker for each test connection_tracker.clear(); let _ = check_ts_tcp(&mut connection_tracker, &connection, true, 5000); let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, ts_val); assert!(client_uptime.is_none() && server_uptime.is_none(), "{}", description); } } #[test] fn test_valid_timestamp_difference() { // Test that valid timestamp differences (>= MIN_TS_DIFF) are processed let mut connection_tracker = TtlCache::new(100); let connection = Connection { src_ip: IpAddr::V4(Ipv4Addr::new(172, 16, 0, 1)), src_port: 8080, dst_ip: IpAddr::V4(Ipv4Addr::new(172, 16, 0, 2)), dst_port: 9090, }; // Store SYN data let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, 10000); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN packet should not return uptime calculation" ); let server_connection = Connection { src_ip: connection.dst_ip, src_port: connection.dst_port, dst_ip: connection.src_ip, dst_port: connection.src_port, }; // Wait enough time to ensure we don't hit MIN_TWAIT validation (25ms) std::thread::sleep(Duration::from_millis(30)); // Test with exactly MIN_TS_DIFF (5 ticks) - should pass MIN_TS_DIFF validation let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, 10005); // Note: Result might still be None due to frequency being too low/high for the time 
interval // but it should NOT fail on MIN_TS_DIFF validation println!( "MIN_TS_DIFF test (5 ticks): client={:?}, server={:?}", client_uptime.is_some(), server_uptime.is_some() ); // Test with realistic timestamp difference for 250 Hz system // Using 250 Hz is more reliable as it gives us more tolerance for timing variations connection_tracker.clear(); let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &connection, true, 1_000_000); assert!( client_uptime.is_none() && server_uptime.is_none(), "SYN should not calculate uptime" ); // Wait long enough for a realistic frequency calculation (100ms) std::thread::sleep(Duration::from_millis(100)); // With 25 ticks over ~100ms, frequency should be ~250 Hz (valid range) // This is more tolerant to timing variations: // - If sleep is 90ms: 25/0.09 = 277 Hz ✓ // - If sleep is 100ms: 25/0.10 = 250 Hz ✓ // - If sleep is 110ms: 25/0.11 = 227 Hz ✓ let (client_uptime, server_uptime) = check_ts_tcp(&mut connection_tracker, &server_connection, false, 1_000_025); // This should pass: // - MIN_TS_DIFF: 25 ticks >> 5 ticks ✓ // - MIN_TWAIT: ~100ms > 25ms ✓ // - Frequency: ~250 Hz is in valid range (100-1500 Hz) ✓ // The calculation might fail due to timing variations, so we don't assert success // but we do verify that if it succeeds, the frequency is reasonable if let Some(uptime) = client_uptime.or(server_uptime) { println!( "Successfully calculated uptime: freq={:.2} Hz, days={}, hrs={}, min={}", uptime.freq, uptime.days, uptime.hours, uptime.min ); // Verify frequency is in valid range (should be rounded to 250 Hz) assert!( uptime.freq >= 100.0 && uptime.freq <= 500.0, "Calculated frequency should be in range 100-500 Hz (expected ~250 Hz), got {} Hz", uptime.freq ); } else { println!( "Note: Uptime calculation may fail due to system timing variations. \ This is expected in timing-sensitive tests." ); } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/uptime_tracker.rs
huginn-net-tcp/tests/uptime_tracker.rs
use huginn_net_tcp::{calculate_uptime_improved, FrequencyState, TcpTimestamp, UptimeTracker}; #[test] fn test_improved_uptime_tracking() { let mut tracker = UptimeTracker::new(); // Test initial state assert!(!tracker.has_valid_client_frequency()); assert!(!tracker.has_valid_server_frequency()); assert!(matches!(tracker.cli_freq, FrequencyState::NotCalculated)); assert!(matches!(tracker.srv_freq, FrequencyState::NotCalculated)); // Simulate SYN packet (client -> server) let syn_result = calculate_uptime_improved(&mut tracker, 1000000, true); assert!(syn_result.is_none()); // SYN packets don't calculate uptime assert!(tracker.last_syn.is_some()); // Simulate SYN+ACK packet (server -> client) after 100ms // This should calculate frequency and uptime std::thread::sleep(std::time::Duration::from_millis(100)); let synack_result = calculate_uptime_improved(&mut tracker, 1001000, false); if let Some(uptime) = synack_result { println!("Calculated uptime: {uptime:?}"); // Should have calculated a reasonable frequency (around 1000 Hz) assert!(tracker.has_valid_client_frequency()); if let Some(freq) = tracker.cli_freq.value() { assert!(freq > 900 && freq < 1100); } // Uptime should be reasonable (based on timestamp value) assert!(uptime.freq > 900.0 && uptime.freq < 1100.0); assert!(uptime.up_mod_days > 40 && uptime.up_mod_days < 60); // ~50 days for 1000 Hz } else { // If calculation failed, check why if tracker.cli_freq.is_invalid() { println!("Client frequency marked as bad"); } else { println!("No uptime calculated - tracker state: {tracker:?}"); } } } #[test] fn test_bad_frequency_handling() { let mut tracker = UptimeTracker::new(); // Store SYN timestamp let _syn_result = calculate_uptime_improved(&mut tracker, 1000000, true); // Try with a timestamp that would result in invalid frequency // (too small time difference) let bad_result = calculate_uptime_improved(&mut tracker, 1000001, false); // Should fail and mark frequency as bad assert!(bad_result.is_none()); // The 
frequency might be marked as bad depending on timing // Try again - should skip calculation since frequency is bad let retry_result = calculate_uptime_improved(&mut tracker, 1000100, false); if tracker.cli_freq.is_invalid() { assert!(retry_result.is_none()); println!("Correctly skipped calculation for bad frequency"); } } #[test] fn test_frequency_reuse() { let mut tracker = UptimeTracker::new(); // Store SYN timestamp let _syn_result = calculate_uptime_improved(&mut tracker, 1000000, true); // Calculate frequency once std::thread::sleep(std::time::Duration::from_millis(100)); let first_result = calculate_uptime_improved(&mut tracker, 1001000, false); if let Some(_uptime1) = first_result { let stored_frequency = tracker.cli_freq; assert!(tracker.has_valid_client_frequency()); // Use the same frequency for subsequent calculations let second_result = calculate_uptime_improved(&mut tracker, 1002000, false); if let Some(_uptime2) = second_result { // Frequency should remain the same (reused) assert_eq!(tracker.cli_freq, stored_frequency); if let Some(freq) = stored_frequency.value() { println!("Successfully reused frequency: {freq} Hz"); } } } } #[test] fn test_tcp_timestamp_creation() { // Test TcpTimestamp creation methods let ts1 = TcpTimestamp::new(12345, 67890); assert_eq!(ts1.ts_val, 12345); assert_eq!(ts1.recv_time_ms, 67890); let ts2 = TcpTimestamp::now(54321); assert_eq!(ts2.ts_val, 54321); assert!(ts2.recv_time_ms > 0); // Should have current time } #[test] fn test_uptime_tracker_methods() { let mut tracker = UptimeTracker::new(); // Test initial state assert!(!tracker.has_valid_client_frequency()); assert!(!tracker.has_valid_server_frequency()); assert!(matches!(tracker.cli_freq, FrequencyState::NotCalculated)); assert!(matches!(tracker.srv_freq, FrequencyState::NotCalculated)); // Test marking frequencies as bad tracker.mark_client_frequency_bad(); assert!(matches!(tracker.cli_freq, FrequencyState::Invalid)); assert!(!tracker.has_valid_client_frequency()); 
tracker.mark_server_frequency_bad(); assert!(matches!(tracker.srv_freq, FrequencyState::Invalid)); assert!(!tracker.has_valid_server_frequency()); // Test setting valid frequencies tracker.cli_freq = FrequencyState::Valid(1000); tracker.srv_freq = FrequencyState::Valid(100); assert!(tracker.has_valid_client_frequency()); assert!(tracker.has_valid_server_frequency()); assert_eq!(tracker.cli_freq.value(), Some(1000)); assert_eq!(tracker.srv_freq.value(), Some(100)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/ttl.rs
huginn-net-tcp/tests/ttl.rs
use huginn_net_db::tcp::Ttl; use huginn_net_tcp::ttl::{calculate_ttl, guess_distance}; #[test] fn test_guess_distance() { assert_eq!(guess_distance(32), 0); assert_eq!(guess_distance(64), 0); assert_eq!(guess_distance(128), 0); assert_eq!(guess_distance(255), 0); assert_eq!(guess_distance(30), 2); assert_eq!(guess_distance(60), 4); assert_eq!(guess_distance(120), 8); assert_eq!(guess_distance(200), 55); assert_eq!(guess_distance(1), 31); assert_eq!(guess_distance(33), 31); assert_eq!(guess_distance(65), 63); assert_eq!(guess_distance(129), 126); } #[test] fn test_calculate_bad_ttl() { assert_eq!(calculate_ttl(0), Ttl::Bad(0)); } #[test] fn test_calculate_ttl_standard_initial_values() { assert_eq!(calculate_ttl(32), Ttl::Distance(32, 0)); assert_eq!(calculate_ttl(64), Ttl::Distance(64, 0)); assert_eq!(calculate_ttl(128), Ttl::Distance(128, 0)); assert_eq!(calculate_ttl(255), Ttl::Distance(255, 0)); } #[test] fn test_calculate_ttl_acceptable_distances() { // TTLs within MAX_HOPS_ACCEPTABLE (30) should return Distance assert_eq!(calculate_ttl(57), Ttl::Distance(57, 7)); assert_eq!(calculate_ttl(120), Ttl::Distance(120, 8)); assert_eq!(calculate_ttl(240), Ttl::Distance(240, 15)); assert_eq!(calculate_ttl(20), Ttl::Distance(20, 12)); assert_eq!(calculate_ttl(34), Ttl::Distance(34, 30)); } #[test] fn test_calculate_ttl_excessive_distances() { // TTLs with distance > MAX_HOPS_ACCEPTABLE should return Value assert_eq!(calculate_ttl(1), Ttl::Value(1)); assert_eq!(calculate_ttl(33), Ttl::Value(33)); assert_eq!(calculate_ttl(97), Ttl::Value(97)); assert_eq!(calculate_ttl(150), Ttl::Value(150)); assert_eq!(calculate_ttl(224), Ttl::Value(224)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/window_size.rs
huginn-net-tcp/tests/window_size.rs
use huginn_net_db::tcp::{IpVersion, WindowSize}; use huginn_net_tcp::window_size::detect_win_multiplicator; #[test] fn test_mss_multiple() { let mss = 1000; let multiplier = 40; let window = mss * multiplier; // 1000 * 40 = 40000 (within u16) let result = detect_win_multiplicator(window, mss, 40, false, &IpVersion::V4); assert!(matches!(result, WindowSize::Mss(40))); } #[test] fn test_mtu_multiple() { let window = 4500; // 1500 * 3 let result = detect_win_multiplicator(window, 1460, 40, false, &IpVersion::V4); assert!(matches!(result, WindowSize::Mtu(3))); } #[test] fn test_modulo_pattern() { let window = 8192; // Power of 2, should match largest modulo (4096) let mss = 1337; // Prime number MSS to avoid any accidental divisions let result = detect_win_multiplicator(window, mss, 40, false, &IpVersion::V4); println!("Result for window {window}: {result:?}"); assert!(matches!(result, WindowSize::Mod(4096))); } #[test] fn test_timestamp_adjustment() { let window = 43800; // (1460 - 12) * 30 let result = detect_win_multiplicator(window, 1460, 40, true, &IpVersion::V4); assert!(matches!(result, WindowSize::Mss(30))); } #[test] fn test_direct_value() { let window = 12345; // Arbitrary value let result = detect_win_multiplicator(window, 1460, 40, false, &IpVersion::V4); assert!(matches!(result, WindowSize::Value(12345))); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-tcp/tests/packet_parser.rs
huginn-net-tcp/tests/packet_parser.rs
use huginn_net_tcp::packet_parser::{ detect_datalink_format, parse_packet, DatalinkFormat, IpPacket, }; use pnet::packet::Packet; #[test] fn test_detect_null_datalink() { // NULL datalink: 4-byte header + IPv6 packet let null_packet = vec![ 0x1e, 0x00, 0x00, 0x00, // NULL header 0x60, 0x00, 0x00, 0x00, // IPv6 header start (version=6) 0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header (TCP), hop limit // Add minimal IPv6 addresses (32 bytes total) 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // src 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // dst ]; let format = detect_datalink_format(&null_packet); assert_eq!(format, Some(DatalinkFormat::Null)); } #[test] fn test_detect_raw_ipv4() { // Raw IPv4 packet (no datalink header) let raw_ipv4 = vec![ 0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28 0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0 0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum 0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1 0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2 ]; let format = detect_datalink_format(&raw_ipv4); assert_eq!(format, Some(DatalinkFormat::RawIp)); } #[test] fn test_detect_raw_ipv6() { // Raw IPv6 packet (no datalink header) let raw_ipv6 = vec![ 0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0 0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64 // Source address: 2001:db8::1 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Destination address: 2001:db8::2 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]; let format = detect_datalink_format(&raw_ipv6); assert_eq!(format, Some(DatalinkFormat::RawIp)); } #[test] fn test_detect_ethernet_ipv4() { // Ethernet frame with IPv4 payload let ethernet_ipv4 = vec![ // Ethernet header (14 
bytes) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC 0x08, 0x00, // EtherType: IPv4 // IPv4 header 0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28 0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0 0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum 0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1 0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2 ]; let format = detect_datalink_format(&ethernet_ipv4); assert_eq!(format, Some(DatalinkFormat::Ethernet)); } #[test] fn test_detect_ethernet_ipv6() { // Ethernet frame with IPv6 payload let ethernet_ipv6 = vec![ // Ethernet header (14 bytes) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC 0x86, 0xDD, // EtherType: IPv6 // IPv6 header (40 bytes) 0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0 0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64 // Source address: 2001:db8::1 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Destination address: 2001:db8::2 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]; let format = detect_datalink_format(&ethernet_ipv6); assert_eq!(format, Some(DatalinkFormat::Ethernet)); } #[test] fn test_parse_ethernet_ipv4() { // Test parsing Ethernet frame with IPv4 let ethernet_ipv4 = vec![ // Ethernet header (14 bytes) 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Destination MAC 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Source MAC 0x08, 0x00, // EtherType: IPv4 // IPv4 header 0x45, 0x00, 0x00, 0x1c, // Version=4, IHL=5, TOS=0, Total Length=28 0x00, 0x00, 0x40, 0x00, // ID=0, Flags=0x4000 (DF), Fragment Offset=0 0x40, 0x06, 0x7c, 0xb0, // TTL=64, Protocol=TCP(6), Checksum 0xc0, 0xa8, 0x01, 0x01, // Source IP: 192.168.1.1 0xc0, 0xa8, 0x01, 0x02, // Dest IP: 192.168.1.2 ]; match parse_packet(&ethernet_ipv4) { 
IpPacket::Ipv4(ipv4) => { assert_eq!(ipv4.get_version(), 4); assert_eq!(ipv4.get_header_length() as usize, 5); // IHL=5 (5 * 4 = 20 bytes) } _ => panic!("Expected IPv4 packet"), } } #[test] fn test_parse_raw_ipv6() { // Test parsing raw IPv6 packet let raw_ipv6 = vec![ 0x60, 0x00, 0x00, 0x00, // Version=6, Traffic Class=0, Flow Label=0 0x00, 0x00, 0x06, 0x40, // Payload Length=0, Next Header=TCP(6), Hop Limit=64 // Source address: 2001:db8::1 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // Destination address: 2001:db8::2 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]; match parse_packet(&raw_ipv6) { IpPacket::Ipv6(ipv6) => { assert_eq!(ipv6.get_version(), 6); assert_eq!(ipv6.packet().len(), 40); // IPv6 header length } _ => panic!("Expected IPv6 packet"), } } #[test] fn test_parse_null_datalink_ipv6() { // Test parsing NULL datalink with IPv6 let null_ipv6 = vec![ 0x1e, 0x00, 0x00, 0x00, // NULL header 0x60, 0x00, 0x00, 0x00, // IPv6 header start (version=6) 0x00, 0x14, 0x06, 0x40, // IPv6 payload length, next header (TCP), hop limit // Add minimal IPv6 addresses (32 bytes total) 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // src 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // dst ]; match parse_packet(&null_ipv6) { IpPacket::Ipv6(ipv6) => { assert_eq!(ipv6.get_version(), 6); assert_eq!(ipv6.packet().len(), 40); // IPv6 header (40 bytes total) } _ => panic!("Expected NULL datalink IPv6 packet"), } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/benches/bench_tls.rs
benches/bench_tls.rs
use criterion::{criterion_group, criterion_main, Criterion}; use huginn_net_tls::{process_ipv4_packet, process_ipv6_packet, tls_process::is_tls_traffic}; use pcap_file::pcap::PcapReader; use pnet::packet::ethernet::{EtherTypes, EthernetPacket}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use pnet::packet::tcp::TcpPacket; use pnet::packet::Packet; use std::error::Error; use std::fs::File; use std::sync::Mutex; use std::time::Duration; /// Number of times to repeat the PCAP dataset for stable benchmarks const REPEAT_COUNT: usize = 1000; /// Benchmark results storage for automatic reporting static BENCHMARK_RESULTS: Mutex<Option<BenchmarkReport>> = Mutex::new(None); #[derive(Debug, Clone)] struct BenchmarkReport { packet_count: usize, tls_packet_count: u32, ja4_fingerprints: u32, pcap_name: String, timings: Vec<(String, Duration)>, } criterion_group!( tls_benches, bench_tls_ja4_fingerprinting_tls12, bench_tls_ja4_fingerprinting_alpn_h2, bench_tls_packet_parsing_performance, bench_tls_ja4_calculation_overhead, bench_tls_parallel_processing, generate_final_report ); criterion_main!(tls_benches); /// Calculate throughput in packets per second fn calculate_throughput(duration: Duration, packet_count: usize) -> f64 { let seconds = duration.as_secs_f64(); if seconds > 0.0 { (packet_count as f64) / seconds } else { 0.0 } } /// Format throughput for display fn format_throughput(pps: f64) -> String { if pps >= 1_000_000.0 { format!("{:.2}M", pps / 1_000_000.0) } else if pps >= 1_000.0 { format!("{:.1}k", pps / 1_000.0) } else { format!("{pps:.0}") } } /// Calculate overhead percentage fn calculate_overhead(baseline: Duration, target: Duration) -> f64 { let baseline_ns = baseline.as_nanos() as f64; let target_ns = target.as_nanos() as f64; if baseline_ns > 0.0 { ((target_ns - baseline_ns) / baseline_ns) * 100.0 } else { 0.0 } } /// Measure average execution time for a benchmark fn 
measure_average_time<F>(mut f: F, iterations: usize) -> Duration where F: FnMut(), { let start = std::time::Instant::now(); for _ in 0..iterations { f(); } start .elapsed() .checked_div(iterations as u32) .unwrap_or(Duration::ZERO) } /// Generate comprehensive benchmark report fn generate_final_report(_c: &mut Criterion) { let report = match BENCHMARK_RESULTS.lock() { Ok(guard) => guard.clone(), Err(_) => return, }; let Some(report) = report else { return; }; println!("\n"); println!("==============================================================================="); println!(" TLS BENCHMARK ANALYSIS REPORT "); println!("==============================================================================="); println!(); println!("PCAP Analysis Summary:"); println!(" - PCAP file: {}", report.pcap_name); println!(" - Total packets analyzed: {}", report.packet_count); println!(" - TLS packets found: {}", report.tls_packet_count); println!(" - JA4 fingerprints generated: {}", report.ja4_fingerprints); let tls_effectiveness = (report.tls_packet_count as f64 / report.packet_count as f64) * 100.0; println!(" - TLS packet effectiveness: {tls_effectiveness:.1}%"); println!(); if report.timings.is_empty() { return; } // Find key timings for calculations let detection_time = report .timings .iter() .find(|(name, _)| name.contains("detection")) .map(|(_, t)| *t); let parsing_time = report .timings .iter() .find(|(name, _)| name.contains("parsing")) .map(|(_, t)| *t); let tls_processing = report .timings .iter() .find(|(name, _)| name.contains("tls_processing") && !name.contains("full")) .map(|(_, t)| *t); let full_tls = report .timings .iter() .find(|(name, _)| name.contains("full_tls_processing")) .map(|(_, t)| *t); println!("Performance Summary:"); println!("+--------------------------------------------------------------------------+"); println!("| Operation | Time/Packet | Throughput | Overhead |"); 
println!("+--------------------------------------------------------------------------+"); for (name, duration) in &report.timings { let per_packet = duration .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let pps_str = format_throughput(throughput); let overhead_str = if let Some(baseline) = detection_time { if name.contains("detection") { " 1.0x ".to_string() } else { let overhead = calculate_overhead(baseline, per_packet); format!("{:>7.1}x", overhead / 100.0 + 1.0) } } else { " - ".to_string() }; let display_name = name .replace("tls_", "") .replace("_", " ") .chars() .take(32) .collect::<String>(); println!( "| {display_name:<32} | {per_packet:>11.3?} | {pps_str:>9} pps | {overhead_str:>10} |" ); } println!("+--------------------------------------------------------------------------+"); println!(); // Overhead Analysis if let (Some(detection), Some(processing)) = (detection_time, tls_processing) { let overhead = calculate_overhead(detection, processing); println!("Overhead Analysis:"); println!( " - TLS Detection -> Full Processing: {:.1}x overhead (parsing + JA4 calculation)", overhead / 100.0 + 1.0 ); } if let (Some(parsing), Some(processing)) = (parsing_time, tls_processing) { let overhead = calculate_overhead(parsing, processing); println!( " - Parsing -> Full Processing: {:.1}x overhead (JA4 calculation only)", overhead / 100.0 + 1.0 ); } println!(); // Capacity Planning let capacity_time = full_tls.or(tls_processing); if let Some(processing) = capacity_time { let per_packet = processing .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("Capacity Planning:"); println!(); println!("Sequential Mode (1 core):"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!( " - 1 Gbps 
(81,274 pps): {:.1}% CPU{}", cpu_1gbps, if cpu_1gbps > 100.0 { " [OVERLOAD]" } else { "" } ); println!( " - 10 Gbps (812,740 pps): {:.1}% CPU{}", cpu_10gbps, if cpu_10gbps > 100.0 { " [OVERLOAD]" } else { "" } ); // Parallel Mode Analysis let parallel_2 = report .timings .iter() .find(|(name, _)| name.contains("parallel_2_workers")) .map(|(_, t)| *t); let parallel_4 = report .timings .iter() .find(|(name, _)| name.contains("parallel_4_workers")) .map(|(_, t)| *t); let parallel_8 = report .timings .iter() .find(|(name, _)| name.contains("parallel_8_workers")) .map(|(_, t)| *t); if parallel_2.is_some() || parallel_4.is_some() || parallel_8.is_some() { println!(); println!("Parallel Mode Performance:"); println!(); let available_cpus = std::thread::available_parallelism() .map(|n| n.get()) .unwrap_or(1); println!(" System CPUs: {available_cpus}"); println!(); if let Some(p2) = parallel_2 { let per_packet = p2 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); println!(" 2 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } if let Some(p4) = parallel_4 { let per_packet = p4 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); println!(); println!(" 4 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } if let Some(p8) = parallel_8 { let per_packet = p8 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = 
calculate_throughput(per_packet, 1); println!(); println!(" 8 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } println!(); println!("Note: TLS uses round-robin dispatch (stateless processing)"); println!(" Parallel benchmarks include worker pool overhead"); } } println!(); println!("Benchmark report generation complete"); println!(); } fn load_packets_from_pcap(pcap_path: &str) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let file = File::open(pcap_path)?; let mut pcap_reader = PcapReader::new(file)?; let mut packets = Vec::new(); while let Some(pkt) = pcap_reader.next_packet() { packets.push(pkt?.data.into()); } Ok(packets) } /// Load packets from PCAP and repeat them for stable benchmarks fn load_packets_repeated(pcap_path: &str, repeat: usize) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let packets = load_packets_from_pcap(pcap_path)?; if packets.is_empty() { return Ok(packets); } // Repeat packets to get a stable benchmark dataset let capacity = packets.len().saturating_mul(repeat); let mut repeated = Vec::with_capacity(capacity); for _ in 0..repeat { repeated.extend(packets.iter().cloned()); } Ok(repeated) } /// Detect if packet contains TLS traffic (using library function) fn detect_tls_in_packet(packet: &[u8]) -> bool { let ethernet = match EthernetPacket::new(packet) { Some(eth) => eth, None => return false, }; match ethernet.get_ethertype() { EtherTypes::Ipv4 => { let ipv4 = match Ipv4Packet::new(ethernet.payload()) { Some(ip) => ip, None => return false, }; if ipv4.get_next_level_protocol() == IpNextHeaderProtocols::Tcp { if let Some(tcp) = TcpPacket::new(ipv4.payload()) { return is_tls_traffic(tcp.payload()); } } false } EtherTypes::Ipv6 => { let ipv6 = match Ipv6Packet::new(ethernet.payload()) { Some(ip) => ip, None 
=> return false, }; if ipv6.get_next_header() == IpNextHeaderProtocols::Tcp { if let Some(tcp) = TcpPacket::new(ipv6.payload()) { return is_tls_traffic(tcp.payload()); } } false } _ => false, } } /// Process a packet using the public TLS API fn process_tls_packet(packet: &[u8]) -> Option<huginn_net_tls::TlsClientOutput> { match huginn_net_tls::packet_parser::parse_packet(packet) { huginn_net_tls::packet_parser::IpPacket::Ipv4(ipv4) => { process_ipv4_packet(&ipv4).ok().flatten() } huginn_net_tls::packet_parser::IpPacket::Ipv6(ipv6) => { process_ipv6_packet(&ipv6).ok().flatten() } huginn_net_tls::packet_parser::IpPacket::None => None, } } /// Benchmark TLS JA4 fingerprinting using TLS 1.2 PCAP fn bench_tls_ja4_fingerprinting_tls12(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/tls12.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TLS 1.2 PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TLS 1.2 PCAP file"); return; } println!("TLS 1.2 PCAP Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count TLS packets for analysis let mut tls_packet_count: u32 = 0; for packet in &packets { if process_tls_packet(packet).is_some() { tls_packet_count = tls_packet_count.saturating_add(1); } } println!(" TLS packets found: {tls_packet_count}"); println!("--------------------"); // Initialize benchmark report if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { *guard = Some(BenchmarkReport { packet_count: packets.len(), tls_packet_count, ja4_fingerprints: tls_packet_count, pcap_name: format!("tls12.pcap ({REPEAT_COUNT}x)"), timings: Vec::new(), }); } let mut group = c.benchmark_group("TLS_JA4_TLS12"); // Baseline: TLS detection only group.bench_function("tls_detection", |b| { b.iter(|| { for packet in packets.iter() { let _ = detect_tls_in_packet(packet); } }) }); // Benchmark TLS processing group.bench_function("tls_processing", |b| { b.iter(|| { for 
packet in packets.iter() { let _ = process_tls_packet(packet); } }) }); // Benchmark TLS packet parsing only (without full analysis) group.bench_function("tls_packet_parsing", |b| { b.iter(|| { for packet in packets.iter() { // Just parse the packet structure without full analysis let _ = huginn_net_tls::packet_parser::parse_packet(packet); } }) }); group.finish(); // Measure and store actual times for reporting let tls_detection_time = measure_average_time( || { for packet in packets.iter() { let _ = detect_tls_in_packet(packet); } }, 10, ); let tls_processing_time = measure_average_time( || { for packet in packets.iter() { let _ = process_tls_packet(packet); } }, 10, ); let parsing_time = measure_average_time( || { for packet in packets.iter() { let _ = huginn_net_tls::packet_parser::parse_packet(packet); } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("tls_detection".to_string(), tls_detection_time)); report .timings .push(("tls_processing".to_string(), tls_processing_time)); report .timings .push(("tls_packet_parsing".to_string(), parsing_time)); } } } /// Benchmark TLS JA4 fingerprinting using TLS ALPN H2 PCAP fn bench_tls_ja4_fingerprinting_alpn_h2(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/tls-alpn-h2.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TLS ALPN H2 PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TLS ALPN H2 PCAP file"); return; } println!("TLS ALPN H2 PCAP Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count TLS packets for analysis let mut tls_packet_count: u32 = 0; for packet in &packets { if process_tls_packet(packet).is_some() { tls_packet_count = tls_packet_count.saturating_add(1); } } println!(" TLS packets found: {tls_packet_count}"); println!("--------------------"); let mut group = c.benchmark_group("TLS_JA4_ALPN_H2"); // 
Benchmark TLS processing group.bench_function("tls_processing", |b| { b.iter(|| { for packet in packets.iter() { let _ = process_tls_packet(packet); } }) }); // Benchmark with TLS extensions analysis group.bench_function("tls_extensions_analysis", |b| { b.iter(|| { for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { // Access JA4 fingerprint to ensure full processing let _ = &result.sig.ja4.full; let _ = &result.sig.ja4.raw; } } }) }); group.finish(); // Measure and store ALPN H2 times let tls_alpn_processing_time = measure_average_time( || { for packet in packets.iter() { let _ = process_tls_packet(packet); } }, 10, ); let tls_extensions_time = measure_average_time( || { for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { let _ = &result.sig.ja4.full; let _ = &result.sig.ja4.raw; } } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("tls_alpn_processing".to_string(), tls_alpn_processing_time)); report .timings .push(("tls_extensions_analysis".to_string(), tls_extensions_time)); } } } /// Benchmark TLS packet parsing performance without JA4 calculation fn bench_tls_packet_parsing_performance(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/tls12.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TLS PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TLS PCAP file"); return; } println!("TLS Packet Parsing Performance:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count TLS packets for analysis let mut tls_packet_count: u32 = 0; for packet in &packets { if process_tls_packet(packet).is_some() { tls_packet_count = tls_packet_count.saturating_add(1); } } println!(" TLS packets found: {tls_packet_count}"); println!("--------------------"); let mut group = c.benchmark_group("TLS_Packet_Parsing"); // Benchmark raw packet 
parsing (just structure, no fingerprinting) group.bench_function("raw_packet_parsing", |b| { b.iter(|| { for packet in packets.iter() { let _ = huginn_net_tls::packet_parser::parse_packet(packet); } }) }); // Benchmark full TLS processing with JA4 fingerprinting group.bench_function("full_tls_processing", |b| { b.iter(|| { for packet in packets.iter() { let _ = process_tls_packet(packet); } }) }); // Benchmark TLS processing with result extraction group.bench_function("tls_with_result_extraction", |b| { b.iter(|| { for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { // Force evaluation of JA4 fingerprints let _ = &result.sig.ja4.full; let _ = &result.sig.ja4.raw; let _ = &result.sig.version; } } }) }); group.finish(); // Measure and store parsing performance times let raw_parsing_time = measure_average_time( || { for packet in packets.iter() { let _ = huginn_net_tls::packet_parser::parse_packet(packet); } }, 10, ); let full_tls_time = measure_average_time( || { for packet in packets.iter() { let _ = process_tls_packet(packet); } }, 10, ); let extraction_time = measure_average_time( || { for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { let _ = &result.sig.ja4.full; let _ = &result.sig.ja4.raw; let _ = &result.sig.version; } } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("raw_packet_parsing".to_string(), raw_parsing_time)); report .timings .push(("full_tls_processing".to_string(), full_tls_time)); report .timings .push(("tls_with_result_extraction".to_string(), extraction_time)); } } } /// Benchmark JA4 calculation overhead by comparing different processing levels fn bench_tls_ja4_calculation_overhead(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/tls-alpn-h2.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TLS ALPN H2 PCAP file: {e}"); return; } }; if packets.is_empty() { 
eprintln!("No packets found in TLS ALPN H2 PCAP file"); return; } println!("JA4 Calculation Overhead Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count TLS packets for analysis let mut tls_packet_count: u32 = 0; let mut ja4_count: u32 = 0; for packet in &packets { if let Some(_result) = process_tls_packet(packet) { tls_packet_count = tls_packet_count.saturating_add(1); // JA4 is always generated if we have a TLS result ja4_count = ja4_count.saturating_add(1); } } println!(" TLS packets found: {tls_packet_count}"); println!(" JA4 fingerprints generated: {ja4_count}"); println!("--------------------"); let mut group = c.benchmark_group("TLS_JA4_Overhead"); // Benchmark basic TLS processing group.bench_function("basic_tls_processing", |b| { b.iter(|| { for packet in packets.iter() { let _ = process_tls_packet(packet); } }) }); // Benchmark with JA4 fingerprint access (forces calculation) group.bench_function("ja4_fingerprint_access", |b| { b.iter(|| { for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { // Access JA4 fingerprints to force calculation let _ = result.sig.ja4.full.to_string(); let _ = result.sig.ja4.raw.to_string(); } } }) }); // Benchmark with full result analysis group.bench_function("full_result_analysis", |b| { b.iter(|| { let mut results = Vec::new(); for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { // Collect all TLS information results.push(( result.sig.version, result.sig.ja4.full.clone(), result.sig.ja4.raw.clone(), result.source, result.destination, )); } } // Process results to simulate real-world usage let _ = results.len(); }) }); group.finish(); // Measure and store JA4 overhead times let basic_tls_time = measure_average_time( || { for packet in packets.iter() { let _ = process_tls_packet(packet); } }, 10, ); let ja4_access_time = measure_average_time( || { for packet in packets.iter() { if let Some(result) = 
process_tls_packet(packet) { let _ = result.sig.ja4.full.to_string(); let _ = result.sig.ja4.raw.to_string(); } } }, 10, ); let full_analysis_time = measure_average_time( || { let mut results = Vec::new(); for packet in packets.iter() { if let Some(result) = process_tls_packet(packet) { results.push(( result.sig.version, result.sig.ja4.full.clone(), result.sig.ja4.raw.clone(), result.source, result.destination, )); } } let _ = results.len(); }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("basic_tls_processing".to_string(), basic_tls_time)); report .timings .push(("ja4_fingerprint_access".to_string(), ja4_access_time)); report .timings .push(("full_result_analysis".to_string(), full_analysis_time)); } } } /// Benchmark TLS parallel processing with different worker counts fn bench_tls_parallel_processing(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/tls12.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TLS PCAP file for parallel benchmark: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TLS PCAP file for parallel benchmark"); return; } println!("TLS Parallel Processing Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); println!("--------------------"); let worker_counts = [2, 4, 8]; let mut group = c.benchmark_group("TLS_Parallel_Processing"); for &num_workers in &worker_counts { let bench_name = format!("parallel_{num_workers}_workers"); group.bench_function(&bench_name, |b| { b.iter(|| { let (tx, rx) = std::sync::mpsc::channel(); let pool = match huginn_net_tls::WorkerPool::new(num_workers, 100, 32, 10, tx, None) { Ok(p) => p, Err(e) => panic!("Failed to create worker pool: {e}"), }; // Dispatch all packets for packet in packets.iter() { let _ = pool.dispatch(packet.clone()); } // Shutdown and collect results pool.shutdown(); let mut _result_count: usize = 0; while 
rx.recv().is_ok() { _result_count = _result_count.saturating_add(1); } }) }); } group.finish(); // Measure parallel processing times for reporting let parallel_2_workers_time = measure_average_time( || { let (tx, rx) = std::sync::mpsc::channel(); let pool = match huginn_net_tls::WorkerPool::new(2, 100, 32, 10, tx, None) { Ok(p) => p, Err(e) => panic!("Failed to create worker pool: {e}"), }; for packet in packets.iter() { let _ = pool.dispatch(packet.clone()); } pool.shutdown(); while rx.recv().is_ok() {} }, 3, ); let parallel_4_workers_time = measure_average_time( || { let (tx, rx) = std::sync::mpsc::channel(); let pool = match huginn_net_tls::WorkerPool::new(4, 100, 32, 10, tx, None) { Ok(p) => p, Err(e) => panic!("Failed to create worker pool: {e}"), }; for packet in packets.iter() { let _ = pool.dispatch(packet.clone()); } pool.shutdown(); while rx.recv().is_ok() {} }, 3, ); let parallel_8_workers_time = measure_average_time( || { let (tx, rx) = std::sync::mpsc::channel(); let pool = match huginn_net_tls::WorkerPool::new(8, 100, 32, 10, tx, None) { Ok(p) => p, Err(e) => panic!("Failed to create worker pool: {e}"), }; for packet in packets.iter() { let _ = pool.dispatch(packet.clone()); } pool.shutdown(); while rx.recv().is_ok() {} }, 3, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("parallel_2_workers".to_string(), parallel_2_workers_time)); report .timings .push(("parallel_4_workers".to_string(), parallel_4_workers_time)); report .timings .push(("parallel_8_workers".to_string(), parallel_8_workers_time)); } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/benches/bench_http.rs
benches/bench_http.rs
use criterion::{criterion_group, criterion_main, Criterion}; use huginn_net_db::Database; use huginn_net_http::{ process_ipv4_packet, process_ipv6_packet, FlowKey, HttpProcessors, SignatureMatcher, TcpFlow, }; use pcap_file::pcap::PcapReader; use std::error::Error; use std::fs::File; use std::sync::Mutex; use std::time::Duration; use ttl_cache::TtlCache; /// Number of times to repeat the PCAP dataset for stable benchmarks const REPEAT_COUNT: usize = 1000; /// Benchmark results storage for automatic reporting static BENCHMARK_RESULTS: Mutex<Option<BenchmarkReport>> = Mutex::new(None); #[derive(Debug, Clone)] struct BenchmarkReport { packet_count: usize, request_count: u32, response_count: u32, browser_detections: u32, server_detections: u32, http1_requests: u32, http2_requests: u32, timings: Vec<(String, Duration)>, } criterion_group!( http_benches, bench_http_browser_detection, bench_http_server_detection, bench_http_protocol_analysis, bench_http_processing_overhead, bench_http_parallel_processing, generate_final_report ); criterion_main!(http_benches); /// Calculate throughput in packets per second fn calculate_throughput(duration: Duration, packet_count: usize) -> f64 { let seconds = duration.as_secs_f64(); if seconds > 0.0 { (packet_count as f64) / seconds } else { 0.0 } } /// Format throughput for display fn format_throughput(pps: f64) -> String { if pps >= 1_000_000.0 { format!("{:.2}M", pps / 1_000_000.0) } else if pps >= 1_000.0 { format!("{:.1}k", pps / 1_000.0) } else { format!("{pps:.0}") } } /// Calculate overhead percentage fn calculate_overhead(baseline: Duration, target: Duration) -> f64 { let baseline_ns = baseline.as_nanos() as f64; let target_ns = target.as_nanos() as f64; if baseline_ns > 0.0 { ((target_ns - baseline_ns) / baseline_ns) * 100.0 } else { 0.0 } } /// Measure average execution time for a benchmark fn measure_average_time<F>(mut f: F, iterations: usize) -> Duration where F: FnMut(), { let start = std::time::Instant::now(); for _ in 
0..iterations { f(); } start .elapsed() .checked_div(iterations as u32) .unwrap_or(Duration::ZERO) } /// Generate comprehensive benchmark report fn generate_final_report(_c: &mut Criterion) { let report = match BENCHMARK_RESULTS.lock() { Ok(guard) => guard.clone(), Err(_) => return, }; let Some(report) = report else { return; }; let original_count = report.packet_count / REPEAT_COUNT; println!("\n"); println!("==============================================================================="); println!(" HTTP BENCHMARK ANALYSIS REPORT "); println!("==============================================================================="); println!(); println!("PCAP Analysis Summary:"); println!( " - Total packets analyzed: {} (repeated {}x)", report.packet_count, REPEAT_COUNT ); println!(" - Original PCAP packets: {original_count}"); println!(" - HTTP requests: {}", report.request_count); println!(" - HTTP responses: {}", report.response_count); println!(" - Browser detections: {}", report.browser_detections); println!(" - Server detections: {}", report.server_detections); println!(" - HTTP/1.x requests: {}", report.http1_requests); println!(" - HTTP/2 requests: {}", report.http2_requests); let http_effectiveness = ((report.request_count.saturating_add(report.response_count)) as f64 / report.packet_count as f64) * 100.0; println!(" - HTTP packet effectiveness: {http_effectiveness:.1}%"); let detection_rate = if report.request_count > 0 { (report.browser_detections as f64 / report.request_count as f64) * 100.0 } else { 0.0 }; println!(" - Browser detection rate: {detection_rate:.1}%"); println!(); if report.timings.is_empty() { return; } // Find key timings for calculations let parsing_time = report .timings .iter() .find(|(name, _)| name.contains("parsing")) .map(|(_, t)| *t); let http_no_match = report .timings .iter() .find(|(name, _)| name.contains("without")) .map(|(_, t)| *t); let http_with_match = report .timings .iter() .find(|(name, _)| name.contains("with")) 
.map(|(_, t)| *t); let full_analysis = report .timings .iter() .find(|(name, _)| name.contains("full_http_analysis")) .map(|(_, t)| *t); println!("Performance Summary:"); println!("+--------------------------------------------------------------------------+"); println!("| Operation | Time/Packet | Throughput | vs Parsing |"); println!("+--------------------------------------------------------------------------+"); for (name, duration) in &report.timings { let per_packet = duration .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let pps_str = format_throughput(throughput); let overhead_str = if let Some(baseline) = parsing_time { if name != "http_packet_parsing" { let overhead = calculate_overhead(baseline, per_packet); format!("{:>7.0}x", overhead / 100.0 + 1.0) } else { " 1.0x ".to_string() } } else { " - ".to_string() }; let display_name = name .replace("http_", "") .replace("_", " ") .chars() .take(32) .collect::<String>(); println!( "| {display_name:<32} | {per_packet:>11.3?} | {pps_str:>9} pps | {overhead_str:>10} |" ); } println!("+--------------------------------------------------------------------------+"); println!(); // Overhead Analysis if let (Some(parsing), Some(full)) = (parsing_time, full_analysis) { let overhead = calculate_overhead(parsing, full); println!("Overhead Analysis:"); println!( " - Parsing -> Full Analysis: {:.0}x overhead (expected for comprehensive analysis)", overhead / 100.0 + 1.0 ); } if let (Some(no_match), Some(with_match)) = (http_no_match, http_with_match) { let overhead = calculate_overhead(no_match, with_match); println!( " - HTTP without matching -> with matching: {overhead:.1}% (database lookup cost)" ); } println!(); // Capacity Planning if let Some(full) = full_analysis { let per_packet = full .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / 
throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("Capacity Planning:"); println!(); println!("Sequential Mode (1 core):"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!( " - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU{}", if cpu_1gbps > 100.0 { " [OVERLOAD]" } else { "" } ); println!( " - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU{}", if cpu_10gbps > 100.0 { " [OVERLOAD]" } else { "" } ); } // Parallel Mode Results let parallel_2 = report .timings .iter() .find(|(name, _)| name.contains("parallel_2_workers")) .map(|(_, t)| *t); let parallel_4 = report .timings .iter() .find(|(name, _)| name.contains("parallel_4_workers")) .map(|(_, t)| *t); let parallel_8 = report .timings .iter() .find(|(name, _)| name.contains("parallel_8_workers")) .map(|(_, t)| *t); if parallel_2.is_some() || parallel_4.is_some() || parallel_8.is_some() { println!(); println!("Parallel Mode (Multi-Worker):"); println!("Note: Includes worker pool overhead and flow-based hashing"); println!(); if let Some(p2) = parallel_2 { let per_packet = p2 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("2 Workers:"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!(" - 1 Gbps CPU: {cpu_1gbps:.1}%"); println!(" - 10 Gbps CPU: {cpu_10gbps:.1}%"); println!(); } if let Some(p4) = parallel_4 { let per_packet = p4 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("4 Workers:"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!(" - 1 Gbps CPU: {cpu_1gbps:.1}%"); println!(" - 10 Gbps CPU: {cpu_10gbps:.1}%"); println!(); } 
if let Some(p8) = parallel_8 { let per_packet = p8 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("8 Workers:"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!(" - 1 Gbps CPU: {cpu_1gbps:.1}%"); println!(" - 10 Gbps CPU: {cpu_10gbps:.1}%"); println!(); } } println!(); println!("Benchmark report generation complete"); println!(); } fn load_packets_from_pcap(pcap_path: &str) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let file = File::open(pcap_path)?; let mut pcap_reader = PcapReader::new(file)?; let mut packets = Vec::new(); while let Some(pkt) = pcap_reader.next_packet() { packets.push(pkt?.data.into()); } Ok(packets) } /// Load packets from PCAP and repeat them for stable benchmarking fn load_packets_repeated(pcap_path: &str, repeat: usize) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let packets = load_packets_from_pcap(pcap_path)?; if packets.is_empty() { return Ok(packets); } // Repeat packets to get a stable benchmark dataset let capacity = packets.len().saturating_mul(repeat); let mut repeated = Vec::with_capacity(capacity); for _ in 0..repeat { repeated.extend(packets.iter().cloned()); } Ok(repeated) } /// Process a packet using the public HTTP API fn process_http_packet( packet: &[u8], http_flows: &mut TtlCache<FlowKey, TcpFlow>, http_processors: &HttpProcessors, matcher: Option<&SignatureMatcher>, ) -> Option<huginn_net_http::HttpAnalysisResult> { match huginn_net_http::packet_parser::parse_packet(packet) { huginn_net_http::packet_parser::IpPacket::Ipv4(ipv4) => { process_ipv4_packet(&ipv4, http_flows, http_processors, matcher).ok() } huginn_net_http::packet_parser::IpPacket::Ipv6(ipv6) => { process_ipv6_packet(&ipv6, http_flows, http_processors, matcher).ok() } huginn_net_http::packet_parser::IpPacket::None => None, } } /// Benchmark HTTP 
browser detection using simple GET PCAP fn bench_http_browser_detection(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/http-simple-get.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load HTTP simple GET PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in HTTP simple GET PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; let matcher = SignatureMatcher::new(&db); let http_processors = HttpProcessors::new(); println!("HTTP Browser Detection Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count HTTP analysis results let mut http_flows = TtlCache::new(1000); let mut request_count: u32 = 0; let mut response_count: u32 = 0; let mut browser_detections: u32 = 0; let mut server_detections: u32 = 0; let mut http1_requests: u32 = 0; let mut http2_requests: u32 = 0; for packet in &packets { if let Some(result) = process_http_packet(packet, &mut http_flows, &http_processors, Some(&matcher)) { if let Some(request) = &result.http_request { request_count = request_count.saturating_add(1); if request.browser_matched.browser.is_some() { browser_detections = browser_detections.saturating_add(1); } match request.sig.matching.version { huginn_net_http::http::Version::V10 | huginn_net_http::http::Version::V11 => { http1_requests = http1_requests.saturating_add(1); } huginn_net_http::http::Version::V20 => { http2_requests = http2_requests.saturating_add(1); } _ => {} } } if let Some(response) = &result.http_response { response_count = response_count.saturating_add(1); if response.web_server_matched.web_server.is_some() { server_detections = server_detections.saturating_add(1); } } } } println!(" HTTP requests: {request_count}"); println!(" HTTP responses: {response_count}"); println!(" Browser detections: {browser_detections}"); println!(" Server detections: 
{server_detections}"); println!("--------------------"); // Initialize benchmark report if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { *guard = Some(BenchmarkReport { packet_count: packets.len(), request_count, response_count, browser_detections, server_detections, http1_requests, http2_requests, timings: Vec::new(), }); } let mut group = c.benchmark_group("HTTP_Browser_Detection"); // Benchmark HTTP processing with browser matching group.bench_function("http_with_browser_matching", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, Some(&matcher)); } }) }); // Benchmark HTTP processing without browser matching group.bench_function("http_without_browser_matching", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, None); } }) }); // Benchmark raw packet parsing only group.bench_function("http_packet_parsing", |b| { b.iter(|| { for packet in packets.iter() { let _ = huginn_net_http::packet_parser::parse_packet(packet); } }) }); group.finish(); // Measure and store actual times for reporting let http_with_match_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, Some(&matcher)); } }, 10, ); let http_without_match_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, None); } }, 10, ); let parsing_time = measure_average_time( || { for packet in packets.iter() { let _ = huginn_net_http::packet_parser::parse_packet(packet); 
} }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("http_with_browser_matching".to_string(), http_with_match_time)); report .timings .push(("http_without_browser_matching".to_string(), http_without_match_time)); report .timings .push(("http_packet_parsing".to_string(), parsing_time)); } } } /// Benchmark HTTP server detection performance fn bench_http_server_detection(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/http-simple-get.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load HTTP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in HTTP PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; println!("HTTP Server Detection Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count server detections let matcher = SignatureMatcher::new(&db); let http_processors = HttpProcessors::new(); let mut http_flows = TtlCache::new(1000); let mut server_detections: u32 = 0; for packet in &packets { if let Some(result) = process_http_packet(packet, &mut http_flows, &http_processors, Some(&matcher)) { if let Some(response) = result.http_response { if response.web_server_matched.web_server.is_some() { server_detections = server_detections.saturating_add(1); } } } } println!(" Server detections: {server_detections}"); println!("--------------------"); let mut group = c.benchmark_group("HTTP_Server_Detection"); // Benchmark server detection with database matching group.bench_function("server_with_matching", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, Some(&matcher)) { if let Some(response) = 
result.http_response { // Access server information to ensure full processing let _ = &response.web_server_matched; let _ = &response.diagnosis; } } } }) }); // Benchmark server detection without database matching group.bench_function("server_without_matching", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { if let Some(response) = result.http_response { // Access basic response information let _ = &response.diagnosis; } } } }) }); group.finish(); // Measure and store server detection times let server_with_match_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, Some(&matcher)) { if let Some(response) = result.http_response { let _ = &response.web_server_matched; let _ = &response.diagnosis; } } } }, 10, ); let server_without_match_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { if let Some(response) = result.http_response { let _ = &response.diagnosis; } } } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("server_with_matching".to_string(), server_with_match_time)); report .timings .push(("server_without_matching".to_string(), server_without_match_time)); } } } /// Benchmark HTTP protocol analysis performance (HTTP/1 vs HTTP/2) fn bench_http_protocol_analysis(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/http-simple-get.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load HTTP PCAP file: {e}"); return; } 
}; if packets.is_empty() { eprintln!("No packets found in HTTP PCAP file"); return; } println!("HTTP Protocol Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count HTTP protocol versions let http_processors = HttpProcessors::new(); let mut http_flows = TtlCache::new(1000); let mut http1_requests: u32 = 0; let mut http2_requests: u32 = 0; for packet in &packets { if let Some(result) = process_http_packet(packet, &mut http_flows, &http_processors, None) { if let Some(request) = result.http_request { match request.sig.matching.version { huginn_net_http::http::Version::V10 | huginn_net_http::http::Version::V11 => { http1_requests = http1_requests.saturating_add(1); } huginn_net_http::http::Version::V20 => { http2_requests = http2_requests.saturating_add(1); } _ => {} // V30, Any, etc. } } } } println!(" HTTP/1.x requests: {http1_requests}"); println!(" HTTP/2 requests: {http2_requests}"); println!("--------------------"); let mut group = c.benchmark_group("HTTP_Protocol_Analysis"); // Benchmark HTTP protocol detection group.bench_function("protocol_detection", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { // Access protocol version information if let Some(request) = &result.http_request { let _ = request.sig.matching.version; } if let Some(response) = &result.http_response { let _ = response.sig.matching.version; } } } }) }); // Benchmark HTTP header analysis group.bench_function("header_analysis", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { if let Some(request) = &result.http_request { // Access header information to ensure full processing let _ = &request.sig.headers; let _ = &request.lang; } if let 
Some(response) = &result.http_response { let _ = &response.sig.headers; } } } }) }); // Benchmark with different flow cache sizes group.bench_function("small_flow_cache", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(100); // Smaller cache for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, None); } }) }); group.bench_function("large_flow_cache", |b| { b.iter(|| { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(10000); // Larger cache for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, None); } }) }); group.finish(); // Measure and store protocol analysis times let protocol_detection_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { if let Some(request) = &result.http_request { let _ = request.sig.matching.version; } if let Some(response) = &result.http_response { let _ = response.sig.matching.version; } } } }, 10, ); let header_analysis_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, None) { if let Some(request) = &result.http_request { let _ = &request.sig.headers; let _ = &request.lang; } if let Some(response) = &result.http_response { let _ = &response.sig.headers; } } } }, 10, ); let small_cache_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(100); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, None); } }, 10, ); let large_cache_time = measure_average_time( || { let processors = HttpProcessors::new(); let mut flows = TtlCache::new(10000); for packet in packets.iter() { let _ = 
process_http_packet(packet, &mut flows, &processors, None); } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("protocol_detection".to_string(), protocol_detection_time)); report .timings .push(("header_analysis".to_string(), header_analysis_time)); report .timings .push(("small_flow_cache".to_string(), small_cache_time)); report .timings .push(("large_flow_cache".to_string(), large_cache_time)); } } } /// Benchmark HTTP processing overhead analysis fn bench_http_processing_overhead(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/http-simple-get.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load HTTP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in HTTP PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; println!("HTTP Processing Overhead Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); println!("--------------------"); let mut group = c.benchmark_group("HTTP_Processing_Overhead"); // Benchmark minimal HTTP processing group.bench_function("minimal_processing", |b| { b.iter(|| { for packet in packets.iter() { let _ = huginn_net_http::packet_parser::parse_packet(packet); } }) }); // Benchmark full HTTP analysis group.bench_function("full_http_analysis", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); for packet in packets.iter() { let _ = process_http_packet(packet, &mut flows, &processors, Some(&matcher)); } }) }); // Benchmark with result collection group.bench_function("full_analysis_with_collection", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let processors = HttpProcessors::new(); let mut flows = TtlCache::new(1000); let mut results = Vec::new(); for packet 
in packets.iter() { if let Some(result) = process_http_packet(packet, &mut flows, &processors, Some(&matcher)) { // Collect all HTTP analysis results results.push(( result.http_request.is_some(), result.http_response.is_some(), result.http_request.as_ref().and_then(|r| { r.browser_matched.browser.as_ref().map(|b| b.name.clone()) }), result.http_response.as_ref().and_then(|r| { r.web_server_matched .web_server .as_ref() .map(|s| s.name.clone()) }), )); } } // Process results to simulate real-world usage let _ = results.len(); }) }); group.finish(); // Measure and store processing overhead times let minimal_processing_time = measure_average_time( || { for packet in packets.iter() { let _ = huginn_net_http::packet_parser::parse_packet(packet); } }, 10, ); let full_http_analysis_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db);
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
true
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/benches/bench_tcp.rs
benches/bench_tcp.rs
use criterion::{criterion_group, criterion_main, Criterion}; use huginn_net_db::Database; use huginn_net_tcp::{ process_ipv4_packet, process_ipv6_packet, ConnectionKey, SignatureMatcher, TcpTimestamp, }; use pcap_file::pcap::PcapReader; use std::error::Error; use std::fs::File; use std::sync::Mutex; use std::time::Duration; use ttl_cache::TtlCache; /// Number of times to repeat the PCAP dataset for stable benchmarks const REPEAT_COUNT: usize = 1000; /// Benchmark results storage for automatic reporting static BENCHMARK_RESULTS: Mutex<Option<BenchmarkReport>> = Mutex::new(None); #[derive(Debug, Clone)] struct BenchmarkReport { packet_count: usize, syn_count: u32, syn_ack_count: u32, mtu_count: u32, uptime_count: u32, timings: Vec<(String, Duration)>, } criterion_group!( tcp_benches, bench_tcp_os_fingerprinting, bench_tcp_mtu_detection, bench_tcp_uptime_calculation, bench_tcp_processing_overhead, bench_tcp_parallel_processing, generate_final_report ); criterion_main!(tcp_benches); /// Calculate throughput in packets per second fn calculate_throughput(duration: Duration, packet_count: usize) -> f64 { let seconds = duration.as_secs_f64(); if seconds > 0.0 { (packet_count as f64) / seconds } else { 0.0 } } /// Format throughput for display fn format_throughput(pps: f64) -> String { if pps >= 1_000_000.0 { format!("{:.2}M", pps / 1_000_000.0) } else if pps >= 1_000.0 { format!("{:.1}k", pps / 1_000.0) } else { format!("{pps:.0}") } } /// Calculate overhead percentage fn calculate_overhead(baseline: Duration, target: Duration) -> f64 { let baseline_ns = baseline.as_nanos() as f64; let target_ns = target.as_nanos() as f64; if baseline_ns > 0.0 { ((target_ns - baseline_ns) / baseline_ns) * 100.0 } else { 0.0 } } /// Measure average execution time for a benchmark fn measure_average_time<F>(mut f: F, iterations: usize) -> Duration where F: FnMut(), { let start = std::time::Instant::now(); for _ in 0..iterations { f(); } start .elapsed() .checked_div(iterations as u32) 
.unwrap_or(Duration::ZERO) } /// Generate comprehensive benchmark report fn generate_final_report(_c: &mut Criterion) { let report = match BENCHMARK_RESULTS.lock() { Ok(guard) => guard.clone(), Err(_) => return, }; let Some(report) = report else { return; }; println!("\n"); println!("==============================================================================="); println!(" TCP BENCHMARK ANALYSIS REPORT "); println!("==============================================================================="); println!(); println!("PCAP Analysis Summary:"); println!(" - Total packets analyzed: {}", report.packet_count); println!(" - SYN packets: {}", report.syn_count); println!(" - SYN-ACK packets: {}", report.syn_ack_count); println!(" - MTU detections: {}", report.mtu_count); println!(" - Uptime calculations: {}", report.uptime_count); let effectiveness = ((report .syn_count .saturating_add(report.syn_ack_count) .saturating_add(report.mtu_count) .saturating_add(report.uptime_count)) as f64 / report.packet_count as f64) * 100.0; println!(" - Analysis effectiveness: {effectiveness:.1}%"); println!(); if report.timings.is_empty() { return; } // Find key timings for calculations let parsing_time = report .timings .iter() .find(|(name, _)| name.contains("parsing")) .map(|(_, t)| *t); let tcp_no_os = report .timings .iter() .find(|(name, _)| name.contains("without_os")) .map(|(_, t)| *t); let tcp_with_os = report .timings .iter() .find(|(name, _)| name.contains("with_os")) .map(|(_, t)| *t); let full_analysis = report .timings .iter() .find(|(name, _)| name.contains("full_tcp_analysis")) .map(|(_, t)| *t); let mtu_no_link = report .timings .iter() .find(|(name, _)| name.contains("mtu_without")) .map(|(_, t)| *t); let mtu_with_link = report .timings .iter() .find(|(name, _)| name.contains("mtu_with")) .map(|(_, t)| *t); println!("Performance Summary:"); println!("+--------------------------------------------------------------------------+"); println!("| Operation | Time/Packet | 
Throughput | vs Parsing |"); println!("+--------------------------------------------------------------------------+"); for (name, duration) in &report.timings { let per_packet = duration .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let pps_str = format_throughput(throughput); let overhead_str = if let Some(baseline) = parsing_time { if name != "tcp_packet_parsing" { let overhead = calculate_overhead(baseline, per_packet); format!("{:>7.0}x", overhead / 100.0 + 1.0) } else { " 1.0x ".to_string() } } else { " - ".to_string() }; let display_name = name .replace("tcp_", "") .replace("_", " ") .chars() .take(32) .collect::<String>(); println!( "| {display_name:<32} | {per_packet:>11.3?} | {pps_str:>9} pps | {overhead_str:>10} |" ); } println!("+--------------------------------------------------------------------------+"); println!(); // Overhead Analysis if let (Some(parsing), Some(full)) = (parsing_time, full_analysis) { let overhead = calculate_overhead(parsing, full); println!("Overhead Analysis:"); println!( " - Parsing -> Full Analysis: {:.0}x overhead (expected for comprehensive analysis)", overhead / 100.0 + 1.0 ); } if let (Some(no_os), Some(with_os)) = (tcp_no_os, tcp_with_os) { let overhead = calculate_overhead(no_os, with_os); println!(" - TCP without OS -> with OS: {overhead:.1}% (database lookup cost)"); } if let (Some(no_link), Some(with_link)) = (mtu_no_link, mtu_with_link) { let overhead = calculate_overhead(no_link, with_link); println!(" - MTU without link -> with link: {overhead:.1}% (MTU database matching)"); } println!(); // Capacity Planning if let Some(full) = full_analysis { let per_packet = full .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!("Capacity Planning:"); println!(); 
println!("Sequential Mode (1 core):"); println!(" - Throughput: {} packets/second", format_throughput(throughput)); println!( " - 1 Gbps (81,274 pps): {:.1}% CPU{}", cpu_1gbps, if cpu_1gbps > 100.0 { " [OVERLOAD]" } else { "" } ); println!( " - 10 Gbps (812,740 pps): {:.1}% CPU{}", cpu_10gbps, if cpu_10gbps > 100.0 { " [OVERLOAD]" } else { "" } ); // Parallel Mode Analysis let parallel_2 = report .timings .iter() .find(|(name, _)| name.contains("parallel_2_workers")) .map(|(_, t)| *t); let parallel_4 = report .timings .iter() .find(|(name, _)| name.contains("parallel_4_workers")) .map(|(_, t)| *t); let parallel_8 = report .timings .iter() .find(|(name, _)| name.contains("parallel_8_workers")) .map(|(_, t)| *t); if parallel_2.is_some() || parallel_4.is_some() || parallel_8.is_some() { println!(); println!("Parallel Mode Performance:"); println!(); let available_cpus = std::thread::available_parallelism() .map(|n| n.get()) .unwrap_or(1); println!(" System CPUs: {available_cpus}"); println!(); if let Some(p2) = parallel_2 { let per_packet = p2 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); println!(" 2 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } if let Some(p4) = parallel_4 { let per_packet = p4 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); println!(); println!(" 4 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } if let Some(p8) = 
parallel_8 { let per_packet = p8 .checked_div(report.packet_count as u32) .unwrap_or(Duration::ZERO); let throughput = calculate_throughput(per_packet, 1); println!(); println!(" 8 Workers:"); println!(" - Throughput: {} pps", format_throughput(throughput)); let cpu_1gbps = (81274.0 / throughput) * 100.0; let cpu_10gbps = (812740.0 / throughput) * 100.0; println!(" - 1 Gbps (81,274 pps): {cpu_1gbps:.1}% CPU"); println!(" - 10 Gbps (812,740 pps): {cpu_10gbps:.1}% CPU"); } println!(); println!("Note: TCP uses hash-based worker assignment for stateful connections"); println!(" Parallel benchmarks include worker pool overhead"); } } println!(); println!("Benchmark report generation complete"); println!(); } fn load_packets_from_pcap(pcap_path: &str) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let file = File::open(pcap_path)?; let mut pcap_reader = PcapReader::new(file)?; let mut packets = Vec::new(); while let Some(pkt) = pcap_reader.next_packet() { packets.push(pkt?.data.into()); } Ok(packets) } /// Load packets from PCAP and repeat them for stable benchmarking fn load_packets_repeated(pcap_path: &str, repeat: usize) -> Result<Vec<Vec<u8>>, Box<dyn Error>> { let packets = load_packets_from_pcap(pcap_path)?; if packets.is_empty() { return Ok(packets); } // Repeat packets to get a stable benchmark dataset let capacity = packets.len().saturating_mul(repeat); let mut repeated = Vec::with_capacity(capacity); for _ in 0..repeat { repeated.extend(packets.iter().cloned()); } Ok(repeated) } /// Process a packet using the public TCP API fn process_tcp_packet( packet: &[u8], connection_tracker: &mut TtlCache<ConnectionKey, TcpTimestamp>, matcher: Option<&SignatureMatcher>, ) -> Option<huginn_net_tcp::TcpAnalysisResult> { match huginn_net_tcp::packet_parser::parse_packet(packet) { huginn_net_tcp::packet_parser::IpPacket::Ipv4(ipv4) => { process_ipv4_packet(&ipv4, connection_tracker, matcher).ok() } huginn_net_tcp::packet_parser::IpPacket::Ipv6(ipv6) => { process_ipv6_packet(&ipv6, 
connection_tracker, matcher).ok() } huginn_net_tcp::packet_parser::IpPacket::None => None, } } /// Benchmark TCP OS fingerprinting using macOS TCP flags PCAP fn bench_tcp_os_fingerprinting(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/macos_tcp_flags.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load macOS TCP flags PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in macOS TCP flags PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; let matcher = SignatureMatcher::new(&db); println!("TCP OS Fingerprinting Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count TCP analysis results let mut connection_tracker = TtlCache::new(1000); let mut syn_count: u32 = 0; let mut syn_ack_count: u32 = 0; let mut mtu_count: u32 = 0; let mut uptime_count: u32 = 0; for packet in &packets { if let Some(result) = process_tcp_packet(packet, &mut connection_tracker, Some(&matcher)) { if result.syn.is_some() { syn_count = syn_count.saturating_add(1); } if result.syn_ack.is_some() { syn_ack_count = syn_ack_count.saturating_add(1); } if result.mtu.is_some() { mtu_count = mtu_count.saturating_add(1); } if result.client_uptime.is_some() || result.server_uptime.is_some() { uptime_count = uptime_count.saturating_add(1); } } } println!(" SYN packets: {syn_count}"); println!(" SYN-ACK packets: {syn_ack_count}"); println!(" MTU detections: {mtu_count}"); println!(" Uptime calculations: {uptime_count}"); println!("--------------------"); // Initialize benchmark report if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { *guard = Some(BenchmarkReport { packet_count: packets.len(), syn_count, syn_ack_count, mtu_count, uptime_count, timings: Vec::new(), }); } let mut group = c.benchmark_group("TCP_OS_Fingerprinting"); // Benchmark TCP processing with OS fingerprinting 
group.bench_function("tcp_with_os_matching", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, Some(&matcher)); } }) }); // Benchmark TCP processing without OS matching group.bench_function("tcp_without_os_matching", |b| { b.iter(|| { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }) }); // Benchmark raw packet parsing only group.bench_function("tcp_packet_parsing", |b| { b.iter(|| { for packet in packets.iter() { let _ = huginn_net_tcp::packet_parser::parse_packet(packet); } }) }); group.finish(); // Measure and store actual times for reporting let tcp_with_os_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, Some(&matcher)); } }, 10, ); let tcp_without_os_time = measure_average_time( || { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }, 10, ); let parsing_time = measure_average_time( || { for packet in packets.iter() { let _ = huginn_net_tcp::packet_parser::parse_packet(packet); } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("tcp_with_os_matching".to_string(), tcp_with_os_time)); report .timings .push(("tcp_without_os_matching".to_string(), tcp_without_os_time)); report .timings .push(("tcp_packet_parsing".to_string(), parsing_time)); } } } /// Benchmark TCP MTU detection performance fn bench_tcp_mtu_detection(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/macos_tcp_flags.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TCP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found 
in TCP PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; println!("TCP MTU Detection Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count MTU detections let matcher = SignatureMatcher::new(&db); let mut connection_tracker = TtlCache::new(1000); let mut mtu_detections: u32 = 0; for packet in &packets { if let Some(result) = process_tcp_packet(packet, &mut connection_tracker, Some(&matcher)) { if result.mtu.is_some() { mtu_detections = mtu_detections.saturating_add(1); } } } println!(" MTU detections: {mtu_detections}"); println!("--------------------"); let mut group = c.benchmark_group("TCP_MTU_Detection"); // Benchmark MTU detection with link matching group.bench_function("mtu_with_link_matching", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, Some(&matcher)) { if let Some(mtu_output) = result.mtu { // Access MTU information to ensure full processing let _ = mtu_output.mtu; let _ = &mtu_output.link; } } } }) }); // Benchmark MTU detection without link matching group.bench_function("mtu_without_link_matching", |b| { b.iter(|| { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, None) { if let Some(mtu_output) = result.mtu { // Access MTU value only let _ = mtu_output.mtu; } } } }) }); group.finish(); // Measure and store MTU detection times let mtu_with_link_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, Some(&matcher)) { if let Some(mtu_output) = result.mtu { let _ = mtu_output.mtu; let _ = &mtu_output.link; } } } }, 10, ); let 
mtu_without_link_time = measure_average_time( || { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, None) { if let Some(mtu_output) = result.mtu { let _ = mtu_output.mtu; } } } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("mtu_with_link_matching".to_string(), mtu_with_link_time)); report .timings .push(("mtu_without_link_matching".to_string(), mtu_without_link_time)); } } } /// Benchmark TCP uptime calculation performance fn bench_tcp_uptime_calculation(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/macos_tcp_flags.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TCP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TCP PCAP file"); return; } println!("TCP Uptime Calculation Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); // Count uptime calculations let mut connection_tracker = TtlCache::new(1000); let mut uptime_calculations: u32 = 0; for packet in &packets { if let Some(result) = process_tcp_packet(packet, &mut connection_tracker, None) { if result.client_uptime.is_some() || result.server_uptime.is_some() { uptime_calculations = uptime_calculations.saturating_add(1); } } } println!(" Uptime calculations: {uptime_calculations}"); println!("--------------------"); let mut group = c.benchmark_group("TCP_Uptime_Calculation"); // Benchmark uptime calculation with connection tracking group.bench_function("uptime_with_tracking", |b| { b.iter(|| { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, None) { if let Some(uptime_output) = result.client_uptime { let _ = uptime_output.days; let _ = uptime_output.hours; let _ = uptime_output.min; let _ = uptime_output.freq; } if let Some(uptime_output) = 
result.server_uptime { let _ = uptime_output.days; let _ = uptime_output.hours; let _ = uptime_output.min; let _ = uptime_output.freq; } } } }) }); // Benchmark TCP processing with different cache sizes group.bench_function("uptime_small_cache", |b| { b.iter(|| { let mut tracker = TtlCache::new(100); // Smaller cache for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }) }); group.bench_function("uptime_large_cache", |b| { b.iter(|| { let mut tracker = TtlCache::new(10000); // Larger cache for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }) }); group.finish(); // Measure and store uptime calculation times let uptime_with_tracking_time = measure_average_time( || { let mut tracker = TtlCache::new(1000); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, None) { if let Some(uptime_output) = result.client_uptime { let _ = uptime_output.days; let _ = uptime_output.hours; let _ = uptime_output.min; let _ = uptime_output.freq; } if let Some(uptime_output) = result.server_uptime { let _ = uptime_output.days; let _ = uptime_output.hours; let _ = uptime_output.min; let _ = uptime_output.freq; } } } }, 10, ); let uptime_small_cache_time = measure_average_time( || { let mut tracker = TtlCache::new(100); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }, 10, ); let uptime_large_cache_time = measure_average_time( || { let mut tracker = TtlCache::new(10000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, None); } }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("uptime_with_tracking".to_string(), uptime_with_tracking_time)); report .timings .push(("uptime_small_cache".to_string(), uptime_small_cache_time)); report .timings .push(("uptime_large_cache".to_string(), uptime_large_cache_time)); } } } /// Benchmark TCP 
processing overhead analysis fn bench_tcp_processing_overhead(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/macos_tcp_flags.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TCP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TCP PCAP file"); return; } let db = match Database::load_default() { Ok(db) => db, Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; println!("TCP Processing Overhead Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); println!("--------------------"); let mut group = c.benchmark_group("TCP_Processing_Overhead"); // Benchmark minimal TCP processing group.bench_function("minimal_processing", |b| { b.iter(|| { for packet in packets.iter() { let _ = huginn_net_tcp::packet_parser::parse_packet(packet); } }) }); // Benchmark full TCP analysis group.bench_function("full_tcp_analysis", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, Some(&matcher)); } }) }); // Benchmark with result collection group.bench_function("full_analysis_with_collection", |b| { b.iter(|| { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); let mut results = Vec::new(); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, Some(&matcher)) { // Collect all TCP analysis results results.push(( result.syn.is_some(), result.syn_ack.is_some(), result.mtu.map(|m| m.mtu), result .client_uptime .map(|u| u.days) .or_else(|| result.server_uptime.map(|u| u.days)), )); } } // Process results to simulate real-world usage let _ = results.len(); }) }); group.finish(); // Measure and store processing overhead times let minimal_processing_time = measure_average_time( || { for packet in packets.iter() { let _ = 
huginn_net_tcp::packet_parser::parse_packet(packet); } }, 10, ); let full_tcp_analysis_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); for packet in packets.iter() { let _ = process_tcp_packet(packet, &mut tracker, Some(&matcher)); } }, 10, ); let full_analysis_with_collection_time = measure_average_time( || { let matcher = SignatureMatcher::new(&db); let mut tracker = TtlCache::new(1000); let mut results = Vec::new(); for packet in packets.iter() { if let Some(result) = process_tcp_packet(packet, &mut tracker, Some(&matcher)) { results.push(( result.syn.is_some(), result.syn_ack.is_some(), result.mtu.map(|m| m.mtu), result .client_uptime .map(|u| u.days) .or_else(|| result.server_uptime.map(|u| u.days)), )); } } let _ = results.len(); }, 10, ); if let Ok(mut guard) = BENCHMARK_RESULTS.lock() { if let Some(ref mut report) = *guard { report .timings .push(("minimal_processing".to_string(), minimal_processing_time)); report .timings .push(("full_tcp_analysis".to_string(), full_tcp_analysis_time)); report.timings.push(( "full_analysis_with_collection".to_string(), full_analysis_with_collection_time, )); } } } /// Benchmark TCP parallel processing with different worker counts fn bench_tcp_parallel_processing(c: &mut Criterion) { let packets = match load_packets_repeated("../pcap/macos_tcp_flags.pcap", REPEAT_COUNT) { Ok(pkts) => pkts, Err(e) => { eprintln!("Failed to load TCP PCAP file: {e}"); return; } }; if packets.is_empty() { eprintln!("No packets found in TCP PCAP file"); return; } let db = match Database::load_default() { Ok(db) => std::sync::Arc::new(db), Err(e) => { eprintln!("Failed to load default database: {e}"); return; } }; println!("TCP Parallel Processing Analysis:"); println!(" Total packets: {} (repeated {}x)", packets.len(), REPEAT_COUNT); println!("--------------------"); let worker_counts = [2, 4, 8]; let mut group = c.benchmark_group("TCP_Parallel_Processing"); for &num_workers in 
&worker_counts { let bench_name = format!("parallel_{num_workers}_workers"); group.bench_function(&bench_name, |b| { b.iter(|| { let (tx, rx) = std::sync::mpsc::channel(); let pool = match huginn_net_tcp::parallel::WorkerPool::new( num_workers, 100, 32, 10, tx, Some(db.clone()), 1000, None, ) { Ok(p) => p, Err(e) => panic!("Failed to create worker pool: {e}"), }; // Dispatch all packets for packet in packets.iter() { let _ = pool.dispatch(packet.clone()); } // Shutdown and collect results pool.shutdown(); let mut _result_count: usize = 0; while rx.recv().is_ok() {
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
true
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/examples/capture.rs
examples/capture.rs
use clap::{Parser, Subcommand}; use huginn_net::output::FingerprintResult; use huginn_net::{Database, FilterConfig, HuginnNet, IpFilter, PortFilter}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::thread; use tracing::{debug, error, info}; use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_subscriber::fmt; use tracing_subscriber::fmt::writer::MakeWriterExt; use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { #[command(subcommand)] command: Commands, #[command(flatten)] filter: FilterOptions, /// Log file path #[arg(short = 'l', long = "log-file")] log_file: Option<String>, } #[derive(Parser, Debug)] struct FilterOptions { #[arg(short = 'p', long = "port")] port: Option<u16>, #[arg(short = 'I', long = "ip")] ip: Option<String>, } #[derive(Subcommand, Debug)] enum Commands { Live { /// Network interface name #[arg(short = 'i', long)] interface: String, }, Pcap { /// Path to PCAP file #[arg(short = 'f', long)] file: String, }, } fn initialize_logging(log_file: Option<String>) { let console_writer = std::io::stdout.with_max_level(tracing::Level::INFO); let file_appender = if let Some(log_file) = log_file { RollingFileAppender::new(Rotation::NEVER, ".", log_file) .with_max_level(tracing::Level::INFO) } else { RollingFileAppender::new(Rotation::NEVER, ".", "default.log") .with_max_level(tracing::Level::INFO) }; let writer = console_writer.and(file_appender); let subscriber = fmt() .with_env_filter(EnvFilter::from_default_env()) .with_writer(writer) .finish(); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { error!("Failed to set subscriber: {e}"); std::process::exit(1); } } fn build_filter(filter_options: &FilterOptions) -> Option<FilterConfig> { let has_port = filter_options.port.is_some(); let has_ip = filter_options.ip.is_some(); if !has_port && !has_ip { return 
None; } let mut filter = FilterConfig::new(); if let Some(dst_port) = filter_options.port { filter = filter.with_port_filter(PortFilter::new().destination(dst_port)); info!("Filter: destination port {}", dst_port); } if let Some(ip_str) = &filter_options.ip { match IpFilter::new().allow(ip_str) { Ok(ip_filter) => { filter = filter.with_ip_filter(ip_filter); info!("Filter: IP address {}", ip_str); } Err(e) => { error!("Invalid IP address '{}': {}", ip_str, e); std::process::exit(1); // Exit on invalid IP } } } Some(filter) } fn main() { let args = Args::parse(); initialize_logging(args.log_file); let (sender, receiver): (Sender<FingerprintResult>, Receiver<FingerprintResult>) = mpsc::channel(); let cancel_signal = Arc::new(AtomicBool::new(false)); let ctrl_c_signal = cancel_signal.clone(); let thread_cancel_signal = cancel_signal.clone(); if let Err(e) = ctrlc::set_handler(move || { info!("Received signal, initiating graceful shutdown..."); ctrl_c_signal.store(true, Ordering::Relaxed); }) { error!("Error setting signal handler: {e}"); return; } thread::spawn(move || { let db = match Database::load_default() { Ok(db) => db, Err(e) => { error!("Failed to load default database: {e}"); return; } }; debug!("Loaded database: {:?}", db); let filter_config = build_filter(&args.filter); let mut analyzer = match HuginnNet::new(Some(&db), 100, None) { Ok(analyzer) => analyzer, Err(e) => { error!("Failed to create HuginnNet analyzer: {e}"); return; } }; if let Some(ref filter_cfg) = filter_config { analyzer = analyzer.with_filter(filter_cfg.clone()); } let result = match args.command { Commands::Live { interface } => { info!("Starting live capture on interface: {interface}"); analyzer.analyze_network(&interface, sender, Some(thread_cancel_signal.clone())) } Commands::Pcap { file } => { info!("Analyzing PCAP file: {file}"); analyzer.analyze_pcap(&file, sender, Some(thread_cancel_signal)) } }; if let Err(e) = result { error!("Analysis failed: {e}"); } }); for output in receiver { 
if cancel_signal.load(Ordering::Relaxed) { info!("Shutdown signal received, stopping result processing"); break; } if let Some(tcp_syn) = output.tcp_syn { info!("{tcp_syn}"); } if let Some(tcp_syn_ack) = output.tcp_syn_ack { info!("{tcp_syn_ack}"); } if let Some(tcp_mtu) = output.tcp_mtu { info!("{tcp_mtu}"); } if let Some(tcp_client_uptime) = output.tcp_client_uptime { info!("{tcp_client_uptime}"); } if let Some(tcp_server_uptime) = output.tcp_server_uptime { info!("{tcp_server_uptime}"); } if let Some(http_request) = output.http_request { info!("{http_request}"); } if let Some(http_response) = output.http_response { info!("{http_response}"); } if let Some(tls_client) = output.tls_client { info!("{tls_client}"); } } info!("Analysis shutdown completed"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/examples/capture-tcp.rs
examples/capture-tcp.rs
use clap::{Parser, Subcommand}; use huginn_net_db::Database; use huginn_net_tcp::{FilterConfig, HuginnNetTcp, IpFilter, PortFilter, TcpAnalysisResult}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::thread; use tracing::{debug, error, info}; use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_subscriber::fmt; use tracing_subscriber::fmt::writer::MakeWriterExt; use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { #[command(subcommand)] command: Commands, #[command(flatten)] filter: FilterOptions, #[arg(short = 'l', long = "log-file")] log_file: Option<String>, } #[derive(Parser, Debug)] struct FilterOptions { #[arg(short = 'p', long = "port")] port: Option<u16>, #[arg(short = 'I', long = "ip")] ip: Option<String>, } #[derive(Subcommand, Debug)] enum Commands { Single { #[arg(short = 'i', long)] interface: String, }, Parallel { #[arg(short = 'i', long)] interface: String, #[arg(short = 'w', long = "workers")] workers: usize, #[arg(short = 'q', long = "queue-size", default_value = "100")] queue_size: usize, }, } fn initialize_logging(log_file: Option<String>) { let console_writer = std::io::stdout.with_max_level(tracing::Level::INFO); let file_appender = if let Some(log_file) = log_file { RollingFileAppender::new(Rotation::NEVER, ".", log_file) .with_max_level(tracing::Level::INFO) } else { RollingFileAppender::new(Rotation::NEVER, ".", "default.log") .with_max_level(tracing::Level::INFO) }; let writer = console_writer.and(file_appender); let subscriber = fmt() .with_env_filter(EnvFilter::from_default_env()) .with_writer(writer) .finish(); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { eprintln!("Failed to set subscriber: {e}"); std::process::exit(1); } } fn build_filter(filter_options: &FilterOptions) -> Option<FilterConfig> { let has_port = 
filter_options.port.is_some(); let has_ip = filter_options.ip.is_some(); if !has_port && !has_ip { return None; } let mut filter = FilterConfig::new(); if let Some(dst_port) = filter_options.port { filter = filter.with_port_filter(PortFilter::new().destination(dst_port)); info!("Filter: destination port {}", dst_port); } if let Some(ip_str) = &filter_options.ip { match IpFilter::new().allow(ip_str) { Ok(ip_filter) => { filter = filter.with_ip_filter(ip_filter); info!("Filter: IP address {}", ip_str); } Err(e) => { error!("Invalid IP address '{}': {}", ip_str, e); std::process::exit(1); } } } Some(filter) } fn main() { let args = Args::parse(); initialize_logging(args.log_file); let (sender, receiver): (Sender<TcpAnalysisResult>, Receiver<TcpAnalysisResult>) = mpsc::channel(); let db = match Database::load_default() { Ok(db) => Arc::new(db), Err(e) => { error!("Failed to load default database: {e}"); return; } }; debug!("Loaded p0f database successfully"); let filter_config = build_filter(&args.filter); let mut analyzer = match &args.command { Commands::Single { .. } => { info!("Using sequential mode"); let mut analyzer = match HuginnNetTcp::new(Some(db), 1000) { Ok(analyzer) => analyzer, Err(e) => { error!("Failed to create HuginnNetTcp analyzer: {e}"); return; } }; if let Some(ref filter_cfg) = filter_config { analyzer = analyzer.with_filter(filter_cfg.clone()); info!("Packet filtering enabled"); } analyzer } Commands::Parallel { workers, queue_size, .. 
} => { info!("Using parallel mode with {workers} workers, queue_size={queue_size}"); let mut analyzer = match HuginnNetTcp::with_config(Some(db), 1000, *workers, *queue_size, 32, 10) { Ok(analyzer) => analyzer, Err(e) => { error!("Failed to create HuginnNetTcp analyzer: {e}"); return; } }; if let Some(ref filter_cfg) = filter_config { analyzer = analyzer.with_filter(filter_cfg.clone()); info!("Packet filtering enabled"); } analyzer } }; // Initialize pool if parallel mode if matches!(&args.command, Commands::Parallel { .. }) { if let Err(e) = analyzer.init_pool(sender.clone()) { error!("Failed to initialize worker pool: {e}"); return; } } // Get pool reference before moving analyzer let worker_pool_monitor = analyzer.worker_pool(); let worker_pool_shutdown = worker_pool_monitor.clone(); let cancel_signal = Arc::new(AtomicBool::new(false)); let ctrl_c_signal = cancel_signal.clone(); let thread_cancel_signal = cancel_signal.clone(); // Setup Ctrl-C handler with pool shutdown if let Err(e) = ctrlc::set_handler(move || { info!("Received signal, initiating graceful shutdown..."); ctrl_c_signal.store(true, Ordering::Relaxed); // Shutdown worker pool if it exists if let Some(ref pool) = worker_pool_shutdown { pool.shutdown(); } }) { error!("Error setting signal handler: {e}"); return; } let analyzer_shared = Arc::new(std::sync::Mutex::new(analyzer)); thread::spawn(move || { let interface = match &args.command { Commands::Single { interface } => interface.clone(), Commands::Parallel { interface, .. 
} => interface.clone(), }; let result = { let mut analyzer = match analyzer_shared.lock() { Ok(a) => a, Err(_) => { error!("Failed to lock analyzer"); return; } }; analyzer.analyze_network(&interface, sender, Some(thread_cancel_signal)) }; if let Err(e) = result { error!("TCP analysis failed: {e}"); } }); const LOG_STATS_EVERY: u64 = 1000; let mut packet_count: u64 = 0; for output in receiver { if cancel_signal.load(Ordering::Relaxed) { info!("Shutdown signal received, stopping result processing"); break; } if let Some(syn) = output.syn { info!("{syn}"); } if let Some(syn_ack) = output.syn_ack { info!("{syn_ack}"); } if let Some(mtu) = output.mtu { info!("{mtu}"); } if let Some(client_uptime) = output.client_uptime { info!("{client_uptime}"); } if let Some(server_uptime) = output.server_uptime { info!("{server_uptime}"); } if let Some(ref pool) = worker_pool_monitor { packet_count = packet_count.saturating_add(1); if packet_count % LOG_STATS_EVERY == 0 { let stats = pool.stats(); info!("{stats}"); } } } info!("Analysis shutdown completed"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/examples/capture-tls.rs
examples/capture-tls.rs
use clap::{Parser, Subcommand}; use huginn_net_tls::{FilterConfig, HuginnNetTls, IpFilter, PortFilter, TlsClientOutput}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::thread; use tracing::{error, info}; use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_subscriber::fmt; use tracing_subscriber::fmt::writer::MakeWriterExt; use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { #[command(subcommand)] command: Commands, #[command(flatten)] filter: FilterOptions, #[arg(short = 'l', long = "log-file")] log_file: Option<String>, } #[derive(Parser, Debug)] struct FilterOptions { #[arg(short = 'p', long = "port")] port: Option<u16>, #[arg(short = 'I', long = "ip")] ip: Option<String>, } #[derive(Subcommand, Debug)] enum Commands { Single { #[arg(short = 'i', long)] interface: String, }, Parallel { #[arg(short = 'i', long)] interface: String, #[arg(short = 'w', long = "workers")] workers: usize, #[arg(short = 'q', long = "queue-size", default_value = "100")] queue_size: usize, #[arg(short = 'b', long = "batch-size", default_value = "32")] batch_size: usize, #[arg(short = 't', long = "timeout-ms", default_value = "10")] timeout_ms: u64, }, } fn initialize_logging(log_file: Option<String>) { let console_writer = std::io::stdout.with_max_level(tracing::Level::INFO); let file_appender = if let Some(log_file) = log_file { RollingFileAppender::new(Rotation::NEVER, ".", log_file) .with_max_level(tracing::Level::INFO) } else { RollingFileAppender::new(Rotation::NEVER, ".", "tls-capture.log") .with_max_level(tracing::Level::INFO) }; let writer = console_writer.and(file_appender); let subscriber = fmt() .with_env_filter(EnvFilter::from_default_env()) .with_writer(writer) .finish(); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { eprintln!("Failed to set subscriber: {e}"); 
std::process::exit(1); } } fn build_filter(port: Option<u16>, ip: Option<String>) -> Option<FilterConfig> { let has_port = port.is_some(); let has_ip = ip.is_some(); if !has_port && !has_ip { return None; } let mut filter = FilterConfig::new(); if let Some(dst_port) = port { filter = filter.with_port_filter(PortFilter::new().destination(dst_port)); info!("Filter: destination port {}", dst_port); } if let Some(ip_str) = ip { match IpFilter::new().allow(&ip_str) { Ok(ip_filter) => { filter = filter.with_ip_filter(ip_filter); info!("Filter: IP address {}", ip_str); } Err(e) => { error!("Invalid IP address '{}': {}", ip_str, e); return None; } } } Some(filter) } fn main() { let args = Args::parse(); initialize_logging(args.log_file.clone()); let mut packet_count: u64 = 0; // Log stats every N packets const LOG_STATS_EVERY: u64 = 100; info!("Starting TLS-only capture example"); let (sender, receiver): (Sender<TlsClientOutput>, Receiver<TlsClientOutput>) = mpsc::channel(); let cancel_signal = Arc::new(AtomicBool::new(false)); let ctrl_c_signal = cancel_signal.clone(); let thread_cancel_signal = cancel_signal.clone(); let mut analyzer = match &args.command { Commands::Single { .. } => { info!("Using sequential mode"); let mut analyzer = HuginnNetTls::new(); if let Some(filter_config) = build_filter(args.filter.port, args.filter.ip.clone()) { analyzer = analyzer.with_filter(filter_config); info!("Packet filtering enabled"); } analyzer } Commands::Parallel { workers, queue_size, batch_size, timeout_ms, .. 
} => { info!("Using parallel mode: workers={workers}, queue_size={queue_size}, batch_size={batch_size}, timeout_ms={timeout_ms}"); let mut analyzer = HuginnNetTls::with_config(*workers, *queue_size, *batch_size, *timeout_ms); if let Some(filter_config) = build_filter(args.filter.port, args.filter.ip.clone()) { analyzer = analyzer.with_filter(filter_config); info!("Packet filtering enabled"); } analyzer } }; // Initialize pool if parallel mode (before moving analyzer to thread) if matches!(&args.command, Commands::Parallel { .. }) { if let Err(e) = analyzer.init_pool(sender.clone()) { error!("Failed to initialize worker pool: {e}"); return; } } // Get pool reference before moving analyzer let worker_pool_monitor = analyzer.worker_pool(); let worker_pool_shutdown = worker_pool_monitor.clone(); // Setup Ctrl-C handler with pool shutdown if let Err(e) = ctrlc::set_handler(move || { info!("Received signal, initiating graceful shutdown..."); ctrl_c_signal.store(true, Ordering::Relaxed); // Shutdown worker pool if it exists if let Some(ref pool) = worker_pool_shutdown { pool.shutdown(); } }) { error!("Error setting signal handler: {e}"); return; } let analyzer_shared = Arc::new(std::sync::Mutex::new(analyzer)); thread::spawn(move || { let interface = match &args.command { Commands::Single { interface, .. } => interface.clone(), Commands::Parallel { interface, .. 
} => interface.clone(), }; let result = { let mut analyzer = match analyzer_shared.lock() { Ok(a) => a, Err(_) => { error!("Failed to lock analyzer"); return; } }; analyzer.analyze_network(&interface, sender, Some(thread_cancel_signal)) }; if let Err(e) = result { error!("TLS analysis failed: {e}"); } }); for output in receiver { if cancel_signal.load(Ordering::Relaxed) { info!("Shutdown signal received, stopping result processing"); break; } info!("{output}"); if let Some(ref pool) = worker_pool_monitor { packet_count = packet_count.saturating_add(1); if packet_count % LOG_STATS_EVERY == 0 { let stats = pool.stats(); info!("{stats}"); } } } info!("Analysis shutdown completed"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/examples/capture-http.rs
examples/capture-http.rs
use clap::{Parser, Subcommand}; use huginn_net_db::Database; use huginn_net_http::{FilterConfig, HttpAnalysisResult, HuginnNetHttp, IpFilter, PortFilter}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::Arc; use std::thread; use tracing::{debug, error, info}; use tracing_appender::rolling::{RollingFileAppender, Rotation}; use tracing_subscriber::fmt; use tracing_subscriber::fmt::writer::MakeWriterExt; use tracing_subscriber::EnvFilter; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] struct Args { #[command(subcommand)] command: Commands, #[command(flatten)] filter: FilterOptions, #[arg(short = 'l', long = "log-file")] log_file: Option<String>, } #[derive(Parser, Debug)] struct FilterOptions { #[arg(short = 'p', long = "port")] port: Option<u16>, #[arg(short = 'I', long = "ip")] ip: Option<String>, } #[derive(Subcommand, Debug)] enum Commands { Single { #[arg(short = 'i', long)] interface: String, }, Parallel { #[arg(short = 'i', long)] interface: String, #[arg(short = 'w', long = "workers")] workers: usize, #[arg(short = 'q', long = "queue-size", default_value = "100")] queue_size: usize, }, } fn initialize_logging(log_file: Option<String>) { let console_writer = std::io::stdout.with_max_level(tracing::Level::INFO); let file_appender = if let Some(log_file) = log_file { RollingFileAppender::new(Rotation::NEVER, ".", log_file) .with_max_level(tracing::Level::INFO) } else { RollingFileAppender::new(Rotation::NEVER, ".", "default.log") .with_max_level(tracing::Level::INFO) }; let writer = console_writer.and(file_appender); let subscriber = fmt() .with_env_filter(EnvFilter::from_default_env()) .with_writer(writer) .finish(); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { eprintln!("Failed to set subscriber: {e}"); std::process::exit(1); } } fn build_filter(filter_options: &FilterOptions) -> Option<FilterConfig> { let has_port = filter_options.port.is_some(); 
let has_ip = filter_options.ip.is_some(); if !has_port && !has_ip { return None; } let mut filter = FilterConfig::new(); if let Some(dst_port) = filter_options.port { filter = filter.with_port_filter(PortFilter::new().destination(dst_port)); info!("Filter: destination port {}", dst_port); } if let Some(ip_str) = &filter_options.ip { match IpFilter::new().allow(ip_str) { Ok(ip_filter) => { filter = filter.with_ip_filter(ip_filter); info!("Filter: IP address {}", ip_str); } Err(e) => { error!("Invalid IP address '{}': {}", ip_str, e); std::process::exit(1); } } } Some(filter) } fn main() { let args = Args::parse(); initialize_logging(args.log_file); info!("Starting HTTP-only capture example"); let (sender, receiver): (Sender<HttpAnalysisResult>, Receiver<HttpAnalysisResult>) = channel(); let cancel_signal = Arc::new(AtomicBool::new(false)); let ctrl_c_signal = cancel_signal.clone(); let thread_cancel_signal = cancel_signal.clone(); if let Err(e) = ctrlc::set_handler(move || { info!("Received signal, initiating graceful shutdown..."); ctrl_c_signal.store(true, Ordering::Relaxed); }) { error!("Error setting signal handler: {e}"); return; } thread::spawn(move || { let db = match Database::load_default() { Ok(db) => db, Err(e) => { error!("Failed to load default database: {e}"); return; } }; debug!("Loaded database: {:?}", db); let db_option = Some(Arc::new(db)); let filter_config = build_filter(&args.filter); match args.command { Commands::Single { interface } => { info!("Initializing HTTP analyzer in sequential mode"); let mut analyzer = match HuginnNetHttp::new(db_option, 1000) { Ok(analyzer) => analyzer, Err(e) => { error!("Failed to create HuginnNetHttp analyzer: {e}"); return; } }; if let Some(ref filter_cfg) = filter_config { analyzer = analyzer.with_filter(filter_cfg.clone()); info!("Packet filtering enabled"); } info!("Starting HTTP live capture on interface: {interface}"); if let Err(e) = analyzer.analyze_network(&interface, sender, Some(thread_cancel_signal)) 
{ error!("HTTP analysis failed: {e}"); } } Commands::Parallel { interface, workers, queue_size } => { info!( "Initializing HTTP analyzer with {workers} worker threads (flow-based routing)" ); let mut analyzer = match HuginnNetHttp::with_config( db_option, 1000, workers, queue_size, 16, 10, ) { Ok(analyzer) => analyzer, Err(e) => { error!("Failed to create HuginnNetHttp analyzer: {e}"); return; } }; if let Some(ref filter_cfg) = filter_config { analyzer = analyzer.with_filter(filter_cfg.clone()); info!("Packet filtering enabled"); } if let Err(e) = analyzer.init_pool(sender.clone()) { error!("Failed to initialize worker pool: {e}"); return; } info!("Starting HTTP live capture on interface: {interface}"); if let Err(e) = analyzer.analyze_network(&interface, sender, Some(thread_cancel_signal)) { error!("HTTP analysis failed: {e}"); } } } }); for output in receiver { if cancel_signal.load(Ordering::Relaxed) { info!("Shutdown signal received, stopping result processing"); break; } if let Some(http_request) = output.http_request { info!("{http_request}"); } if let Some(http_response) = output.http_response { info!("{http_response}"); } } info!("Analysis shutdown completed"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/db.rs
huginn-net-db/src/db.rs
use crate::db_matching_trait::{DatabaseSignature, FingerprintDb, IndexKey, ObservedFingerprint}; use crate::http::{self, Version as HttpVersion}; use crate::observable_signals::{HttpRequestObservation, HttpResponseObservation, TcpObservation}; use crate::tcp::{self, IpVersion, PayloadSize}; use std::collections::HashMap; use std::fmt; use std::fmt::Display; use std::marker::PhantomData; use std::str::FromStr; use tracing::debug; /// Represents the database used by `P0f` to store signatures and associated metadata. /// The database contains signatures for analyzing TCP and HTTP traffic, as well as /// other metadata such as MTU mappings and user agent-to-operating system mappings. #[derive(Debug)] pub struct Database { pub classes: Vec<String>, pub mtu: Vec<(String, Vec<u16>)>, pub ua_os: Vec<(String, Option<String>)>, pub tcp_request: FingerprintCollection<TcpObservation, tcp::Signature, TcpIndexKey>, pub tcp_response: FingerprintCollection<TcpObservation, tcp::Signature, TcpIndexKey>, pub http_request: FingerprintCollection<HttpRequestObservation, http::Signature, HttpIndexKey>, pub http_response: FingerprintCollection<HttpResponseObservation, http::Signature, HttpIndexKey>, } /// Represents a label associated with a signature, which provides metadata about /// the signature, such as type, class, name, and optional flavor details. #[derive(Clone, Debug, PartialEq)] pub struct Label { pub ty: Type, pub class: Option<String>, pub name: String, pub flavor: Option<String>, } /// Enum representing the type of `Label`. /// - `Specified`: A specific label with well-defined characteristics. /// - `Generic`: A generic label with broader characteristics. #[derive(Clone, Debug, PartialEq)] pub enum Type { Specified, Generic, } impl fmt::Display for Type { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{self:?}") } } impl Database { /// Creates a default instance of the `Database` by parsing an embedded configuration file. 
/// This file (`config/p0f.fp` relative to the crate root) is expected to define the default /// signatures and mappings used for analysis. /// /// # Errors /// Returns `HuginnNetError::MissConfiguration` if the embedded default fingerprint file /// cannot be parsed. This indicates a critical issue with the bundled fingerprint data /// or the parser itself. pub fn load_default() -> Result<Self, crate::error::DatabaseError> { const DEFAULT_FP_CONTENTS: &str = include_str!("../config/p0f.fp"); Database::from_str(DEFAULT_FP_CONTENTS).map_err(|e| { crate::error::DatabaseError::InvalidConfiguration(format!( "Failed to parse embedded default p0f database: {e}" )) }) } } /// Index key for TCP signatures, used to optimize database lookups. /// /// This key is generated from a `tcp::Signature` and combines several /// of its most discriminative fields to allow for a fast initial filtering /// of potential matches in the signature database. The goal is to quickly /// narrow down the search space before performing more detailed and costly /// distance calculations. /// /// The fields included are chosen for their balance of providing good /// discrimination while not being overly specific to avoid missing matches /// due to minor variations (which are handled by the distance calculation). #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct TcpIndexKey { pub ip_version_key: IpVersion, pub olayout_key: String, pub pclass_key: PayloadSize, } impl IndexKey for TcpIndexKey {} /// Index key for HTTP signatures, used to optimize database lookups. /// /// This key is generated from a `http::Signature` /// to enable faster filtering of HTTP signatures. It combines key characteristics /// of an HTTP request or response. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct HttpIndexKey { pub http_version_key: HttpVersion, } impl IndexKey for HttpIndexKey {} #[derive(Debug)] pub struct FingerprintCollection<OF, DS, K> where OF: ObservedFingerprint<Key = K>, DS: DatabaseSignature<OF>, K: IndexKey, { pub entries: Vec<(Label, Vec<DS>)>, pub(crate) index: HashMap<K, Vec<(usize, usize)>>, _observed_marker: PhantomData<OF>, _database_sig_marker: PhantomData<DS>, _key_marker: PhantomData<K>, } impl<OF, DS, K> Default for FingerprintCollection<OF, DS, K> where OF: ObservedFingerprint<Key = K>, DS: DatabaseSignature<OF>, K: IndexKey, { fn default() -> Self { Self { entries: Vec::new(), index: HashMap::new(), _observed_marker: PhantomData, _database_sig_marker: PhantomData, _key_marker: PhantomData, } } } impl<OF, DS, K> FingerprintCollection<OF, DS, K> where OF: ObservedFingerprint<Key = K>, DS: DatabaseSignature<OF>, K: IndexKey, { /// Creates a new collection and builds an index for it. pub fn new(entries: Vec<(Label, Vec<DS>)>) -> Self { let mut index_map = HashMap::new(); for (label_idx, (_label, sig_vec)) in entries.iter().enumerate() { for (sig_idx, db_sig) in sig_vec.iter().enumerate() { for key in db_sig.generate_index_keys_for_db_entry() { index_map .entry(key) .or_insert_with(Vec::new) .push((label_idx, sig_idx)); } } } FingerprintCollection { entries, index: index_map, _observed_marker: PhantomData, _database_sig_marker: PhantomData, _key_marker: PhantomData, } } } impl<OF, DS, K> FingerprintDb<OF, DS> for FingerprintCollection<OF, DS, K> where OF: ObservedFingerprint<Key = K>, DS: DatabaseSignature<OF> + Display, K: IndexKey, { fn find_best_match(&self, observed: &OF) -> Option<(&Label, &DS, f32)> { let observed_key = observed.generate_index_key(); let candidate_indices = match self.index.get(&observed_key) { Some(indices) => indices, None => { return None; } }; if candidate_indices.is_empty() { return None; } let mut best_label_ref = None; let mut best_sig_ref = None; let mut 
min_distance = u32::MAX; for &(label_idx, sig_idx) in candidate_indices { let (label, sig_vec) = &self.entries[label_idx]; let db_sig = &sig_vec[sig_idx]; if let Some(distance) = db_sig.calculate_distance(observed) { if distance < min_distance { min_distance = distance; best_label_ref = Some(label); best_sig_ref = Some(db_sig); } debug!( "distance: {}, label: {}, flavor: {:?}, sig: {}", distance, label.name, label.flavor, db_sig ); } } if let (Some(label), Some(sig)) = (best_label_ref, best_sig_ref) { Some((label, sig, sig.get_quality_score(min_distance))) } else { None } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/lib.rs
huginn-net-db/src/lib.rs
#![forbid(unsafe_code)] //! # huginn-net-db //! //! P0f database parser and matching traits for network fingerprinting. //! //! This crate provides: //! - Parsing of p0f database format //! - Database structures for TCP and HTTP signatures //! - Traits for fingerprint matching //! - Observable signal types // Core database functionality pub mod db; pub mod db_matching_trait; pub mod db_parse; pub mod error; // Protocol-specific types pub mod http; pub mod tcp; // Observable signals and matching impls pub mod observable_http_signals_matching; pub mod observable_signals; pub mod observable_tcp_signals_matching; // Display implementations for database types pub mod display; pub mod utils; // Re-export main types for convenience pub use db::{Database, Label, Type}; pub use error::DatabaseError; pub use utils::MatchQualityType;
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/http.rs
huginn-net-db/src/http.rs
#[derive(Clone, Debug, PartialEq)] pub struct Signature { /// HTTP version pub version: Version, /// ordered list of headers that should appear in matching traffic. pub horder: Vec<Header>, /// list of headers that must *not* appear in matching traffic. pub habsent: Vec<Header>, /// expected substring in 'User-Agent' or 'Server'. pub expsw: String, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum HttpMatchQuality { High, Medium, Low, Bad, } impl HttpMatchQuality { pub fn as_score(self) -> u32 { match self { HttpMatchQuality::High => 0, HttpMatchQuality::Medium => 1, HttpMatchQuality::Low => 2, HttpMatchQuality::Bad => 3, } } } impl crate::db_matching_trait::MatchQuality for HttpMatchQuality { // HTTP has 4 components, each can contribute max 3 points (Bad) const MAX_DISTANCE: u32 = 12; fn distance_to_score(distance: u32) -> f32 { match distance { 0 => 1.0, 1 => 0.95, 2 => 0.90, 3 => 0.80, 4..=5 => 0.70, 6..=7 => 0.60, 8..=9 => 0.40, 10..=11 => 0.20, d if d <= Self::MAX_DISTANCE => 0.10, _ => 0.05, } } } /// Version of the HTTP protocol used in a request or response. /// Used in signatures to distinguish behavior between HTTP/1.0 and HTTP/1.1. /// The `Any` variant is used in database signatures to match any HTTP version. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum Version { /// HTTP/1.0 V10, /// HTTP/1.1 V11, /// HTTP/2 V20, /// HTTP/3 V30, /// Matches any HTTP version (used in database signatures). 
Any, } impl Version { pub fn parse(version_str: &str) -> Option<Self> { match version_str { "HTTP/1.0" => Some(Version::V10), "HTTP/1.1" => Some(Version::V11), "HTTP/2" | "HTTP/2.0" => Some(Version::V20), "HTTP/3" | "HTTP/3.0" => Some(Version::V30), _ => None, } } } impl std::str::FromStr for Version { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { Self::parse(s).ok_or(()) } } impl Version { pub fn as_str(&self) -> &'static str { match self { Version::V10 => "HTTP/1.0", Version::V11 => "HTTP/1.1", Version::V20 => "HTTP/2", Version::V30 => "HTTP/3", Version::Any => "Any", } } } #[derive(Clone, Debug, PartialEq)] pub struct Header { pub optional: bool, pub name: String, pub value: Option<String>, } #[derive(Clone, Debug, PartialEq)] pub enum HttpDiagnosis { Dishonest, Anonymous, Generic, None, } impl Header { pub fn new<S: AsRef<str>>(name: S) -> Self { Header { optional: false, name: name.as_ref().to_owned(), value: None } } pub fn with_value<S: AsRef<str>>(mut self, value: S) -> Self { self.value = Some(value.as_ref().to_owned()); self } pub fn with_optional_value<S: AsRef<str>>(mut self, value: Option<S>) -> Self { self.value = value.map(|v| v.as_ref().to_owned()); self } pub fn optional(mut self) -> Self { self.optional = true; self } } pub fn request_optional_headers() -> Vec<&'static str> { vec![ "Cookie", "Referer", "Origin", "Range", "If-Modified-Since", "If-None-Match", "Via", "X-Forwarded-For", "Authorization", "Proxy-Authorization", "Cache-Control", ] } pub fn response_optional_headers() -> Vec<&'static str> { vec![ "Set-Cookie", "Last-Modified", "ETag", "Content-Length", "Content-Disposition", "Cache-Control", "Expires", "Pragma", "Location", "Refresh", "Content-Range", "Vary", ] } pub fn request_skip_value_headers() -> Vec<&'static str> { vec!["Host", "User-Agent"] } pub fn response_skip_value_headers() -> Vec<&'static str> { vec!["Date", "Content-Type", "Server"] } pub fn request_common_headers() -> Vec<&'static str> { vec![ "Host", 
"User-Agent", "Connection", "Accept", "Accept-Encoding", "Accept-Language", "Accept-Charset", "Keep-Alive", ] } pub fn response_common_headers() -> Vec<&'static str> { vec!["Content-Type", "Connection", "Keep-Alive", "Accept-Ranges", "Date"] }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/observable_signals.rs
huginn-net-db/src/observable_signals.rs
use crate::db::HttpIndexKey; use crate::db_matching_trait::ObservedFingerprint; use crate::http::{Header, Version}; use crate::tcp::{IpVersion, PayloadSize, Quirk, TcpOption, Ttl, WindowSize}; /// Represents observed TCP characteristics from network traffic. #[derive(Clone, Debug, PartialEq)] pub struct TcpObservation { /// IP version pub version: IpVersion, /// initial TTL used by the OS. pub ittl: Ttl, /// length of IPv4 options or IPv6 extension headers. pub olen: u8, /// maximum segment size, if specified in TCP options. pub mss: Option<u16>, /// window size. pub wsize: WindowSize, /// window scaling factor, if specified in TCP options. pub wscale: Option<u8>, /// layout and ordering of TCP options, if any. pub olayout: Vec<TcpOption>, /// properties and quirks observed in IP or TCP headers. pub quirks: Vec<Quirk>, /// payload size classification pub pclass: PayloadSize, } /// Represents observed HTTP request characteristics from network traffic. #[derive(Clone, Debug, PartialEq)] pub struct HttpRequestObservation { /// HTTP version pub version: Version, /// ordered list of headers that should appear in matching traffic (p0f style). pub horder: Vec<Header>, /// list of headers that must *not* appear in matching traffic (p0f style). pub habsent: Vec<Header>, /// expected substring in 'User-Agent' or 'Server'. pub expsw: String, } /// Represents observed HTTP response characteristics from network traffic. #[derive(Clone, Debug, PartialEq)] pub struct HttpResponseObservation { /// HTTP version pub version: Version, /// ordered list of headers that should appear in matching traffic (p0f style). pub horder: Vec<Header>, /// list of headers that must *not* appear in matching traffic (p0f style). pub habsent: Vec<Header>, /// expected substring in 'User-Agent' or 'Server'. 
pub expsw: String, } // ============================== // ObservedFingerprint - HTTP // ============================== impl ObservedFingerprint for HttpRequestObservation { type Key = HttpIndexKey; fn generate_index_key(&self) -> Self::Key { HttpIndexKey { http_version_key: self.version } } } impl ObservedFingerprint for HttpResponseObservation { type Key = HttpIndexKey; fn generate_index_key(&self) -> Self::Key { HttpIndexKey { http_version_key: self.version } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/error.rs
huginn-net-db/src/error.rs
use thiserror::Error; /// Error handling for database parsing and operations. #[derive(Error, Debug)] pub enum DatabaseError { /// An error occurred while parsing the p0f database format. #[error("Parse error: {0}")] Parse(String), /// Configuration is missing or invalid. #[error("Invalid configuration: {0}")] InvalidConfiguration(String), }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/display.rs
huginn-net-db/src/display.rs
use crate::db::Label;
use core::fmt;
use std::fmt::Formatter;

impl fmt::Display for Label {
    /// Renders a label as `type:class:name:flavor`; a missing class or
    /// flavor renders as an empty segment.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}:{}:{}:{}",
            self.ty,
            self.class.as_deref().unwrap_or_default(),
            self.name,
            self.flavor.as_deref().unwrap_or_default()
        )
    }
}

mod tcp {
    use crate::observable_signals::TcpObservation;
    use crate::tcp::{IpVersion, PayloadSize, Quirk, Signature, TcpOption, Ttl, WindowSize};
    use core::fmt;
    use std::fmt::Formatter;

    /// Writes `items` separated by commas, with no trailing separator.
    fn write_csv<T: fmt::Display>(f: &mut Formatter<'_>, items: &[T]) -> fmt::Result {
        for (i, item) in items.iter().enumerate() {
            if i > 0 {
                f.write_str(",")?;
            }
            write!(f, "{item}")?;
        }
        Ok(())
    }

    /// Accessor surface shared by observations and database signatures so
    /// both render through a single formatting routine.
    trait TcpDisplayFormat {
        fn get_version(&self) -> IpVersion;
        fn get_ittl(&self) -> Ttl;
        fn get_olen(&self) -> u8;
        fn get_mss(&self) -> Option<u16>;
        fn get_wsize(&self) -> WindowSize;
        fn get_wscale(&self) -> Option<u8>;
        fn get_olayout(&self) -> &[TcpOption];
        fn get_quirks(&self) -> &[Quirk];
        fn get_pclass(&self) -> PayloadSize;

        /// Formats the p0f-style TCP signature line:
        /// `ver:ittl:olen:mss:wsize,wscale:olayout:quirks:pclass`
        /// (`*` marks a missing mss/wscale).
        fn format_tcp_display(&self, f: &mut Formatter<'_>) -> fmt::Result {
            write!(f, "{}:{}:{}:", self.get_version(), self.get_ittl(), self.get_olen())?;
            match self.get_mss() {
                Some(mss) => write!(f, "{mss}")?,
                None => f.write_str("*")?,
            }
            write!(f, ":{},", self.get_wsize())?;
            match self.get_wscale() {
                Some(scale) => write!(f, "{scale}")?,
                None => f.write_str("*")?,
            }
            f.write_str(":")?;
            write_csv(f, self.get_olayout())?;
            f.write_str(":")?;
            write_csv(f, self.get_quirks())?;
            write!(f, ":{}", self.get_pclass())
        }
    }

    impl TcpDisplayFormat for TcpObservation {
        fn get_version(&self) -> IpVersion {
            self.version
        }
        fn get_ittl(&self) -> Ttl {
            self.ittl.clone()
        }
        fn get_olen(&self) -> u8 {
            self.olen
        }
        fn get_mss(&self) -> Option<u16> {
            self.mss
        }
        fn get_wsize(&self) -> WindowSize {
            self.wsize.clone()
        }
        fn get_wscale(&self) -> Option<u8> {
            self.wscale
        }
        fn get_olayout(&self) -> &[TcpOption] {
            &self.olayout
        }
        fn get_quirks(&self) -> &[Quirk] {
            &self.quirks
        }
        fn get_pclass(&self) -> PayloadSize {
            self.pclass
        }
    }

    impl TcpDisplayFormat for Signature {
        fn get_version(&self) -> IpVersion {
            self.version
        }
        fn get_ittl(&self) -> Ttl {
            self.ittl.clone()
        }
        fn get_olen(&self) -> u8 {
            self.olen
        }
        fn get_mss(&self) -> Option<u16> {
            self.mss
        }
        fn get_wsize(&self) -> WindowSize {
            self.wsize.clone()
        }
        fn get_wscale(&self) -> Option<u8> {
            self.wscale
        }
        fn get_olayout(&self) -> &[TcpOption] {
            &self.olayout
        }
        fn get_quirks(&self) -> &[Quirk] {
            &self.quirks
        }
        fn get_pclass(&self) -> PayloadSize {
            self.pclass
        }
    }

    impl fmt::Display for TcpObservation {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            self.format_tcp_display(f)
        }
    }

    impl fmt::Display for Signature {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            self.format_tcp_display(f)
        }
    }

    impl fmt::Display for IpVersion {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            use IpVersion::*;
            f.write_str(match self {
                V4 => "4",
                V6 => "6",
                Any => "*",
            })
        }
    }

    impl fmt::Display for Ttl {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            match self {
                Ttl::Value(ttl) => write!(f, "{ttl}"),
                Ttl::Distance(ttl, distance) => write!(f, "{ttl}+{distance}"),
                Ttl::Guess(ttl) => write!(f, "{ttl}+?"),
                Ttl::Bad(ttl) => write!(f, "{ttl}-"),
            }
        }
    }

    impl fmt::Display for WindowSize {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            use WindowSize::*;
            match self {
                Mss(n) => write!(f, "mss*{n}"),
                Mtu(n) => write!(f, "mtu*{n}"),
                Value(n) => write!(f, "{n}"),
                Mod(n) => write!(f, "%{n}"),
                Any => f.write_str("*"),
            }
        }
    }

    impl fmt::Display for TcpOption {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            use TcpOption::*;
            match self {
                Eol(n) => write!(f, "eol+{n}"),
                Nop => f.write_str("nop"),
                Mss => f.write_str("mss"),
                Ws => f.write_str("ws"),
                Sok => f.write_str("sok"),
                Sack => f.write_str("sack"),
                TS => f.write_str("ts"),
                Unknown(n) => write!(f, "?{n}"),
            }
        }
    }

    impl fmt::Display for Quirk {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            use Quirk::*;
            // Every variant maps to a fixed p0f quirk token.
            f.write_str(match self {
                Df => "df",
                NonZeroID => "id+",
                ZeroID => "id-",
                Ecn => "ecn",
                MustBeZero => "0+",
                FlowID => "flow",
                SeqNumZero => "seq-",
                AckNumNonZero => "ack+",
                AckNumZero => "ack-",
                NonZeroURG => "uptr+",
                Urg => "urgf+",
                Push => "pushf+",
                OwnTimestampZero => "ts1-",
                PeerTimestampNonZero => "ts2+",
                TrailinigNonZero => "opt+",
                ExcessiveWindowScaling => "exws",
                OptBad => "bad",
            })
        }
    }

    impl fmt::Display for PayloadSize {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            use PayloadSize::*;
            f.write_str(match self {
                Zero => "0",
                NonZero => "+",
                Any => "*",
            })
        }
    }
}

pub mod http {
    use crate::http::{Header, HttpDiagnosis, Signature, Version};
    use crate::observable_signals::{HttpRequestObservation, HttpResponseObservation};
    use core::fmt;
    use std::fmt::Formatter;

    /// Writes `items` separated by commas, with no trailing separator.
    fn write_csv<T: fmt::Display>(f: &mut Formatter<'_>, items: &[T]) -> fmt::Result {
        for (i, item) in items.iter().enumerate() {
            if i > 0 {
                f.write_str(",")?;
            }
            write!(f, "{item}")?;
        }
        Ok(())
    }

    /// Accessor surface shared by observations and database signatures so
    /// both render through a single formatting routine.
    pub trait HttpDisplayFormat {
        fn get_version(&self) -> Version;
        fn get_horder(&self) -> &[Header];
        fn get_habsent(&self) -> &[Header];
        fn get_expsw(&self) -> &str;

        /// Formats the p0f-style HTTP signature line:
        /// `ver:horder:habsent:expsw`.
        fn format_http_display(&self, f: &mut Formatter<'_>) -> fmt::Result {
            write!(f, "{}:", self.get_version())?;
            write_csv(f, self.get_horder())?;
            f.write_str(":")?;
            write_csv(f, self.get_habsent())?;
            write!(f, ":{}", self.get_expsw())
        }
    }

    impl HttpDisplayFormat for HttpRequestObservation {
        fn get_version(&self) -> Version {
            self.version
        }
        fn get_horder(&self) -> &[Header] {
            &self.horder
        }
        fn get_habsent(&self) -> &[Header] {
            &self.habsent
        }
        fn get_expsw(&self) -> &str {
            &self.expsw
        }
    }

    impl HttpDisplayFormat for HttpResponseObservation {
        fn get_version(&self) -> Version {
            self.version
        }
        fn get_horder(&self) -> &[Header] {
            &self.horder
        }
        fn get_habsent(&self) -> &[Header] {
            &self.habsent
        }
        fn get_expsw(&self) -> &str {
            &self.expsw
        }
    }

    impl HttpDisplayFormat for Signature {
        fn get_version(&self) -> Version {
            self.version
        }
        fn get_horder(&self) -> &[Header] {
            &self.horder
        }
        fn get_habsent(&self) -> &[Header] {
            &self.habsent
        }
        fn get_expsw(&self) -> &str {
            &self.expsw
        }
    }

    impl fmt::Display for HttpRequestObservation {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            self.format_http_display(f)
        }
    }

    impl fmt::Display for HttpResponseObservation {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            self.format_http_display(f)
        }
    }

    impl fmt::Display for Signature {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            self.format_http_display(f)
        }
    }

    impl fmt::Display for Version {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            f.write_str(match self {
                Version::V10 => "0",
                Version::V11 => "1",
                Version::V20 => "2",
                Version::V30 => "3",
                Version::Any => "*",
            })
        }
    }

    impl fmt::Display for Header {
        /// Renders `?name=[value]`, where the `?` prefix marks an optional
        /// header and the `=[...]` suffix only appears when a value is set.
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            if self.optional {
                f.write_str("?")?;
            }
            f.write_str(&self.name)?;
            if let Some(value) = self.value.as_deref() {
                write!(f, "=[{value}]")?;
            }
            Ok(())
        }
    }

    impl fmt::Display for HttpDiagnosis {
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            let text = match self {
                HttpDiagnosis::Dishonest => "dishonest",
                HttpDiagnosis::Anonymous => "anonymous",
                HttpDiagnosis::Generic => "generic",
                HttpDiagnosis::None => "none",
            };
            f.write_str(text)
        }
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/utils.rs
huginn-net-db/src/utils.rs
/// Represents the quality of a match between an observed fingerprint and a database signature. #[derive(Clone, Debug)] pub enum MatchQualityType { Matched(f32), // 0.05 to 1.0 (quality score: 1.0 = perfect match, 0.05 = worst match) NotMatched, Disabled, }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/tcp.rs
huginn-net-db/src/tcp.rs
use tracing::debug; #[derive(Clone, Debug, PartialEq)] pub struct Signature { pub version: IpVersion, /// initial TTL used by the OS. pub ittl: Ttl, /// length of IPv4 options or IPv6 extension headers. pub olen: u8, /// maximum segment size, if specified in TCP options. pub mss: Option<u16>, /// window size. pub wsize: WindowSize, /// window scaling factor, if specified in TCP options. pub wscale: Option<u8>, /// layout and ordering of TCP options, if any. pub olayout: Vec<TcpOption>, /// properties and quirks observed in IP or TCP headers. pub quirks: Vec<Quirk>, /// payload size classification pub pclass: PayloadSize, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TcpMatchQuality { High, Medium, Low, } impl TcpMatchQuality { pub fn as_score(self) -> u32 { match self { TcpMatchQuality::High => 0, TcpMatchQuality::Medium => 1, TcpMatchQuality::Low => 2, } } } impl crate::db_matching_trait::MatchQuality for TcpMatchQuality { // TCP has 9 components, each can contribute max 2 points (Low) const MAX_DISTANCE: u32 = 18; fn distance_to_score(distance: u32) -> f32 { match distance { 0 => 1.0, 1 => 0.95, 2 => 0.90, 3..=4 => 0.80, 5..=6 => 0.70, 7..=9 => 0.60, 10..=12 => 0.40, 13..=15 => 0.20, d if d <= Self::MAX_DISTANCE => 0.10, _ => 0.05, } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum IpVersion { V4, V6, Any, } impl IpVersion { pub fn distance_ip_version(&self, other: &IpVersion) -> Option<u32> { if other == &IpVersion::Any { Some(TcpMatchQuality::High.as_score()) } else { match (self, other) { (IpVersion::V4, IpVersion::V4) | (IpVersion::V6, IpVersion::V6) => { Some(TcpMatchQuality::High.as_score()) } _ => None, } } } } /// Time To Live (TTL) representation used for OS fingerprinting and network distance calculation #[derive(Clone, Debug, PartialEq)] pub enum Ttl { /// Raw TTL value when we don't have enough context to determine initial TTL /// Contains the observed TTL value from the IP header Value(u8), /// TTL with calculated network 
distance /// First u8 is the observed TTL value /// Second u8 is the estimated number of hops (distance = initial_ttl - observed_ttl) Distance(u8, u8), /// TTL value that's been guessed based on common OS initial values /// Contains the estimated initial TTL (e.g., 64 for Linux, 128 for Windows) Guess(u8), /// Invalid or problematic TTL value /// Contains the raw TTL value that was deemed invalid (e.g., 0) Bad(u8), } impl Ttl { pub fn distance_ttl(&self, other: &Ttl) -> Option<u32> { match (self, other) { (Ttl::Value(a), Ttl::Value(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Distance(a1, a2), Ttl::Distance(b1, b2)) => { if a1 == b1 && a2 == b2 { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Distance(a1, a2), Ttl::Value(b1)) => { if a1.saturating_add(*a2) == *b1 { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Guess(a), Ttl::Guess(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Bad(a), Ttl::Bad(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Guess(a), Ttl::Value(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Value(a), Ttl::Distance(b1, b2)) => { if *a == b1.saturating_add(*b2) { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (Ttl::Value(a), Ttl::Guess(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } _ => None, } } } /// TCP Window Size representation used for fingerprinting different TCP stacks #[derive(Clone, Debug, PartialEq)] pub enum WindowSize { /// Window size is a multiple of MSS (Maximum Segment Size) /// The u8 value represents the multiplier (e.g., Mss(4) means window = MSS * 4) 
Mss(u8), /// Window size is a multiple of MTU (Maximum Transmission Unit) /// The u8 value represents the multiplier (e.g., Mtu(4) means window = MTU * 4) Mtu(u8), /// Raw window size value when it doesn't match any pattern /// Contains the actual window size value from the TCP header Value(u16), /// Window size follows a modulo pattern /// The u16 value represents the modulo base (e.g., Mod(1024) means window % 1024 == 0) Mod(u16), /// Represents any window size (wildcard matcher) Any, } impl WindowSize { pub fn distance_window_size(&self, other: &WindowSize, mss: Option<u16>) -> Option<u32> { match (self, other) { (WindowSize::Mss(a), WindowSize::Mss(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (WindowSize::Mtu(a), WindowSize::Mtu(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (WindowSize::Value(a), WindowSize::Mss(b)) => { if let Some(mss_value) = mss { if let Some(ratio_other) = a.checked_div(mss_value) { if *b as u16 == ratio_other { debug!( "window size difference: a {}, b {} == ratio_other {}", a, b, ratio_other ); Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } else { Some(TcpMatchQuality::Low.as_score()) } } else { Some(TcpMatchQuality::Low.as_score()) } } (WindowSize::Mod(a), WindowSize::Mod(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (WindowSize::Value(a), WindowSize::Value(b)) => { if a == b { Some(TcpMatchQuality::High.as_score()) } else { Some(TcpMatchQuality::Low.as_score()) } } (_, WindowSize::Any) => Some(TcpMatchQuality::High.as_score()), _ => None, } } } #[derive(Clone, Debug, PartialEq)] pub enum TcpOption { /// eol+n - explicit end of options, followed by n bytes of padding Eol(u8), /// nop - no-op option Nop, /// mss - maximum segment size Mss, /// ws - window scaling Ws, /// sok - selective ACK permitted 
Sok, /// sack - selective ACK (should not be seen) Sack, /// ts - timestamp TS, /// ?n - unknown option ID n Unknown(u8), } #[derive(Clone, Debug, PartialEq)] pub enum Quirk { /// df - "don't fragment" set (probably PMTUD); ignored for IPv6 Df, /// id+ - DF set but IPID non-zero; ignored for IPv6 NonZeroID, /// id- - DF not set but IPID is zero; ignored for IPv6 ZeroID, /// ecn - explicit congestion notification support Ecn, /// 0+ - "must be zero" field not zero; ignored for IPv6 MustBeZero, /// flow - non-zero IPv6 flow ID; ignored for IPv4 FlowID, /// seq- - sequence number is zero SeqNumZero, /// ack+ - ACK number is non-zero, but ACK flag not set AckNumNonZero, /// ack- - ACK number is zero, but ACK flag set AckNumZero, /// uptr+ - URG pointer is non-zero, but URG flag not set NonZeroURG, /// urgf+ - URG flag used Urg, /// pushf+ - PUSH flag used Push, /// ts1- - own timestamp specified as zero OwnTimestampZero, /// ts2+ - non-zero peer timestamp on initial SYN PeerTimestampNonZero, /// opt+ - trailing non-zero data in options segment TrailinigNonZero, /// exws - excessive window scaling factor (> 14) ExcessiveWindowScaling, /// bad - malformed TCP options OptBad, } /// Classification of TCP payload sizes used in fingerprinting #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum PayloadSize { /// Packet has no payload (empty) /// Common in SYN packets and some control messages Zero, /// Packet contains data in the payload /// Typical for data transfer packets NonZero, /// Matches any payload size /// Used as a wildcard in signature matching Any, } impl PayloadSize { pub fn distance_payload_size(&self, other: &PayloadSize) -> Option<u32> { if other == &PayloadSize::Any || self == other { Some(TcpMatchQuality::High.as_score()) } else { None } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/db_parse.rs
huginn-net-db/src/db_parse.rs
use std::str::FromStr; use crate::db::{Database, FingerprintCollection, Label, Type}; use crate::error::DatabaseError; use crate::{ http::{Header as HttpHeader, Signature as HttpSignature, Version as HttpVersion}, tcp::{IpVersion, PayloadSize, Quirk, Signature as TcpSignature, TcpOption, Ttl, WindowSize}, }; use nom::branch::alt; use nom::bytes::complete::{take_until, take_while}; use nom::character::complete::{alpha1, char, digit1}; use nom::combinator::{map, map_res, opt}; use nom::multi::{separated_list0, separated_list1}; use nom::sequence::{pair, separated_pair, terminated}; use nom::*; use nom::{ bytes::complete::tag, character::complete::{alphanumeric1, space0}, combinator::rest, sequence::preceded, IResult, }; use tracing::{trace, warn}; impl FromStr for Database { type Err = DatabaseError; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut classes = vec![]; let mut mtu_entries = vec![]; let mut ua_os_entries = vec![]; let mut temp_tcp_request_entries: Vec<(Label, Vec<TcpSignature>)> = vec![]; let mut temp_tcp_response_entries: Vec<(Label, Vec<TcpSignature>)> = vec![]; let mut temp_http_request_entries: Vec<(Label, Vec<HttpSignature>)> = vec![]; let mut temp_http_response_entries: Vec<(Label, Vec<HttpSignature>)> = vec![]; let mut cur_mod = None; for line in s.lines() { let line = line.trim(); if line.is_empty() || line.starts_with(';') { continue; } if line.starts_with("classes") { classes.append( &mut parse_classes(line) .map_err(|err| { DatabaseError::Parse(format!("fail to parse `classes`: {line}, {err}")) })? .1, ); } else if line.starts_with("ua_os") { ua_os_entries.append( &mut parse_ua_os(line) .map_err(|err| { DatabaseError::Parse(format!("fail to parse `ua_os`: {line}, {err}")) })? .1, ); } else if line.starts_with('[') && line.ends_with(']') { cur_mod = Some( parse_module(line) .map_err(|err| { DatabaseError::Parse(format!("fail to parse `module`: {line}, {err}")) })? 
.1, ); } else if let Some((module, direction)) = cur_mod.as_ref() { let (_, (name, value)) = parse_named_value(line).map_err(|err| { DatabaseError::Parse(format!("fail to parse named value: {line}, {err}")) })?; match name { "label" if module == "mtu" => { mtu_entries.push((value.to_string(), vec![])); } "sig" if module == "mtu" => { if let Some((_label, values)) = mtu_entries.last_mut() { let sig = value.parse::<u16>().map_err(|err| { DatabaseError::Parse(format!( "fail to parse `mtu` value: {value}, {err}" )) })?; values.push(sig); } else { return Err(DatabaseError::Parse(format!( "`mtu` value without `label`: {value}" ))); } } "label" => { let (_, label) = parse_label(value).map_err(|err| { DatabaseError::Parse(format!("fail to parse `label`: {value}, {err}")) })?; match (module.as_str(), direction.as_ref().map(|s| s.as_ref())) { ("tcp", Some("request")) => { temp_tcp_request_entries.push((label, vec![])) } ("tcp", Some("response")) => { temp_tcp_response_entries.push((label, vec![])) } ("http", Some("request")) => { temp_http_request_entries.push((label, vec![])) } ("http", Some("response")) => { temp_http_response_entries.push((label, vec![])) } _ => { warn!("skip `label` in unknown module `{}`: {}", module, value); } } } "sig" => match (module.as_str(), direction.as_ref().map(|s| s.as_ref())) { ("tcp", Some("request")) => { if let Some((label, values)) = temp_tcp_request_entries.last_mut() { let sig = value.parse()?; trace!("sig for `{}` tcp request: {}", label, sig); values.push(sig); } else { return Err(DatabaseError::Parse(format!( "tcp signature without `label`: {value}" ))); } } ("tcp", Some("response")) => { if let Some((label, values)) = temp_tcp_response_entries.last_mut() { let sig = value.parse()?; trace!("sig for `{}` tcp response: {}", label, sig); values.push(sig); } else { return Err(DatabaseError::Parse(format!( "tcp signature without `label`: {value}" ))); } } ("http", Some("request")) => { if let Some((label, values)) = 
temp_http_request_entries.last_mut() { let sig = value.parse()?; trace!("sig for `{}` http request: {}", label, sig); values.push(sig); } else { return Err(DatabaseError::Parse(format!( "http signature without `label`: {value}" ))); } } ("http", Some("response")) => { if let Some((label, values)) = temp_http_response_entries.last_mut() { let sig = value.parse()?; trace!("sig for `{}` http response: {}", label, sig); values.push(sig); } else { return Err(DatabaseError::Parse(format!( "http signature without `label`: {value}" ))); } } _ => { warn!("skip `sig` in unknown module `{}`: {}", module, value); } }, "sys" if module != "mtu" => {} _ => { warn!("skip unknown named value: {} = {}", name, value); } } } else { return Err(DatabaseError::Parse(format!( "unexpected line outside the module: {line}" ))); } } Ok(Database { classes, mtu: mtu_entries, ua_os: ua_os_entries, tcp_request: FingerprintCollection::new(temp_tcp_request_entries), tcp_response: FingerprintCollection::new(temp_tcp_response_entries), http_request: FingerprintCollection::new(temp_http_request_entries), http_response: FingerprintCollection::new(temp_http_response_entries), }) } } macro_rules! 
impl_from_str { ($ty:ty, $parse:ident) => { impl FromStr for $ty { type Err = DatabaseError; fn from_str(s: &str) -> Result<Self, Self::Err> { let (remaining, res) = $parse(s).map_err(|err| { DatabaseError::Parse(format!( "parse {} failed: {}, {}", stringify!($ty), s, err )) })?; if !remaining.is_empty() { Err(DatabaseError::Parse(format!( "parse {} failed, remaining: {}", stringify!($ty), remaining ))) } else { Ok(res) } } } }; } impl_from_str!(Label, parse_label); impl_from_str!(Type, parse_type); impl_from_str!(TcpSignature, parse_tcp_signature); impl_from_str!(IpVersion, parse_ip_version); impl_from_str!(Ttl, parse_ttl); impl_from_str!(WindowSize, parse_window_size); impl_from_str!(TcpOption, parse_tcp_option); impl_from_str!(Quirk, parse_quirk); impl_from_str!(PayloadSize, parse_payload_size); impl_from_str!(HttpSignature, parse_http_signature); impl_from_str!(HttpHeader, parse_http_header); fn parse_named_value(input: &str) -> IResult<&str, (&str, &str)> { let (input, (name, _, _, _, value)) = (alphanumeric1, space0, tag("="), space0, rest).parse(input)?; Ok((input, (name, value))) } fn parse_classes(input: &str) -> IResult<&str, Vec<String>> { let (input, (_, _, _, _, classes)) = ( tag("classes"), space0, tag("="), space0, separated_list0(tag(","), alphanumeric1), ) .parse(input)?; let class_vec = classes.into_iter().map(|s| s.to_string()).collect(); Ok((input, class_vec)) } fn parse_module(input: &str) -> IResult<&str, (String, Option<String>)> { let (input, (_, module, direction, _)) = (tag("["), alpha1, opt(preceded(tag(":"), alpha1)), tag("]")).parse(input)?; let module_str = module.to_string(); let direction_str = direction.map(|s| s.to_string()); Ok((input, (module_str, direction_str))) } fn parse_ua_os(input: &str) -> IResult<&str, Vec<(String, Option<String>)>> { let (input, (_, _, _, _, values)) = ( tag("ua_os"), space0, tag("="), space0, separated_list0(tag(","), parse_key_value), ) .parse(input)?; let result = values .into_iter() .map(|(name, 
value)| (name.to_string(), value.map(|s| s.to_string()))) .collect(); Ok((input, result)) } fn parse_key_value(input: &str) -> IResult<&str, (&str, Option<&str>)> { let (input, (name, _, value)) = (alphanumeric1, space0, opt(preceded((space0, tag("="), space0), alphanumeric1))) .parse(input)?; Ok((input, (name, value))) } fn parse_label(input: &str) -> IResult<&str, Label> { let (input, (ty, _, class, _, name, flavor)) = ( parse_type, tag(":"), alt((map(tag("!"), |_| None), map(take_until(":"), |s: &str| Some(s.to_string())))), tag(":"), take_until(":"), opt(preceded(tag(":"), rest)), ) .parse(input)?; Ok(( input, Label { ty, class, name: name.to_string(), flavor: flavor.filter(|f| !f.is_empty()).map(String::from), }, )) } fn parse_type(input: &str) -> IResult<&str, Type> { alt((tag("s").map(|_| Type::Specified), tag("g").map(|_| Type::Generic))).parse(input) } fn parse_tcp_signature(input: &str) -> IResult<&str, TcpSignature> { let ( input, (version, _, ittl, _, olen, _, mss, _, wsize, _, wscale, _, olayout, _, quirks, _, pclass), ) = ( parse_ip_version, tag(":"), parse_ttl, tag(":"), map_res(digit1, |s: &str| s.parse::<u8>()), // olen tag(":"), alt((tag("*").map(|_| None), map_res(digit1, |s: &str| s.parse::<u16>().map(Some)))), // mss tag(":"), parse_window_size, tag(","), alt((tag("*").map(|_| None), map_res(digit1, |s: &str| s.parse::<u8>().map(Some)))), // wscale tag(":"), separated_list1(tag(","), parse_tcp_option), tag(":"), separated_list0(tag(","), parse_quirk), tag(":"), parse_payload_size, ) .parse(input)?; Ok(( input, TcpSignature { version, ittl, olen, mss, wsize, wscale, olayout, quirks, pclass }, )) } fn parse_ip_version(input: &str) -> IResult<&str, IpVersion> { alt(( map(tag("4"), |_| IpVersion::V4), map(tag("6"), |_| IpVersion::V6), map(tag("*"), |_| IpVersion::Any), )) .parse(input) } fn parse_ttl(input: &str) -> IResult<&str, Ttl> { alt(( map_res(terminated(digit1, tag("-")), |s: &str| s.parse::<u8>().map(Ttl::Bad)), map_res(terminated(digit1, 
tag("+?")), |s: &str| s.parse::<u8>().map(Ttl::Guess)), map_res( separated_pair(digit1, tag("+"), digit1), |(ttl_str, distance_str): (&str, &str)| match ( ttl_str.parse::<u8>(), distance_str.parse::<u8>(), ) { (Ok(ttl), Ok(distance)) => Ok(Ttl::Distance(ttl, distance)), (Err(_), _) => Err("Failed to parse ttl"), (_, Err(_)) => Err("Failed to parse distance"), }, ), map_res(digit1, |s: &str| s.parse::<u8>().map(Ttl::Value)), )) .parse(input) } fn parse_window_size(input: &str) -> IResult<&str, WindowSize> { alt(( map(tag("*"), |_| WindowSize::Any), map_res(preceded(tag("mss*"), digit1), |s: &str| s.parse::<u8>().map(WindowSize::Mss)), map_res(preceded(tag("mtu*"), digit1), |s: &str| s.parse::<u8>().map(WindowSize::Mtu)), map_res(preceded(tag("%"), digit1), |s: &str| s.parse::<u16>().map(WindowSize::Mod)), map_res(digit1, |s: &str| s.parse::<u16>().map(WindowSize::Value)), )) .parse(input) } fn parse_tcp_option(input: &str) -> IResult<&str, TcpOption> { alt(( map_res(preceded(tag("eol+"), digit1), |s: &str| s.parse::<u8>().map(TcpOption::Eol)), tag("nop").map(|_| TcpOption::Nop), tag("mss").map(|_| TcpOption::Mss), tag("ws").map(|_| TcpOption::Ws), tag("sok").map(|_| TcpOption::Sok), tag("sack").map(|_| TcpOption::Sack), tag("ts").map(|_| TcpOption::TS), preceded(tag("?"), map(digit1, |s: &str| s.parse::<u8>().unwrap_or(0))) .map(TcpOption::Unknown), )) .parse(input) } fn parse_quirk(input: &str) -> IResult<&str, Quirk> { alt(( map(tag("df"), |_| Quirk::Df), map(tag("id+"), |_| Quirk::NonZeroID), map(tag("id-"), |_| Quirk::ZeroID), map(tag("ecn"), |_| Quirk::Ecn), map(tag("0+"), |_| Quirk::MustBeZero), map(tag("flow"), |_| Quirk::FlowID), map(tag("seq-"), |_| Quirk::SeqNumZero), map(tag("ack+"), |_| Quirk::AckNumNonZero), map(tag("ack-"), |_| Quirk::AckNumZero), map(tag("uptr+"), |_| Quirk::NonZeroURG), map(tag("urgf+"), |_| Quirk::Urg), map(tag("pushf+"), |_| Quirk::Push), map(tag("ts1-"), |_| Quirk::OwnTimestampZero), map(tag("ts2+"), |_| 
Quirk::PeerTimestampNonZero), map(tag("opt+"), |_| Quirk::TrailinigNonZero), map(tag("exws"), |_| Quirk::ExcessiveWindowScaling), map(tag("bad"), |_| Quirk::OptBad), )) .parse(input) } fn parse_payload_size(input: &str) -> IResult<&str, PayloadSize> { alt(( map(tag("0"), |_| PayloadSize::Zero), map(tag("+"), |_| PayloadSize::NonZero), map(tag("*"), |_| PayloadSize::Any), )) .parse(input) } fn parse_http_signature(input: &str) -> IResult<&str, HttpSignature> { let (input, (version, _, horder, _, habsent, _, expsw)) = ( parse_http_version, tag(":"), separated_list1(tag(","), parse_http_header), tag(":"), opt(separated_list0(tag(","), parse_http_header)), tag(":"), rest, ) .parse(input)?; let habsent = habsent .unwrap_or_default() .into_iter() .filter(|h| !h.name.is_empty()) .collect(); Ok((input, HttpSignature { version, horder, habsent, expsw: expsw.to_string() })) } fn parse_http_version(input: &str) -> IResult<&str, HttpVersion> { alt(( map(tag("0"), |_| HttpVersion::V10), map(tag("1"), |_| HttpVersion::V11), map(tag("*"), |_| HttpVersion::Any), )) .parse(input) } fn parse_header_key_value(input: &str) -> IResult<&str, (&str, Option<&str>)> { pair( take_while(|c: char| (c.is_ascii_alphanumeric() || c == '-') && c != ':' && c != '='), opt(preceded(tag("=["), terminated(take_until("]"), char(']')))), ) .parse(input) } fn parse_http_header(input: &str) -> IResult<&str, HttpHeader> { let (input, optional) = opt(char('?')).parse(input)?; let (input, (name, value)) = parse_header_key_value(input)?; Ok(( input, HttpHeader { optional: optional.is_some(), name: name.to_string(), value: value.map(|s| s.to_string()), }, )) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/db_matching_trait.rs
huginn-net-db/src/db_matching_trait.rs
use crate::db::Label; use std::fmt::Debug; use std::hash::Hash; /// An observed fingerprint from live network traffic. pub trait ObservedFingerprint: Clone + Debug { /// The type of key used to index database signatures compatible with this observed fingerprint. type Key: IndexKey; /// Generates an index key from this observed fingerprint. fn generate_index_key(&self) -> Self::Key; } /// A fingerprint signature as defined in a database. /// `OF` is the type of `ObservedFingerprint` that this database signature can be compared against. pub trait DatabaseSignature<OF: ObservedFingerprint> { /// Calculates a distance or dissimilarity score. Lower is better. fn calculate_distance(&self, observed: &OF) -> Option<u32>; /// Returns the quality score based on the distance. fn get_quality_score(&self, distance: u32) -> f32; /// Generates index keys from this database signature. /// It's a Vec because some DB signatures (like IpVersion::Any) might map to multiple keys. /// The Option<OF::Key> in the Vec allows for cases where a specific DB sig might not produce a key /// for a certain specific version (e.g. an IpVersion::Any sig, when asked to produce a V4 key, will). fn generate_index_keys_for_db_entry(&self) -> Vec<OF::Key>; } /// Base trait for keys used in fingerprint indexes. pub trait IndexKey: Debug + Clone + Eq + Hash {} /// Represents a collection of database signatures of a specific type. /// `OF` is the `ObservedFingerprint` type. /// `DS` is the `DatabaseSignature` type that can be compared against `OF`. pub trait FingerprintDb<OF: ObservedFingerprint, DS: DatabaseSignature<OF>> { /// Finds the best match for an observed fingerprint within this database. /// Returns the label of the match, the matching database signature, and a quality score. 
fn find_best_match(&self, observed: &OF) -> Option<(&Label, &DS, f32)>; } pub trait MatchQuality { /// Maximum possible distance for this quality type const MAX_DISTANCE: u32; /// Converts distance to a quality score between 0.0 and 1.0 fn distance_to_score(distance: u32) -> f32; }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/observable_tcp_signals_matching.rs
huginn-net-db/src/observable_tcp_signals_matching.rs
use crate::db::TcpIndexKey; use crate::db_matching_trait::{DatabaseSignature, MatchQuality, ObservedFingerprint}; use crate::observable_signals::TcpObservation; use crate::tcp::{self, IpVersion, PayloadSize}; impl TcpObservation { pub(crate) fn distance_olen(&self, other: &tcp::Signature) -> Option<u32> { if self.olen == other.olen { Some(tcp::TcpMatchQuality::High.as_score()) } else { Some(tcp::TcpMatchQuality::Low.as_score()) } } pub(crate) fn distance_mss(&self, other: &tcp::Signature) -> Option<u32> { if other.mss.is_none() || self.mss == other.mss { Some(tcp::TcpMatchQuality::High.as_score()) } else { Some(tcp::TcpMatchQuality::Low.as_score()) } } pub(crate) fn distance_wscale(&self, other: &tcp::Signature) -> Option<u32> { if other.wscale.is_none() || self.wscale == other.wscale { Some(tcp::TcpMatchQuality::High.as_score()) } else { Some(tcp::TcpMatchQuality::Medium.as_score()) } } pub(crate) fn distance_olayout(&self, other: &tcp::Signature) -> Option<u32> { if self.olayout == other.olayout { Some(tcp::TcpMatchQuality::High.as_score()) } else { None } } pub(crate) fn distance_quirks(&self, other: &tcp::Signature) -> Option<u32> { if self.quirks == other.quirks { Some(tcp::TcpMatchQuality::High.as_score()) } else { None } } } impl ObservedFingerprint for TcpObservation { type Key = TcpIndexKey; fn generate_index_key(&self) -> Self::Key { let olayout_parts: Vec<String> = self.olayout.iter().map(|opt| format!("{opt}")).collect(); TcpIndexKey { ip_version_key: self.version, olayout_key: olayout_parts.join(","), pclass_key: self.pclass, } } } impl DatabaseSignature<TcpObservation> for tcp::Signature { fn calculate_distance(&self, observed: &TcpObservation) -> Option<u32> { let distance = observed .version .distance_ip_version(&self.version)? .saturating_add(observed.ittl.distance_ttl(&self.ittl)?) .saturating_add(observed.distance_olen(self)?) .saturating_add(observed.distance_mss(self)?) 
.saturating_add( observed .wsize .distance_window_size(&self.wsize, observed.mss)?, ) .saturating_add(observed.distance_wscale(self)?) .saturating_add(observed.distance_olayout(self)?) .saturating_add(observed.distance_quirks(self)?) .saturating_add(observed.pclass.distance_payload_size(&self.pclass)?); Some(distance) } /// Returns the quality score based on the distance. /// /// The score is a value between 0.0 and 1.0, where 1.0 is a perfect match. /// /// The score is calculated based on the distance of the observed signal to the database signature. /// The distance is a value between 0 and 18, where 0 is a perfect match and 18 is the maximum possible distance. /// fn get_quality_score(&self, distance: u32) -> f32 { tcp::TcpMatchQuality::distance_to_score(distance) } fn generate_index_keys_for_db_entry(&self) -> Vec<TcpIndexKey> { let mut keys = Vec::new(); let olayout_key_str = self .olayout .iter() .map(|opt| format!("{opt}")) .collect::<Vec<String>>() .join(","); let versions_for_keys = if self.version == IpVersion::Any { vec![IpVersion::V4, IpVersion::V6] } else { vec![self.version] }; let pclasses_for_keys = if self.pclass == PayloadSize::Any { vec![PayloadSize::Zero, PayloadSize::NonZero] } else { vec![self.pclass] }; for v_key_part in &versions_for_keys { for pc_key_part in &pclasses_for_keys { keys.push(TcpIndexKey { ip_version_key: *v_key_part, olayout_key: olayout_key_str.clone(), pclass_key: *pc_key_part, }); } } keys } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/src/observable_http_signals_matching.rs
huginn-net-db/src/observable_http_signals_matching.rs
use crate::db::HttpIndexKey; use crate::db_matching_trait::{DatabaseSignature, MatchQuality}; use crate::http::{self, Header, HttpMatchQuality, Version}; use crate::observable_signals::{HttpRequestObservation, HttpResponseObservation}; pub trait HttpDistance { fn get_version(&self) -> Version; fn get_horder(&self) -> &[Header]; fn get_habsent(&self) -> &[Header]; fn get_expsw(&self) -> &str; fn distance_ip_version(&self, other: &http::Signature) -> Option<u32> { if other.version == Version::Any || self.get_version() == other.version { Some(HttpMatchQuality::High.as_score()) } else { None } } // Compare two header vectors respecting order and allowing optional header skips // // This function implements a sophisticated two-pointer algorithm to compare HTTP headers // from observed traffic against database signatures while preserving order and handling // optional headers that may be missing from the observed traffic. // // Algorithm Overview: // 1. Use two pointers to traverse both lists simultaneously // 2. When headers match perfectly (name + value), advance both pointers // 3. When names match but values differ, count as error only if header is required // 4. When names differ, skip optional signature headers or count required ones as errors // 5. Handle remaining headers at the end of either list // // Parameters: // - observed: Headers from actual HTTP traffic (never marked as optional) // - signature: Headers from database signature (may have optional headers marked with ?) 
// // Returns: // - Some(score) based on error count converted to quality score // - None if too many errors (unmatchable) fn distance_header(observed: &[Header], signature: &[Header]) -> Option<u32> { let mut obs_idx = 0usize; // Index pointer for observed headers let mut sig_idx = 0usize; // Index pointer for signature headers let mut errors: u32 = 0; // Running count of matching errors while obs_idx < observed.len() && sig_idx < signature.len() { let obs_header = &observed[obs_idx]; let sig_header = &signature[sig_idx]; if obs_header.name == sig_header.name && obs_header.value == sig_header.value { obs_idx = obs_idx.saturating_add(1); sig_idx = sig_idx.saturating_add(1); } else if obs_header.name == sig_header.name { if !sig_header.optional { errors = errors.saturating_add(1); } obs_idx = obs_idx.saturating_add(1); sig_idx = sig_idx.saturating_add(1); } else if sig_header.optional { sig_idx = sig_idx.saturating_add(1); } else { errors = errors.saturating_add(1); sig_idx = sig_idx.saturating_add(1); } } while obs_idx < observed.len() { errors = errors.saturating_add(1); obs_idx = obs_idx.saturating_add(1); } while sig_idx < signature.len() { if !signature[sig_idx].optional { errors = errors.saturating_add(1); } sig_idx = sig_idx.saturating_add(1); } match errors { 0..=2 => Some(HttpMatchQuality::High.as_score()), // 0-2 errors: High quality match 3..=5 => Some(HttpMatchQuality::Medium.as_score()), // 3-5 errors: Medium quality match 6..=8 => Some(HttpMatchQuality::Low.as_score()), // 6-8 errors: Low quality match 9..=11 => Some(HttpMatchQuality::Bad.as_score()), // 9-11 errors: Bad quality match _ => None, // 12+ errors: Too many differences, not a viable match } } fn distance_horder(&self, other: &http::Signature) -> Option<u32> { Self::distance_header(self.get_horder(), &other.horder) } fn distance_habsent(&self, other: &http::Signature) -> Option<u32> { Self::distance_header(self.get_habsent(), &other.habsent) } fn distance_expsw(&self, other: 
&http::Signature) -> Option<u32> { if other.expsw.as_str().contains(self.get_expsw()) { Some(HttpMatchQuality::High.as_score()) } else { Some(HttpMatchQuality::Bad.as_score()) } } } impl HttpDistance for HttpRequestObservation { fn get_version(&self) -> Version { self.version } fn get_horder(&self) -> &[Header] { &self.horder } fn get_habsent(&self) -> &[Header] { &self.habsent } fn get_expsw(&self) -> &str { &self.expsw } } impl HttpDistance for HttpResponseObservation { fn get_version(&self) -> Version { self.version } fn get_horder(&self) -> &[Header] { &self.horder } fn get_habsent(&self) -> &[Header] { &self.habsent } fn get_expsw(&self) -> &str { &self.expsw } } trait HttpSignatureHelper { fn calculate_http_distance<T: HttpDistance>(&self, observed: &T) -> Option<u32>; fn generate_http_index_keys(&self) -> Vec<HttpIndexKey>; /// Returns the quality score based on the distance. /// /// The score is a value between 0.0 and 1.0, where 1.0 is a perfect match. /// /// The score is calculated based on the distance of the observed signal to the database signature. /// The distance is a value between 0 and 12, where 0 is a perfect match and 12 is the maximum possible distance. fn get_quality_score_by_distance(&self, distance: u32) -> f32 { http::HttpMatchQuality::distance_to_score(distance) } } impl HttpSignatureHelper for http::Signature { fn calculate_http_distance<T: HttpDistance>(&self, observed: &T) -> Option<u32> { let signature: &http::Signature = self; let distance = observed .distance_ip_version(signature)? .saturating_add(observed.distance_horder(signature)?) .saturating_add(observed.distance_habsent(signature)?) 
.saturating_add(observed.distance_expsw(signature)?); Some(distance) } fn generate_http_index_keys(&self) -> Vec<HttpIndexKey> { let mut keys = Vec::new(); if self.version == Version::Any { keys.push(HttpIndexKey { http_version_key: Version::V10 }); keys.push(HttpIndexKey { http_version_key: Version::V11 }); } else { keys.push(HttpIndexKey { http_version_key: self.version }); } keys } } impl DatabaseSignature<HttpRequestObservation> for http::Signature { fn calculate_distance(&self, observed: &HttpRequestObservation) -> Option<u32> { self.calculate_http_distance(observed) } fn get_quality_score(&self, distance: u32) -> f32 { self.get_quality_score_by_distance(distance) } fn generate_index_keys_for_db_entry(&self) -> Vec<HttpIndexKey> { self.generate_http_index_keys() } } impl DatabaseSignature<HttpResponseObservation> for http::Signature { fn calculate_distance(&self, observed: &HttpResponseObservation) -> Option<u32> { self.calculate_http_distance(observed) } fn get_quality_score(&self, distance: u32) -> f32 { self.get_quality_score_by_distance(distance) } fn generate_index_keys_for_db_entry(&self) -> Vec<HttpIndexKey> { self.generate_http_index_keys() } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/tests/db.rs
huginn-net-db/tests/db.rs
use huginn_net_db::Database; #[test] fn test_default_database() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; assert_eq!(db.classes, vec!["win", "unix", "other"]); assert_eq!( db.mtu, vec![ ("Ethernet or modem".to_owned(), vec![576, 1500]), ("DSL".to_owned(), vec![1452, 1454, 1492]), ("GIF".to_owned(), vec![1240, 1280]), ("generic tunnel or VPN".to_owned(), vec![1300, 1400, 1420, 1440, 1450, 1460]), ("IPSec or GRE".to_owned(), vec![1476]), ("IPIP or SIT".to_owned(), vec![1480]), ("PPTP".to_owned(), vec![1490]), ("AX.25 radio modem".to_owned(), vec![256]), ("SLIP".to_owned(), vec![552]), ("Google".to_owned(), vec![1470]), ("VLAN".to_owned(), vec![1496]), ("Ericsson HIS modem".to_owned(), vec![1656]), ("jumbo Ethernet".to_owned(), vec![9000]), ("loopback".to_owned(), vec![3924, 16384, 16436]) ] ); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/tests/tcp.rs
huginn-net-db/tests/tcp.rs
use huginn_net_db::tcp::{TcpMatchQuality, Ttl}; #[test] fn test_distance_ttl_matching_cases() { assert_eq!( Ttl::Value(64).distance_ttl(&Ttl::Value(64)), Some(TcpMatchQuality::High.as_score()) ); assert_eq!( Ttl::Distance(57, 7).distance_ttl(&Ttl::Distance(57, 7)), Some(TcpMatchQuality::High.as_score()) ); assert_eq!( Ttl::Distance(57, 7).distance_ttl(&Ttl::Value(64)), Some(TcpMatchQuality::High.as_score()) ); assert_eq!( Ttl::Guess(64).distance_ttl(&Ttl::Value(64)), Some(TcpMatchQuality::High.as_score()) ); } #[test] fn test_distance_ttl_non_matching_cases() { assert_eq!( Ttl::Value(64).distance_ttl(&Ttl::Value(128)), Some(TcpMatchQuality::Low.as_score()) ); assert_eq!( Ttl::Distance(57, 7).distance_ttl(&Ttl::Value(128)), Some(TcpMatchQuality::Low.as_score()) ); assert_eq!(Ttl::Bad(0).distance_ttl(&Ttl::Bad(1)), Some(TcpMatchQuality::Low.as_score())); } #[test] fn test_distance_ttl_additional_cases() { assert_eq!( Ttl::Value(64).distance_ttl(&Ttl::Distance(57, 7)), Some(TcpMatchQuality::High.as_score()) ); assert_eq!( Ttl::Value(64).distance_ttl(&Ttl::Guess(64)), Some(TcpMatchQuality::High.as_score()) ); assert_eq!( Ttl::Value(64).distance_ttl(&Ttl::Distance(60, 7)), Some(TcpMatchQuality::Low.as_score()) ); } #[test] fn test_distance_ttl_incompatible_types() { assert_eq!(Ttl::Bad(0).distance_ttl(&Ttl::Value(64)), None); assert_eq!(Ttl::Distance(64, 7).distance_ttl(&Ttl::Bad(0)), None); assert_eq!(Ttl::Guess(64).distance_ttl(&Ttl::Distance(64, 7)), None); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/tests/db_parse.rs
huginn-net-db/tests/db_parse.rs
use huginn_net_db::http::{ Header as HttpHeader, Signature as HttpSignature, Version as HttpVersion, }; use huginn_net_db::tcp::Quirk::{AckNumNonZero, Df, NonZeroID}; use huginn_net_db::tcp::TcpOption::{Mss, Nop, Sok, Ws, TS}; use huginn_net_db::tcp::{IpVersion, PayloadSize, Signature as TcpSignature, Ttl, WindowSize}; use huginn_net_db::{Label, Type}; use lazy_static::lazy_static; lazy_static! { static ref LABELS: Vec<(&'static str, Label)> = vec![ ( "s:!:Uncle John's Networked ls Utility:2.3.0.1", Label { ty: Type::Specified, class: None, name: "Uncle John's Networked ls Utility".to_owned(), flavor: Some("2.3.0.1".to_owned()), }, ), ( "s:unix:Linux:3.11 and newer", Label { ty: Type::Specified, class: Some("unix".to_owned()), name: "Linux".to_owned(), flavor: Some("3.11 and newer".to_owned()), }, ), ( "s:!:Chrome:11.x to 26.x", Label { ty: Type::Specified, class: None, name: "Chrome".to_owned(), flavor: Some("11.x to 26.x".to_owned()), }, ), ( "s:!:curl:", Label { ty: Type::Specified, class: None, name: "curl".to_owned(), flavor: None, }, ) ]; static ref TCP_SIGNATURES: Vec<(&'static str, TcpSignature)> = vec![ ( "*:64:0:*:mss*20,10:mss,sok,ts,nop,ws:df,id+:0", TcpSignature { version: IpVersion::Any, ittl: Ttl::Value(64), olen: 0, mss: None, wsize: WindowSize::Mss(20), wscale: Some(10), olayout: vec![Mss, Sok, TS, Nop, Ws], quirks: vec![Df, NonZeroID], pclass: PayloadSize::Zero, } ), ( "*:64:0:*:16384,0:mss::0", TcpSignature { version: IpVersion::Any, ittl: Ttl::Value(64), olen: 0, mss: None, wsize: WindowSize::Value(16384), wscale: Some(0), olayout: vec![Mss], quirks: vec![], pclass: PayloadSize::Zero, } ), ( "4:128:0:1460:mtu*2,0:mss,nop,ws::0", TcpSignature { version: IpVersion::V4, ittl: Ttl::Value(128), olen: 0, mss: Some(1460), wsize: WindowSize::Mtu(2), wscale: Some(0), olayout: vec![Mss, Nop, Ws], quirks: vec![], pclass: PayloadSize::Zero, } ), ( "*:64-:0:265:%512,0:mss,sok,ts:ack+:0", TcpSignature { version: IpVersion::Any, ittl: Ttl::Bad(64), olen: 0, 
mss: Some(265), wsize: WindowSize::Mod(512), wscale: Some(0), olayout: vec![Mss, Sok, TS], quirks: vec![AckNumNonZero], pclass: PayloadSize::Zero, } ), ( "*:64:0:*:mss*44,1:mss,sok,ts,nop,ws:df,id+:0", TcpSignature { version: IpVersion::Any, ittl: Ttl::Value(64), olen: 0, mss: None, wsize: WindowSize::Mss(44), wscale: Some(1), olayout: vec![Mss, Sok, TS, Nop, Ws], quirks: vec![Df, NonZeroID], pclass: PayloadSize::Zero, } ), ( "*:64:0:*:*,*:mss,sok,ts,nop,ws:df,id+:0", TcpSignature { version: IpVersion::Any, ittl: Ttl::Value(64), olen: 0, mss: None, wsize: WindowSize::Any, wscale: None, olayout: vec![Mss, Sok, TS, Nop, Ws], quirks: vec![Df, NonZeroID], pclass: PayloadSize::Zero, } ) ]; static ref TTLS: Vec<(&'static str, Ttl)> = vec![ ( "64", Ttl::Value(64) ), ( "54+10", Ttl::Distance(54, 10) ), ( "64-", Ttl::Bad(64) ), ( "54+?", Ttl::Guess(54) ) ]; static ref HTTP_SIGNATURES: Vec<(&'static str, HttpSignature)> = vec![ ( "*:Host,User-Agent,Accept=[,*/*;q=],?Accept-Language,Accept-Encoding=[gzip,deflate],Accept-Charset=[utf-8;q=0.7,*;q=0.7],Keep-Alive=[300],Connection=[keep-alive]::Firefox/", HttpSignature { version: HttpVersion::Any, horder: vec![ header("Host"), header("User-Agent"), header("Accept").with_value(",*/*;q="), header("Accept-Language").optional(), header("Accept-Encoding").with_value("gzip,deflate"), header("Accept-Charset").with_value("utf-8;q=0.7,*;q=0.7"), header("Keep-Alive").with_value("300"), header("Connection").with_value("keep-alive"), ], habsent: vec![], expsw: "Firefox/".to_owned(), } ) ]; static ref HTTP_HEADERS: Vec<(&'static str, HttpHeader)> = vec![ ("Host", HttpHeader{ optional: false, name: "Host".to_owned(), value: None}), ("User-Agent", HttpHeader{ optional: false, name: "User-Agent".to_owned(), value: None}), ("Accept=[,*/*;q=]", HttpHeader{ optional: false, name: "Accept".to_owned(), value: Some(",*/*;q=".to_owned())}), ("?Accept-Language", HttpHeader{ optional: true, name: "Accept-Language".to_owned(), value: None}), ]; } #[test] 
fn test_label() { for (s, l) in LABELS.iter() { let result = s.parse::<Label>(); assert!(result.is_ok(), "Failed to parse label: {s}"); if let Ok(ref parsed) = result { assert_eq!(parsed, l); } } } #[test] fn test_tcp_signature() { for (s, sig) in TCP_SIGNATURES.iter() { let result = s.parse::<TcpSignature>(); assert!(result.is_ok(), "Failed to parse TCP signature: {s}"); if let Ok(ref parsed) = result { assert_eq!(parsed, sig); } assert_eq!(&sig.to_string(), s); } } #[test] fn test_ttl() { for (s, ttl) in TTLS.iter() { let result = s.parse::<Ttl>(); assert!(result.is_ok(), "Failed to parse TTL: {s}"); if let Ok(ref parsed) = result { assert_eq!(parsed, ttl); } assert_eq!(&ttl.to_string(), s); } } #[test] fn test_http_signature() { for (s, sig) in HTTP_SIGNATURES.iter() { let result = s.parse::<HttpSignature>(); assert!(result.is_ok(), "Failed to parse HTTP signature: {s}"); if let Ok(ref parsed) = result { assert_eq!(parsed, sig); } assert_eq!(&sig.to_string(), s); } } #[test] fn test_http_header() { for (s, h) in HTTP_HEADERS.iter() { let result = s.parse::<HttpHeader>(); assert!(result.is_ok(), "Failed to parse HTTP header: {s}"); if let Ok(ref parsed) = result { assert_eq!(parsed, h); } assert_eq!(&h.to_string(), s); } } /// Test helper function to create HTTP headers fn header<S: AsRef<str>>(name: S) -> HttpHeader { HttpHeader::new(name) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-db/tests/observable_http_signals_matching.rs
huginn-net-db/tests/observable_http_signals_matching.rs
use huginn_net_db::http::{Header, HttpMatchQuality}; use huginn_net_db::observable_http_signals_matching::HttpDistance; use huginn_net_db::observable_signals::{HttpRequestObservation, HttpResponseObservation}; #[test] fn test_distance_header_with_one_optional_header_mismatch() { let a = vec![ Header::new("Date"), Header::new("Server"), Header::new("Last-Modified").optional(), Header::new("Accept-Ranges").optional().with_value("bytes"), Header::new("Content-Length").optional(), Header::new("Content-Range").optional(), Header::new("Keep-Alive").optional().with_value("timeout"), Header::new("Connection").with_value("Keep-Alive"), Header::new("Transfer-Encoding") .optional() .with_value("chunked"), Header::new("Content-Type"), ]; let b = vec![ Header::new("Date"), Header::new("Server"), Header::new("Last-Modified").optional(), Header::new("Accept-Ranges").optional().with_value("bytes"), Header::new("Content-Length").optional(), Header::new("Content-Range").optional(), Header::new("Keep-Alive").with_value("timeout"), Header::new("Connection").with_value("Keep-Alive"), Header::new("Transfer-Encoding") .optional() .with_value("chunked"), Header::new("Content-Type"), ]; assert!(a[6].optional); assert!(!b[6].optional); assert_ne!(a[6], b[6]); let result = <HttpResponseObservation as HttpDistance>::distance_header(&a, &b); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Expected Medium quality for 1 error in lists of 10" ); } #[test] fn test_distance_header_optional_skip_in_middle() { let observed = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Connection").with_value("keep-alive"), ]; let signature = vec![ Header::new("Host"), Header::new("Accept-Language") .optional() .with_value("en-US"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Connection").with_value("keep-alive"), ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, 
Some(HttpMatchQuality::High.as_score()), "Optional header in middle should be skipped for perfect alignment" ); } #[test] fn test_distance_header_multiple_optional_skips() { let observed = vec![Header::new("Host"), Header::new("Connection").with_value("keep-alive")]; let signature = vec![ Header::new("Host"), Header::new("Accept-Language") .optional() .with_value("en-US"), Header::new("Accept-Encoding").optional().with_value("gzip"), Header::new("Connection").with_value("keep-alive"), ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Multiple optional headers should be skipped" ); } #[test] fn test_distance_header_required_in_middle_causes_error() { // Required header in middle should cause error and misalignment let observed = vec![Header::new("Host"), Header::new("Connection").with_value("keep-alive")]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), // Required, missing Header::new("Connection").with_value("keep-alive"), ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), // 1 error falls in High range (0-2 errors) "Required header missing should cause 1 error" ); } #[test] fn test_distance_header_realistic_browser_with_optional_skips() { let observed = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept").with_value("text/html"), Header::new("Connection").with_value("keep-alive"), ]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept").with_value("text/html"), Header::new("Accept-Language") .optional() .with_value("en-US"), // Optional, missing Header::new("Accept-Encoding").optional().with_value("gzip"), // Optional, missing Header::new("Cookie").optional(), // Optional, missing 
Header::new("Connection").with_value("keep-alive"), ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Browser should match signature even with optional headers missing" ); } #[test] fn test_distance_header_missing_optional_header() { let observed = vec![Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0")]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept-Language") .optional() .with_value("en-US"), // Missing but optional ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Missing optional headers should not cause errors" ); } #[test] fn test_distance_header_missing_required_header() { let observed = vec![Header::new("Host")]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), // Missing and NOT optional ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), // 1 error out of many "Missing required headers should cause errors" ); } #[test] fn test_distance_header_extra_headers_in_observed() { let observed = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("X-Custom-Header").with_value("custom"), // Extra header ]; let signature = vec![Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0")]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), // 1 error for extra header "Extra headers in observed should cause errors" ); } #[test] fn test_distance_header_optional_header_at_end() { let observed = vec![Header::new("Host"), 
Header::new("User-Agent").with_value("Mozilla/5.0")]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept-Language") .optional() .with_value("en-US"), // Optional, missing ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Missing optional headers at end should not cause errors" ); } #[test] fn test_distance_header_required_header_at_end() { let observed = vec![Header::new("Host")]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), // Required, missing ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Missing required headers should cause 1 error" ); } #[test] fn test_distance_header_observed_vs_signature_with_optional() { let observed = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept").with_value("text/html"), Header::new("Accept-Language").with_value("en-US"), ]; let signature = vec![ Header::new("Host"), Header::new("User-Agent").with_value("Mozilla/5.0"), Header::new("Accept").with_value("text/html"), Header::new("Accept-Language") .optional() .with_value("en-US"), // Optional but value must match ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Should match perfectly: all headers match including values for optional headers" ); } #[test] fn test_distance_header_value_mismatch_not_optional() { let observed = vec![Header::new("Host"), Header::new("Connection").with_value("keep-alive")]; let signature = vec![ Header::new("Host"), Header::new("Connection").with_value("close"), // Different value, not optional ]; let result = <HttpRequestObservation as 
HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Should have 1 error out of 2 headers" ); } #[test] fn test_distance_header_realistic_browser_scenario() { let observed = vec![ Header::new("Host"), Header::new("User-Agent") .with_value("Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120.0.0.0"), Header::new("Accept").with_value("text/html,application/xhtml+xml"), Header::new("Accept-Language").with_value("en-US,en;q=0.9"), Header::new("Accept-Encoding").with_value("gzip, deflate"), Header::new("Connection").with_value("keep-alive"), ]; // Database signature for Chrome let signature = vec![ Header::new("Host"), Header::new("User-Agent") .with_value("Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/120.0.0.0"), Header::new("Accept").with_value("text/html,application/xhtml+xml"), Header::new("Accept-Language") .optional() .with_value("en-US,en;q=0.9"), // Optional but value must match Header::new("Accept-Encoding").with_value("gzip, deflate"), Header::new("Connection").with_value("keep-alive"), ]; let result = <HttpRequestObservation as HttpDistance>::distance_header(&observed, &signature); assert_eq!( result, Some(HttpMatchQuality::High.as_score()), "Should match perfectly for realistic Chrome signature with value matching" ); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/observable.rs
huginn-net-http/src/observable.rs
use crate::http_common::{HttpCookie, HttpHeader}; use huginn_net_db::observable_signals::{HttpRequestObservation, HttpResponseObservation}; #[derive(Debug, Clone)] pub struct ObservableHttpRequest { pub matching: HttpRequestObservation, pub lang: Option<String>, pub user_agent: Option<String>, pub headers: Vec<HttpHeader>, pub cookies: Vec<HttpCookie>, pub referer: Option<String>, pub method: Option<String>, pub uri: Option<String>, } #[derive(Debug, Clone)] pub struct ObservableHttpResponse { pub matching: HttpResponseObservation, pub headers: Vec<HttpHeader>, pub status_code: Option<u16>, }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/akamai_extractor.rs
huginn-net-http/src/akamai_extractor.rs
use crate::akamai::{AkamaiFingerprint, Http2Priority, PseudoHeader, SettingId, SettingParameter}; use crate::http2_parser::Http2Parser; use crate::http2_parser::{Http2Frame, Http2FrameType}; use crate::http_common::HttpHeader; use hpack_patched::Decoder; /// Calculate the total number of bytes consumed by parsing the given frames /// /// This helper function calculates the total size of all frames (including headers), /// which is useful for tracking parsing progress when processing incremental data. /// /// # Parameters /// - `frames`: Slice of HTTP/2 frames /// /// # Returns /// The total number of bytes consumed (sum of `total_size()` for all frames) /// /// # Example /// ```no_run /// use huginn_net_http::akamai_extractor::calculate_frames_bytes_consumed; /// # use huginn_net_http::http2_parser::Http2Frame; /// # let frames: Vec<Http2Frame> = vec![]; /// let bytes_consumed = calculate_frames_bytes_consumed(&frames); /// println!("Consumed {} bytes", bytes_consumed); /// ``` #[must_use] pub fn calculate_frames_bytes_consumed(frames: &[Http2Frame]) -> usize { frames.iter().map(|f| f.total_size()).sum() } /// Extract Akamai HTTP/2 fingerprint directly from raw bytes /// /// This is a convenience function that combines parsing HTTP/2 frames and extracting /// the Akamai fingerprint in a single call. Automatically handles the HTTP/2 connection /// preface if present. 
/// /// # Parameters /// - `data`: Raw HTTP/2 frame data (may include connection preface) /// /// # Returns /// - `Some(AkamaiFingerprint)` if enough frames are present and fingerprint can be extracted /// - `None` if insufficient data, parsing errors, or fingerprint cannot be generated /// /// # Example /// ```no_run /// use huginn_net_http::akamai_extractor::extract_akamai_fingerprint_from_bytes; /// /// let data = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x06\x04\x00\x00\x00\x00\x00"; /// if let Some(fingerprint) = extract_akamai_fingerprint_from_bytes(data) { /// println!("Akamai: {}", fingerprint.fingerprint); /// } /// ``` #[must_use] pub fn extract_akamai_fingerprint_from_bytes(data: &[u8]) -> Option<AkamaiFingerprint> { let parser = Http2Parser::new(); parser .parse_frames_skip_preface(data) .ok() .and_then(|(frames, _)| extract_akamai_fingerprint(&frames)) } /// Extract Akamai HTTP/2 fingerprint from HTTP/2 frames /// /// This function analyzes HTTP/2 connection frames (SETTINGS, WINDOW_UPDATE, PRIORITY, HEADERS) /// to generate an Akamai fingerprint following the Blackhat EU 2017 specification. 
/// /// # Parameters /// - `frames`: Slice of HTTP/2 frames captured from the connection start /// /// # Returns /// - `Some(AkamaiFingerprint)` if enough frames are present /// - `None` if insufficient data or parsing errors /// /// # Example /// ```no_run /// use huginn_net_http::akamai_extractor::extract_akamai_fingerprint; /// # use huginn_net_http::http2_parser::Http2Frame; /// # let frames: Vec<Http2Frame> = vec![]; /// if let Some(fingerprint) = extract_akamai_fingerprint(&frames) { /// println!("Akamai: {}", fingerprint.fingerprint); /// } /// ``` #[must_use] pub fn extract_akamai_fingerprint(frames: &[Http2Frame]) -> Option<AkamaiFingerprint> { let settings = extract_settings_parameters(frames); let window_update = extract_window_update(frames); let priority_frames = extract_priority_frames(frames); let pseudo_header_order = extract_pseudo_header_order(frames); // Require at least SETTINGS frame to generate fingerprint if settings.is_empty() { return None; } Some(AkamaiFingerprint::new( settings, window_update, priority_frames, pseudo_header_order, )) } /// Extract SETTINGS frame parameters /// /// SETTINGS frame format (RFC 7540): /// Each setting is 6 bytes: [id:16][value:32] fn extract_settings_parameters(frames: &[Http2Frame]) -> Vec<SettingParameter> { frames .iter() .find(|f| f.frame_type == Http2FrameType::Settings && f.stream_id == 0) .map(|frame| parse_settings_payload(&frame.payload)) .unwrap_or_default() } #[doc(hidden)] pub fn parse_settings_payload(payload: &[u8]) -> Vec<SettingParameter> { let mut settings = Vec::new(); let mut offset: usize = 0; while offset.saturating_add(6) <= payload.len() { if let (Some(&id_h), Some(&id_l), Some(&v0), Some(&v1), Some(&v2), Some(&v3)) = ( payload.get(offset), payload.get(offset.saturating_add(1)), payload.get(offset.saturating_add(2)), payload.get(offset.saturating_add(3)), payload.get(offset.saturating_add(4)), payload.get(offset.saturating_add(5)), ) { let id = u16::from_be_bytes([id_h, id_l]); let 
value = u32::from_be_bytes([v0, v1, v2, v3]); settings.push(SettingParameter { id: SettingId::from(id), value }); } offset = offset.saturating_add(6); } settings } /// Extract WINDOW_UPDATE value /// /// WINDOW_UPDATE frame format (RFC 7540): /// [R:1][Window Size Increment:31] fn extract_window_update(frames: &[Http2Frame]) -> u32 { frames .iter() .find(|f| f.frame_type == Http2FrameType::WindowUpdate && f.stream_id == 0) .and_then(|frame| parse_window_update_payload(&frame.payload)) .unwrap_or(0) } #[doc(hidden)] pub fn parse_window_update_payload(payload: &[u8]) -> Option<u32> { if payload.len() < 4 { return None; } // Clear reserved bit (first bit) let increment = u32::from_be_bytes([payload[0] & 0x7F, payload[1], payload[2], payload[3]]); Some(increment) } /// Extract PRIORITY frames /// /// PRIORITY frame format (RFC 7540): /// [E:1][Stream Dependency:31][Weight:8] fn extract_priority_frames(frames: &[Http2Frame]) -> Vec<Http2Priority> { frames .iter() .filter(|f| f.frame_type == Http2FrameType::Priority) .filter_map(|frame| parse_priority_payload(frame.stream_id, &frame.payload)) .collect() } #[doc(hidden)] pub fn parse_priority_payload(stream_id: u32, payload: &[u8]) -> Option<Http2Priority> { if payload.len() < 5 { return None; } let exclusive = (payload[0] & 0x80) != 0; let depends_on = u32::from_be_bytes([payload[0] & 0x7F, payload[1], payload[2], payload[3]]); let weight = payload[4]; Some(Http2Priority { stream_id, exclusive, depends_on, weight }) } /// Extract pseudo-header order from HEADERS frame /// /// Pseudo-headers in HTTP/2: /// - `:method` /// - `:path` /// - `:authority` /// - `:scheme` /// - `:status` (responses only) fn extract_pseudo_header_order(frames: &[Http2Frame]) -> Vec<PseudoHeader> { // Find first HEADERS frame let headers_frame = frames .iter() .find(|f| f.frame_type == Http2FrameType::Headers && f.stream_id > 0); if let Some(frame) = headers_frame { if let Ok(headers) = decode_headers(&frame.payload) { return headers .iter() 
.filter(|h| h.name.starts_with(':')) .map(|h| PseudoHeader::from(h.name.as_str())) .collect(); } } Vec::new() } /// Decode HPACK-encoded headers fn decode_headers(payload: &[u8]) -> Result<Vec<HttpHeader>, hpack_patched::decoder::DecoderError> { let mut decoder = Decoder::new(); let mut headers = Vec::new(); match decoder.decode(payload) { Ok(header_list) => { for (position, (name, value)) in header_list.into_iter().enumerate() { if let (Ok(name_str), Ok(value_str)) = (String::from_utf8(name), String::from_utf8(value)) { let source = if name_str.starts_with(':') { crate::http_common::HeaderSource::Http2PseudoHeader } else { crate::http_common::HeaderSource::Http2Header }; headers.push(HttpHeader { name: name_str, value: Some(value_str), position, source, }); } } Ok(headers) } Err(e) => Err(e), } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/lib.rs
huginn-net-http/src/lib.rs
#![forbid(unsafe_code)] pub use huginn_net_db as db; pub use huginn_net_db::http; pub mod akamai; pub mod akamai_extractor; pub mod filter; pub mod http1_parser; pub mod http1_process; pub mod http2_fingerprint_extractor; pub mod http2_parser; pub mod http2_process; pub mod http_common; pub mod http_languages; pub mod http_process; pub mod packet_parser; pub mod raw_filter; pub mod packet_hash; pub mod display; pub mod error; pub mod observable; pub mod output; pub mod parallel; pub mod process; pub mod signature_matcher; // Re-exports pub use akamai::{AkamaiFingerprint, Http2Priority, PseudoHeader, SettingId, SettingParameter}; pub use akamai_extractor::{ calculate_frames_bytes_consumed, extract_akamai_fingerprint, extract_akamai_fingerprint_from_bytes, }; pub use error::*; pub use filter::*; pub use http1_process::{ build_absent_headers_from_new_parser, convert_headers_to_http_format, parse_http1_request, Http1Processor, }; pub use http2_fingerprint_extractor::Http2FingerprintExtractor; pub use http2_parser::{Http2Frame, Http2FrameType, Http2Parser, HTTP2_CONNECTION_PREFACE}; pub use http2_process::{parse_http2_request, Http2Processor}; pub use http_common::HttpProcessor; pub use http_process::*; pub use observable::*; pub use output::*; pub use parallel::{DispatchResult, PoolStats, WorkerPool, WorkerStats}; pub use process::*; pub use signature_matcher::*; use crate::packet_parser::{parse_packet, IpPacket}; use pcap_file::pcap::PcapReader; use pnet::datalink::{self, Channel, Config}; use std::fs::File; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; use tracing::{debug, error}; use ttl_cache::TtlCache; /// Configuration for parallel processing /// /// Controls the behavior of worker threads in parallel mode. 
#[derive(Debug, Clone)] pub struct ParallelConfig { /// Number of worker threads to spawn pub num_workers: usize, /// Size of packet queue per worker (affects memory usage and backpressure) pub queue_size: usize, /// Maximum packets to process in one batch before checking for new work /// Higher = better throughput, lower = better latency (typical: 8-32) pub batch_size: usize, /// Worker receive timeout in milliseconds /// Lower = faster shutdown, higher = better throughput (typical: 5-20) pub timeout_ms: u64, } /// An HTTP-focused passive fingerprinting analyzer. /// /// The `HuginnNetHttp` struct handles HTTP packet analysis for browser fingerprinting, /// web server detection, and HTTP protocol analysis using p0f-style methodologies. pub struct HuginnNetHttp { http_flows: TtlCache<FlowKey, TcpFlow>, http_processors: HttpProcessors, parallel_config: Option<ParallelConfig>, worker_pool: Option<Arc<WorkerPool>>, database: Option<Arc<db::Database>>, max_connections: usize, filter_config: Option<FilterConfig>, } impl HuginnNetHttp { /// Creates a new instance of `HuginnNetHttp` in sequential mode. /// /// # Parameters /// - `database`: Optional signature database for HTTP matching /// - `max_connections`: Maximum number of HTTP flows to track /// /// # Returns /// A new `HuginnNetHttp` instance ready for HTTP analysis. pub fn new( database: Option<Arc<db::Database>>, max_connections: usize, ) -> Result<Self, HuginnNetHttpError> { Ok(Self { http_flows: TtlCache::new(max_connections), http_processors: HttpProcessors::new(), parallel_config: None, worker_pool: None, database, max_connections, filter_config: None, }) } /// Creates a new instance of `HuginnNetHttp` with full parallel configuration. 
/// /// # Parameters /// - `database`: Optional signature database for HTTP matching /// - `max_connections`: Maximum number of HTTP flows to track per worker (typical: 1000-10000) /// - `num_workers`: Number of worker threads (recommended: 2 for HTTP due to flow tracking) /// - `queue_size`: Size of each worker's packet queue (typical: 100-200) /// - `batch_size`: Maximum packets to process in one batch (typical: 8-32, recommended: 16) /// - `timeout_ms`: Worker receive timeout in milliseconds (typical: 5-20, recommended: 10) /// /// # Configuration Guide /// /// ## batch_size /// - **Low (8)**: Lower latency, more responsive, higher overhead /// - **Medium (16)**: Balanced throughput and latency *(recommended)* /// - **High (32-64)**: Maximum throughput, higher latency /// /// ## timeout_ms /// - **Low (5-10ms)**: Fast shutdown, slightly lower throughput *(recommended: 10)* /// - **Medium (15-20ms)**: Better throughput, slower shutdown /// - **High (50ms+)**: Maximum throughput, slow shutdown /// /// # Example /// ```rust,no_run /// use huginn_net_http::HuginnNetHttp; /// /// // Balanced configuration (recommended for HTTP) /// let http = HuginnNetHttp::with_config(None, 1000, 2, 100, 16, 10); /// /// // Low latency /// let low_latency = HuginnNetHttp::with_config(None, 1000, 2, 100, 8, 5); /// /// // High throughput /// let high_throughput = HuginnNetHttp::with_config(None, 5000, 2, 200, 32, 15); /// ``` /// /// # Returns /// A new `HuginnNetHttp` instance configured for parallel processing. 
pub fn with_config( database: Option<Arc<db::Database>>, max_connections: usize, num_workers: usize, queue_size: usize, batch_size: usize, timeout_ms: u64, ) -> Result<Self, HuginnNetHttpError> { Ok(Self { http_flows: TtlCache::new(max_connections), http_processors: HttpProcessors::new(), parallel_config: Some(ParallelConfig { num_workers, queue_size, batch_size, timeout_ms, }), worker_pool: None, database, max_connections, filter_config: None, }) } /// Configure packet filtering (builder pattern) pub fn with_filter(mut self, config: FilterConfig) -> Self { self.filter_config = Some(config); self } /// Initializes the worker pool for parallel processing. /// /// Must be called after `with_config` and before calling `analyze_network` or `analyze_pcap`. /// /// # Parameters /// - `result_tx`: Channel sender for analysis results /// /// # Returns /// `Ok(())` if pool initialized successfully, error otherwise. pub fn init_pool( &mut self, result_tx: Sender<HttpAnalysisResult>, ) -> Result<(), HuginnNetHttpError> { if let Some(config) = &self.parallel_config { let pool = WorkerPool::new( config.num_workers, config.queue_size, config.batch_size, config.timeout_ms, result_tx, self.database.clone(), self.max_connections, self.filter_config.clone(), )?; self.worker_pool = Some(pool); Ok(()) } else { Err(HuginnNetHttpError::Misconfiguration( "Parallel config not set. Use with_config() instead of new()".to_string(), )) } } /// Returns a reference to the worker pool if initialized. pub fn worker_pool(&self) -> Option<&Arc<WorkerPool>> { self.worker_pool.as_ref() } /// Returns current worker pool statistics if parallel mode is active. 
pub fn stats(&self) -> Option<PoolStats> { self.worker_pool.as_ref().map(|pool| pool.stats()) } fn process_with<F>( &mut self, packet_fn: F, sender: Sender<HttpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetHttpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetHttpError>>, { if self.worker_pool.is_some() { self.process_parallel(packet_fn, cancel_signal) } else { self.process_sequential(packet_fn, sender, cancel_signal) } } fn process_sequential<F>( &mut self, mut packet_fn: F, sender: Sender<HttpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetHttpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetHttpError>>, { while let Some(packet_result) = packet_fn() { if let Some(ref cancel) = cancel_signal { if cancel.load(Ordering::Relaxed) { debug!("Cancellation signal received, stopping packet processing"); break; } } match packet_result { Ok(packet) => match self.process_packet(&packet) { Ok(result) => { if sender.send(result).is_err() { error!("Receiver dropped, stopping packet processing"); break; } } Err(http_error) => { debug!("Error processing packet: {}", http_error); } }, Err(e) => { error!("Failed to read packet: {}", e); } } } Ok(()) } fn process_parallel<F>( &mut self, mut packet_fn: F, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetHttpError> where F: FnMut() -> Option<Result<Vec<u8>, HuginnNetHttpError>>, { let pool = self.worker_pool.as_ref().ok_or_else(|| { HuginnNetHttpError::Misconfiguration("Worker pool not initialized".to_string()) })?; while let Some(packet_result) = packet_fn() { if let Some(ref cancel) = cancel_signal { if cancel.load(Ordering::Relaxed) { debug!("Cancellation signal received, stopping packet processing"); break; } } match packet_result { Ok(packet) => { let _ = pool.dispatch(packet); } Err(e) => { error!("Failed to read packet: {}", e); } } } Ok(()) } /// Analyzes network traffic from a live network interface for HTTP packets. 
/// /// # Parameters /// - `interface_name`: The name of the network interface to capture from. /// - `sender`: A channel sender to send analysis results. /// - `cancel_signal`: Optional atomic boolean to signal cancellation. /// /// # Returns /// A `Result` indicating success or failure. pub fn analyze_network( &mut self, interface_name: &str, sender: Sender<HttpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetHttpError> { let interfaces = datalink::interfaces(); let interface = interfaces .into_iter() .find(|iface| iface.name == interface_name) .ok_or_else(|| { HuginnNetHttpError::Parse(format!( "Could not find network interface: {interface_name}" )) })?; debug!("Using network interface: {}", interface.name); let config = Config { promiscuous: true, ..Config::default() }; let (_tx, mut rx) = match datalink::channel(&interface, config) { Ok(Channel::Ethernet(tx, rx)) => (tx, rx), Ok(_) => return Err(HuginnNetHttpError::Parse("Unhandled channel type".to_string())), Err(e) => { return Err(HuginnNetHttpError::Parse(format!("Unable to create channel: {e}"))) } }; self.process_with( move || match rx.next() { Ok(packet) => Some(Ok(packet.to_vec())), Err(e) => { Some(Err(HuginnNetHttpError::Parse(format!("Error receiving packet: {e}")))) } }, sender, cancel_signal, ) } /// Analyzes HTTP packets from a PCAP file. /// /// # Parameters /// - `pcap_path`: Path to the PCAP file to analyze. /// - `sender`: A channel sender to send analysis results. /// - `cancel_signal`: Optional atomic boolean to signal cancellation. /// /// # Returns /// A `Result` indicating success or failure. 
pub fn analyze_pcap( &mut self, pcap_path: &str, sender: Sender<HttpAnalysisResult>, cancel_signal: Option<Arc<AtomicBool>>, ) -> Result<(), HuginnNetHttpError> { let file = File::open(pcap_path) .map_err(|e| HuginnNetHttpError::Parse(format!("Failed to open PCAP file: {e}")))?; let mut pcap_reader = PcapReader::new(file) .map_err(|e| HuginnNetHttpError::Parse(format!("Failed to create PCAP reader: {e}")))?; self.process_with( move || match pcap_reader.next_packet() { Some(Ok(packet)) => Some(Ok(packet.data.to_vec())), Some(Err(e)) => { Some(Err(HuginnNetHttpError::Parse(format!("Error reading PCAP packet: {e}")))) } None => None, }, sender, cancel_signal, ) } /// Processes a single packet and extracts HTTP information if present. /// /// # Parameters /// - `packet`: The raw packet data. /// /// # Returns /// A `Result` containing an `HttpAnalysisResult` or an error. fn process_packet(&mut self, packet: &[u8]) -> Result<HttpAnalysisResult, HuginnNetHttpError> { if let Some(ref filter) = self.filter_config { if !raw_filter::apply(packet, filter) { debug!("Filtered out packet before parsing"); return Ok(HttpAnalysisResult { http_request: None, http_response: None }); } } let matcher = self .database .as_ref() .map(|db| SignatureMatcher::new(db.as_ref())); match parse_packet(packet) { IpPacket::Ipv4(ipv4) => process::process_ipv4_packet( &ipv4, &mut self.http_flows, &self.http_processors, matcher.as_ref(), ), IpPacket::Ipv6(ipv6) => process::process_ipv6_packet( &ipv6, &mut self.http_flows, &self.http_processors, matcher.as_ref(), ), IpPacket::None => Ok(HttpAnalysisResult { http_request: None, http_response: None }), } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/process.rs
huginn-net-http/src/process.rs
use crate::error::HuginnNetHttpError; use crate::output::{ Browser, BrowserQualityMatched, HttpRequestOutput, HttpResponseOutput, IpPort, WebServer, WebServerQualityMatched, }; use crate::{http_process, HttpAnalysisResult, SignatureMatcher}; use huginn_net_db::http::HttpDiagnosis; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use pnet::packet::tcp::TcpPacket; use pnet::packet::Packet; use std::net::IpAddr; use ttl_cache::TtlCache; pub struct ObservablePackage { pub source: IpPort, pub destination: IpPort, pub http_result: HttpAnalysisResult, } /// Processes an IPv4 packet for HTTP content. pub fn process_ipv4_packet( ipv4: &Ipv4Packet, http_flows: &mut TtlCache<http_process::FlowKey, http_process::TcpFlow>, http_processors: &http_process::HttpProcessors, matcher: Option<&SignatureMatcher>, ) -> Result<HttpAnalysisResult, HuginnNetHttpError> { let observable_package = create_observable_package_ipv4(ipv4, http_flows, http_processors, matcher)?; Ok(observable_package.http_result) } fn create_observable_package_ipv4( ipv4: &Ipv4Packet, http_flows: &mut TtlCache<http_process::FlowKey, http_process::TcpFlow>, http_processors: &http_process::HttpProcessors, matcher: Option<&SignatureMatcher>, ) -> Result<ObservablePackage, HuginnNetHttpError> { let tcp = TcpPacket::new(ipv4.payload()) .ok_or_else(|| HuginnNetHttpError::Parse("Invalid TCP packet".to_string()))?; let source = IpPort { ip: IpAddr::V4(ipv4.get_source()), port: tcp.get_source() }; let destination = IpPort { ip: IpAddr::V4(ipv4.get_destination()), port: tcp.get_destination() }; let http_package = http_process::process_http_ipv4(ipv4, http_flows, http_processors)?; let mut http_result = HttpAnalysisResult { http_request: None, http_response: None }; if let Some(http_request) = http_package.http_request { let browser_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_http_request(&http_request) { BrowserQualityMatched { browser: 
Some(Browser::from(label)), quality: huginn_net_db::utils::MatchQualityType::Matched(quality), } } else { BrowserQualityMatched { browser: None, quality: huginn_net_db::utils::MatchQualityType::NotMatched, } } } else { BrowserQualityMatched { browser: None, quality: huginn_net_db::utils::MatchQualityType::Disabled, } }; let user_agent = http_request.user_agent.clone(); let (signature_matcher, ua_matcher) = if let Some(matcher) = matcher { let sig_match = matcher.matching_by_http_request(&http_request); let ua_match = user_agent .as_ref() .and_then(|ua| matcher.matching_by_user_agent(ua.clone())); (sig_match, ua_match) } else { (None, None) }; let diagnosis = crate::http_common::get_diagnostic( user_agent, ua_matcher, signature_matcher.map(|(label, _signature, _quality)| label), ); let request_output = HttpRequestOutput { source: IpPort::new(std::net::IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new( std::net::IpAddr::V4(ipv4.get_destination()), tcp.get_destination(), ), lang: http_request.lang.clone(), diagnosis, browser_matched: browser_quality, sig: http_request, }; http_result.http_request = Some(request_output); } if let Some(http_response) = http_package.http_response { let web_server_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_http_response(&http_response) { WebServerQualityMatched { web_server: Some(WebServer::from(label)), quality: huginn_net_db::utils::MatchQualityType::Matched(quality), } } else { WebServerQualityMatched { web_server: None, quality: huginn_net_db::utils::MatchQualityType::NotMatched, } } } else { WebServerQualityMatched { web_server: None, quality: huginn_net_db::utils::MatchQualityType::Disabled, } }; let response_output = HttpResponseOutput { source: IpPort::new(std::net::IpAddr::V4(ipv4.get_source()), tcp.get_source()), destination: IpPort::new( std::net::IpAddr::V4(ipv4.get_destination()), tcp.get_destination(), ), diagnosis: HttpDiagnosis::None, 
// Default diagnosis for responses web_server_matched: web_server_quality, sig: http_response, }; http_result.http_response = Some(response_output); } Ok(ObservablePackage { source, destination, http_result }) } /// Processes an IPv6 packet for HTTP content. pub fn process_ipv6_packet( ipv6: &Ipv6Packet, http_flows: &mut TtlCache<http_process::FlowKey, http_process::TcpFlow>, http_processors: &http_process::HttpProcessors, matcher: Option<&SignatureMatcher>, ) -> Result<HttpAnalysisResult, HuginnNetHttpError> { let observable_package = create_observable_package_ipv6(ipv6, http_flows, http_processors, matcher)?; Ok(observable_package.http_result) } fn create_observable_package_ipv6( ipv6: &Ipv6Packet, http_flows: &mut TtlCache<http_process::FlowKey, http_process::TcpFlow>, http_processors: &http_process::HttpProcessors, matcher: Option<&SignatureMatcher>, ) -> Result<ObservablePackage, HuginnNetHttpError> { // Extract TCP info for source/destination ports let tcp = TcpPacket::new(ipv6.payload()) .ok_or_else(|| HuginnNetHttpError::Parse("Invalid TCP packet".to_string()))?; let source = IpPort { ip: IpAddr::V6(ipv6.get_source()), port: tcp.get_source() }; let destination = IpPort { ip: IpAddr::V6(ipv6.get_destination()), port: tcp.get_destination() }; let http_package = http_process::process_http_ipv6(ipv6, http_flows, http_processors)?; let mut http_result = HttpAnalysisResult { http_request: None, http_response: None }; // Process HTTP request if let Some(http_request) = http_package.http_request { let browser_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_http_request(&http_request) { BrowserQualityMatched { browser: Some(Browser::from(label)), quality: huginn_net_db::utils::MatchQualityType::Matched(quality), } } else { BrowserQualityMatched { browser: None, quality: huginn_net_db::utils::MatchQualityType::NotMatched, } } } else { BrowserQualityMatched { browser: None, quality: 
huginn_net_db::utils::MatchQualityType::Disabled, } }; let user_agent = http_request.user_agent.clone(); let (signature_matcher, ua_matcher) = if let Some(matcher) = matcher { let sig_match = matcher.matching_by_http_request(&http_request); let ua_match = user_agent .as_ref() .and_then(|ua| matcher.matching_by_user_agent(ua.clone())); (sig_match, ua_match) } else { (None, None) }; let diagnosis = crate::http_common::get_diagnostic( user_agent, ua_matcher, signature_matcher.map(|(label, _signature, _quality)| label), ); let request_output = HttpRequestOutput { source: IpPort::new(std::net::IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new( std::net::IpAddr::V6(ipv6.get_destination()), tcp.get_destination(), ), lang: http_request.lang.clone(), diagnosis, browser_matched: browser_quality, sig: http_request, }; http_result.http_request = Some(request_output); } // Process HTTP response if let Some(http_response) = http_package.http_response { let web_server_quality = if let Some(matcher) = matcher { if let Some((label, _signature, quality)) = matcher.matching_by_http_response(&http_response) { WebServerQualityMatched { web_server: Some(WebServer::from(label)), quality: huginn_net_db::utils::MatchQualityType::Matched(quality), } } else { WebServerQualityMatched { web_server: None, quality: huginn_net_db::utils::MatchQualityType::NotMatched, } } } else { WebServerQualityMatched { web_server: None, quality: huginn_net_db::utils::MatchQualityType::Disabled, } }; let response_output = HttpResponseOutput { source: IpPort::new(std::net::IpAddr::V6(ipv6.get_source()), tcp.get_source()), destination: IpPort::new( std::net::IpAddr::V6(ipv6.get_destination()), tcp.get_destination(), ), diagnosis: HttpDiagnosis::None, // Default diagnosis for responses web_server_matched: web_server_quality, sig: http_response, }; http_result.http_response = Some(response_output); } Ok(ObservablePackage { source, destination, http_result }) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http1_process.rs
huginn-net-http/src/http1_process.rs
use crate::error::HuginnNetHttpError; use crate::http::Header; use crate::http_common::HttpProcessor; use crate::observable::{ObservableHttpRequest, ObservableHttpResponse}; use crate::{http, http1_parser, http2_parser, http2_process, http_common, http_languages}; use tracing::debug; /// HTTP/1.x Protocol Processor /// /// Implements the HttpProcessor trait for HTTP/1.0 and HTTP/1.1 protocols. /// Handles both request and response processing with proper protocol detection. /// Contains a parser instance that is created once and reused. pub struct Http1Processor { parser: http1_parser::Http1Parser, } impl Http1Processor { pub fn new() -> Self { Self { parser: http1_parser::Http1Parser::new() } } } impl Default for Http1Processor { fn default() -> Self { Self::new() } } impl HttpProcessor for Http1Processor { fn can_process_request(&self, data: &[u8]) -> bool { if data.len() < 16 { // Minimum for "GET / HTTP/1.1\r\n" return false; } // VERY SPECIFIC: Must NOT be HTTP/2 first if http2_parser::is_http2_traffic(data) { return false; } let data_str = String::from_utf8_lossy(data); let first_line = data_str.lines().next().unwrap_or(""); // SPECIFIC: Must be exact HTTP/1.x request line format let parts: Vec<&str> = first_line.split_whitespace().collect(); if parts.len() != 3 { return false; } // SPECIFIC: Valid HTTP/1.x methods only let methods = [ "GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH", "TRACE", "CONNECT", "PROPFIND", "PROPPATCH", "MKCOL", "COPY", "MOVE", "LOCK", "UNLOCK", ]; // SPECIFIC: Must be exact HTTP/1.0 or HTTP/1.1 methods.contains(&parts[0]) && (parts[2] == "HTTP/1.0" || parts[2] == "HTTP/1.1") && !parts[1].is_empty() // Must have URI } fn can_process_response(&self, data: &[u8]) -> bool { if data.len() < 12 { // Minimum for "HTTP/1.1 200" return false; } // VERY SPECIFIC: Must NOT look like HTTP/2 frames if data.len() >= 9 && http2_process::looks_like_http2_response(data) { return false; } let data_str = String::from_utf8_lossy(data); let 
first_line = data_str.lines().next().unwrap_or(""); // SPECIFIC: Must be exact HTTP/1.x response line format let parts: Vec<&str> = first_line.splitn(3, ' ').collect(); if parts.len() < 2 { return false; } // SPECIFIC: Must be exact HTTP/1.0 or HTTP/1.1 with valid status code (parts[0] == "HTTP/1.0" || parts[0] == "HTTP/1.1") && parts[1].len() == 3 // Status code must be 3 digits && parts[1].chars().all(|c| c.is_ascii_digit()) // Must be numeric } fn has_complete_data(&self, data: &[u8]) -> bool { has_complete_headers(data) } fn process_request( &self, data: &[u8], ) -> Result<Option<ObservableHttpRequest>, HuginnNetHttpError> { parse_http1_request(data, &self.parser) } fn process_response( &self, data: &[u8], ) -> Result<Option<ObservableHttpResponse>, HuginnNetHttpError> { parse_http1_response(data, &self.parser) } fn supported_version(&self) -> http::Version { http::Version::V11 // Primary version, but also supports V10 } fn name(&self) -> &'static str { "HTTP/1.x" } } /// Check if HTTP/1.x headers are complete (lightweight verification) pub fn has_complete_headers(data: &[u8]) -> bool { // Fast byte-level check for \r\n\r\n if data.len() < 4 { return false; } // Look for the header separator pattern for i in 0..data.len().saturating_sub(3) { if data[i] == b'\r' && data.get(i.saturating_add(1)) == Some(&b'\n') && data.get(i.saturating_add(2)) == Some(&b'\r') && data.get(i.saturating_add(3)) == Some(&b'\n') { return true; } } false } fn convert_http1_request_to_observable(req: http1_parser::Http1Request) -> ObservableHttpRequest { let lang = req .accept_language .and_then(http_languages::get_highest_quality_language); let headers_in_order = convert_headers_to_http_format(&req.headers, true); let headers_absent = build_absent_headers_from_new_parser(&req.headers, true); ObservableHttpRequest { matching: huginn_net_db::observable_signals::HttpRequestObservation { version: req.version, horder: headers_in_order, habsent: headers_absent, expsw: 
extract_traffic_classification(req.user_agent.as_deref()), }, lang, user_agent: req.user_agent.clone(), headers: req.headers, cookies: req.cookies.clone(), referer: req.referer.clone(), method: Some(req.method), uri: Some(req.uri), } } fn convert_http1_response_to_observable( res: http1_parser::Http1Response, ) -> ObservableHttpResponse { let headers_in_order = convert_headers_to_http_format(&res.headers, false); let headers_absent = build_absent_headers_from_new_parser(&res.headers, false); ObservableHttpResponse { matching: huginn_net_db::observable_signals::HttpResponseObservation { version: res.version, horder: headers_in_order, habsent: headers_absent, expsw: extract_traffic_classification(res.server.as_deref()), }, headers: res.headers, status_code: Some(res.status_code), } } /// Convert HTTP headers to fingerprint format /// Formats headers according to p0f-style fingerprinting rules. pub fn convert_headers_to_http_format( headers: &[http_common::HttpHeader], is_request: bool, ) -> Vec<Header> { let mut headers_in_order: Vec<Header> = Vec::new(); let optional_list = if is_request { http::request_optional_headers() } else { http::response_optional_headers() }; let skip_value_list = if is_request { http::request_skip_value_headers() } else { http::response_skip_value_headers() }; for header in headers { if optional_list.contains(&header.name.as_str()) { headers_in_order.push(http::Header::new(&header.name).optional()); } else if skip_value_list.contains(&header.name.as_str()) { headers_in_order.push(http::Header::new(&header.name)); } else { headers_in_order .push(Header::new(&header.name).with_optional_value(header.value.clone())); } } headers_in_order } /// Build list of absent common headers for fingerprinting /// Returns a list of common headers that are expected but not present in the request/response. 
pub fn build_absent_headers_from_new_parser( headers: &[http_common::HttpHeader], is_request: bool, ) -> Vec<Header> { let mut headers_absent: Vec<Header> = Vec::new(); let common_list: Vec<&str> = if is_request { http::request_common_headers() } else { http::response_common_headers() }; let current_headers: Vec<String> = headers.iter().map(|h| h.name.to_lowercase()).collect(); for header in &common_list { if !current_headers.contains(&header.to_lowercase()) { headers_absent.push(Header::new(header)); } } headers_absent } pub fn parse_http1_request( data: &[u8], parser: &http1_parser::Http1Parser, ) -> Result<Option<ObservableHttpRequest>, HuginnNetHttpError> { match parser.parse_request(data) { Ok(Some(req)) => { let observable = convert_http1_request_to_observable(req); Ok(Some(observable)) } Ok(None) => { debug!("Incomplete HTTP/1.x request data"); Ok(None) } Err(e) => { debug!("Failed to parse HTTP/1.x request: {}", e); Err(HuginnNetHttpError::Parse(format!("Failed to parse HTTP/1.x request: {e}"))) } } } pub fn parse_http1_response( data: &[u8], parser: &http1_parser::Http1Parser, ) -> Result<Option<ObservableHttpResponse>, HuginnNetHttpError> { match parser.parse_response(data) { Ok(Some(res)) => { let observable = convert_http1_response_to_observable(res); Ok(Some(observable)) } Ok(None) => { debug!("Incomplete HTTP/1.x response data"); Ok(None) } Err(e) => { debug!("Failed to parse HTTP/1.x response: {}", e); Err(HuginnNetHttpError::Parse(format!("Failed to parse HTTP/1.x response: {e}"))) } } } fn extract_traffic_classification(value: Option<&str>) -> String { value.unwrap_or("???").to_string() } /// Check if data looks like HTTP/1.x response pub fn looks_like_http1_response(data: &[u8]) -> bool { if data.len() < 12 { // Minimum for "HTTP/1.1 200" return false; } // Must NOT look like HTTP/2 frames if data.len() >= 9 && http2_process::looks_like_http2_response(data) { return false; } let data_str = String::from_utf8_lossy(data); let first_line = 
data_str.lines().next().unwrap_or(""); // Must be exact HTTP/1.x response line format let parts: Vec<&str> = first_line.split_whitespace().collect(); if parts.len() < 2 { return false; } // Check HTTP version let version_str = parts[0]; if version_str != "HTTP/1.0" && version_str != "HTTP/1.1" { return false; } // Check status code (must be 3 digits) let status_str = parts[1]; status_str.len() == 3 && status_str.chars().all(|c| c.is_ascii_digit()) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/filter.rs
huginn-net-http/src/filter.rs
use pnet::ipnetwork::{IpNetwork, Ipv4Network, Ipv6Network}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; /// Filter mode: Allow (allowlist) or Deny (denylist) #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum FilterMode { /// Allow only matching packets (allowlist mode) #[default] Allow, /// Deny matching packets (denylist mode) Deny, } /// Port filter configuration /// /// Filters packets based on TCP source and/or destination ports. /// Supports individual ports, ranges, and lists. /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// // Single port /// let filter = PortFilter::new().destination(443); /// /// // Multiple ports /// let filter = PortFilter::new().destination_list(vec![80, 443, 8080]); /// /// // Port range /// let filter = PortFilter::new().destination_range(8000..9000); /// ``` #[derive(Debug, Clone, Default)] pub struct PortFilter { /// Source ports to match pub source_ports: Vec<u16>, /// Destination ports to match pub destination_ports: Vec<u16>, /// Source port ranges (inclusive) pub source_ranges: Vec<(u16, u16)>, /// Destination port ranges (inclusive) pub destination_ranges: Vec<(u16, u16)>, /// Match ANY port (source OR destination)? 
pub match_any: bool, } impl PortFilter { /// Create a new empty port filter pub fn new() -> Self { Self::default() } /// Add a destination port /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().destination(443); /// ``` pub fn destination(mut self, port: u16) -> Self { self.destination_ports.push(port); self } /// Add a source port /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().source(12345); /// ``` pub fn source(mut self, port: u16) -> Self { self.source_ports.push(port); self } /// Add a destination port range (inclusive) /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().destination_range(8000..9000); /// // Matches ports 8000 through 8999 /// ``` pub fn destination_range(mut self, range: std::ops::Range<u16>) -> Self { self.destination_ranges .push((range.start, range.end.saturating_sub(1))); self } /// Add a source port range (inclusive) /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().source_range(10000..20000); /// // Matches ports 10000 through 19999 /// ``` pub fn source_range(mut self, range: std::ops::Range<u16>) -> Self { self.source_ranges .push((range.start, range.end.saturating_sub(1))); self } /// Add multiple destination ports /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().destination_list(vec![80, 443, 8080, 8443]); /// ``` pub fn destination_list(mut self, ports: Vec<u16>) -> Self { self.destination_ports.extend(ports); self } /// Add multiple source ports /// /// # Examples /// /// ```rust /// use huginn_net_http::PortFilter; /// /// let filter = PortFilter::new().source_list(vec![12345, 54321, 9999]); /// ``` pub fn source_list(mut self, ports: Vec<u16>) -> Self { self.source_ports.extend(ports); self } /// Match if ANY port matches 
(source OR destination) /// /// By default, all specified filters must match. With `match_any()`, /// the packet passes if either source OR destination matches. pub fn any_port(mut self) -> Self { self.match_any = true; self } /// Check if packet matches port filter /// /// # Returns /// /// `true` if the packet matches the filter criteria pub fn matches(&self, src_port: u16, dst_port: u16) -> bool { if self.match_any { let all_ports: Vec<u16> = self .source_ports .iter() .chain(self.destination_ports.iter()) .copied() .collect(); let all_ranges: Vec<(u16, u16)> = self .source_ranges .iter() .chain(self.destination_ranges.iter()) .copied() .collect(); let port_match = all_ports.contains(&src_port) || all_ports.contains(&dst_port) || all_ranges .iter() .any(|(start, end)| src_port >= *start && src_port <= *end) || all_ranges .iter() .any(|(start, end)| dst_port >= *start && dst_port <= *end); port_match } else { let src_match = self.source_ports.contains(&src_port) || self .source_ranges .iter() .any(|(start, end)| src_port >= *start && src_port <= *end); let dst_match = self.destination_ports.contains(&dst_port) || self .destination_ranges .iter() .any(|(start, end)| dst_port >= *start && dst_port <= *end); let src_ok = self.source_ports.is_empty() && self.source_ranges.is_empty() || src_match; let dst_ok = self.destination_ports.is_empty() && self.destination_ranges.is_empty() || dst_match; src_ok && dst_ok } } } /// IP address filter configuration /// /// Filters packets based on specific IPv4 or IPv6 addresses. /// /// # Examples /// /// ```rust /// use huginn_net_http::IpFilter; /// /// let filter = IpFilter::new() /// .allow("8.8.8.8") /// .unwrap() /// .allow("2001:4860:4860::8888") /// .unwrap(); /// ``` #[derive(Debug, Clone, Default)] pub struct IpFilter { /// IPv4 addresses to match pub ipv4_addresses: Vec<Ipv4Addr>, /// IPv6 addresses to match pub ipv6_addresses: Vec<Ipv6Addr>, /// Check source, destination, or both? 
pub check_source: bool, pub check_destination: bool, } impl IpFilter { /// Create a new IP filter that checks both source and destination by default pub fn new() -> Self { Self { check_source: true, check_destination: true, ..Default::default() } } /// Add an IP address (auto-detects IPv4/IPv6) /// /// # Errors /// /// Returns an error if the IP address string is invalid /// /// # Examples /// /// ```rust /// use huginn_net_http::IpFilter; /// /// let filter = IpFilter::new() /// .allow("192.168.1.1").unwrap() /// .allow("2001:db8::1").unwrap(); /// ``` pub fn allow(mut self, ip: &str) -> Result<Self, String> { let addr: IpAddr = ip.parse().map_err(|e| format!("Invalid IP: {e}"))?; match addr { IpAddr::V4(v4) => self.ipv4_addresses.push(v4), IpAddr::V6(v6) => self.ipv6_addresses.push(v6), } Ok(self) } /// Add multiple IP addresses /// /// # Errors /// /// Returns an error if any IP address string is invalid /// /// # Examples /// /// ```rust /// use huginn_net_http::IpFilter; /// /// let filter = IpFilter::new() /// .allow_list(vec!["8.8.8.8", "1.1.1.1", "2001:4860:4860::8888"]) /// .unwrap(); /// ``` pub fn allow_list(mut self, ips: Vec<&str>) -> Result<Self, String> { for ip in ips { self = self.allow(ip)?; } Ok(self) } /// Only check source addresses /// /// By default, both source and destination are checked. pub fn source_only(mut self) -> Self { self.check_source = true; self.check_destination = false; self } /// Only check destination addresses /// /// By default, both source and destination are checked. 
pub fn destination_only(mut self) -> Self { self.check_source = false; self.check_destination = true; self } /// Check if packet matches IP filter /// /// # Returns /// /// `true` if either source or destination IP matches (if enabled) pub fn matches(&self, src_ip: &IpAddr, dst_ip: &IpAddr) -> bool { let src_match = if self.check_source { match src_ip { IpAddr::V4(v4) => self.ipv4_addresses.contains(v4), IpAddr::V6(v6) => self.ipv6_addresses.contains(v6), } } else { false }; let dst_match = if self.check_destination { match dst_ip { IpAddr::V4(v4) => self.ipv4_addresses.contains(v4), IpAddr::V6(v6) => self.ipv6_addresses.contains(v6), } } else { false }; src_match || dst_match } } /// Subnet filter configuration (CIDR notation) /// /// Filters packets based on subnet membership using CIDR notation. /// Supports both IPv4 and IPv6 subnets. /// /// # Examples /// /// ```rust /// use huginn_net_http::SubnetFilter; /// /// // Allow only private networks /// let filter = SubnetFilter::new() /// .allow("192.168.0.0/16").unwrap() /// .allow("10.0.0.0/8").unwrap(); /// /// // IPv6 subnet /// let filter = SubnetFilter::new() /// .allow("2001:db8::/32").unwrap(); /// ``` #[derive(Debug, Clone, Default)] pub struct SubnetFilter { /// IPv4 subnets to match pub ipv4_subnets: Vec<Ipv4Network>, /// IPv6 subnets to match pub ipv6_subnets: Vec<Ipv6Network>, /// Check source, destination, or both? 
pub check_source: bool, pub check_destination: bool, } impl SubnetFilter { /// Create a new subnet filter that checks both source and destination by default pub fn new() -> Self { Self { check_source: true, check_destination: true, ..Default::default() } } /// Add a subnet in CIDR notation /// /// # Errors /// /// Returns an error if the CIDR notation is invalid /// /// # Examples /// /// ```rust /// use huginn_net_http::SubnetFilter; /// /// let filter = SubnetFilter::new() /// .allow("192.168.1.0/24").unwrap(); /// ``` pub fn allow(mut self, cidr: &str) -> Result<Self, String> { let network: IpNetwork = cidr.parse().map_err(|e| format!("Invalid CIDR: {e}"))?; match network { IpNetwork::V4(v4) => self.ipv4_subnets.push(v4), IpNetwork::V6(v6) => self.ipv6_subnets.push(v6), } Ok(self) } /// Add multiple subnets /// /// # Errors /// /// Returns an error if any CIDR notation is invalid /// /// # Examples /// /// ```rust /// use huginn_net_http::SubnetFilter; /// /// let filter = SubnetFilter::new() /// .allow_list(vec!["192.168.0.0/16", "10.0.0.0/8", "172.16.0.0/12"]) /// .unwrap(); /// ``` pub fn allow_list(mut self, cidrs: Vec<&str>) -> Result<Self, String> { for cidr in cidrs { self = self.allow(cidr)?; } Ok(self) } /// Only check source addresses /// /// By default, both source and destination are checked. pub fn source_only(mut self) -> Self { self.check_source = true; self.check_destination = false; self } /// Only check destination addresses /// /// By default, both source and destination are checked. 
pub fn destination_only(mut self) -> Self { self.check_source = false; self.check_destination = true; self } /// Check if packet matches subnet filter /// /// # Returns /// /// `true` if either source or destination IP is in any of the subnets (if enabled) pub fn matches(&self, src_ip: &IpAddr, dst_ip: &IpAddr) -> bool { let src_match = if self.check_source { match src_ip { IpAddr::V4(v4) => self.ipv4_subnets.iter().any(|net| net.contains(*v4)), IpAddr::V6(v6) => self.ipv6_subnets.iter().any(|net| net.contains(*v6)), } } else { false }; let dst_match = if self.check_destination { match dst_ip { IpAddr::V4(v4) => self.ipv4_subnets.iter().any(|net| net.contains(*v4)), IpAddr::V6(v6) => self.ipv6_subnets.iter().any(|net| net.contains(*v6)), } } else { false }; src_match || dst_match } } /// Combined filter configuration /// /// Combines port, IP, and subnet filters with a filter mode (Allow/Deny). /// All enabled filters must pass for a packet to be processed. /// /// # Examples /// /// ```rust /// use huginn_net_http::{FilterConfig, FilterMode, PortFilter, SubnetFilter}; /// /// let filter = FilterConfig::new() /// .mode(FilterMode::Allow) /// .with_port_filter(PortFilter::new().destination(443)) /// .with_subnet_filter( /// SubnetFilter::new() /// .allow("192.168.0.0/16") /// .unwrap() /// ); /// ``` #[derive(Debug, Clone, Default)] pub struct FilterConfig { pub port_filter: Option<PortFilter>, pub ip_filter: Option<IpFilter>, pub subnet_filter: Option<SubnetFilter>, pub mode: FilterMode, } impl FilterConfig { /// Create a new empty filter configuration pub fn new() -> Self { Self::default() } /// Set filter mode (Allow/Deny) /// /// # Examples /// /// ``` /// use huginn_net_http::{FilterConfig, FilterMode}; /// /// // Allowlist mode (default) - only matching packets pass /// let filter = FilterConfig::new().mode(FilterMode::Allow); /// /// // Denylist mode - matching packets are blocked /// let filter = FilterConfig::new().mode(FilterMode::Deny); /// ``` pub fn 
mode(mut self, mode: FilterMode) -> Self { self.mode = mode; self } /// Add port filter /// /// # Examples /// /// ```rust /// use huginn_net_http::{FilterConfig, PortFilter}; /// /// let filter = FilterConfig::new() /// .with_port_filter(PortFilter::new().destination(443)); /// ``` pub fn with_port_filter(mut self, filter: PortFilter) -> Self { self.port_filter = Some(filter); self } /// Add IP filter /// /// # Examples /// /// ```rust /// use huginn_net_http::{FilterConfig, IpFilter}; /// /// let filter = FilterConfig::new() /// .with_ip_filter( /// IpFilter::new() /// .allow("8.8.8.8") /// .unwrap() /// ); /// ``` pub fn with_ip_filter(mut self, filter: IpFilter) -> Self { self.ip_filter = Some(filter); self } /// Add subnet filter /// /// # Examples /// /// ```rust /// use huginn_net_http::{FilterConfig, SubnetFilter}; /// /// let filter = FilterConfig::new() /// .with_subnet_filter( /// SubnetFilter::new() /// .allow("192.168.0.0/16") /// .unwrap() /// ); /// ``` pub fn with_subnet_filter(mut self, filter: SubnetFilter) -> Self { self.subnet_filter = Some(filter); self } /// Check if packet should be processed based on filters (userspace filtering) /// /// This method performs filtering in userspace after packets reach the application. /// It extracts IP addresses and ports from packet headers and applies the configured /// filters (port, IP, subnet) according to the filter mode (Allow/Deny). 
/// /// # Returns /// /// - `true`: Packet passes all filters (should be processed) /// - `false`: Packet blocked by filters (should be dropped) /// /// # Logic /// /// - If no filters are configured, all packets pass /// - In Allow mode: packet must match ALL configured filters /// - In Deny mode: packet must NOT match ALL configured filters pub fn should_process( &self, src_ip: &IpAddr, dst_ip: &IpAddr, src_port: u16, dst_port: u16, ) -> bool { if self.port_filter.is_none() && self.ip_filter.is_none() && self.subnet_filter.is_none() { return true; } match self.mode { FilterMode::Allow => { if let Some(ref filter) = self.port_filter { if !filter.matches(src_port, dst_port) { return false; } } if let Some(ref filter) = self.ip_filter { if !filter.matches(src_ip, dst_ip) { return false; } } if let Some(ref filter) = self.subnet_filter { if !filter.matches(src_ip, dst_ip) { return false; } } true } FilterMode::Deny => { let mut all_match = true; if let Some(ref filter) = self.port_filter { all_match = all_match && filter.matches(src_port, dst_port); } if let Some(ref filter) = self.ip_filter { all_match = all_match && filter.matches(src_ip, dst_ip); } if let Some(ref filter) = self.subnet_filter { all_match = all_match && filter.matches(src_ip, dst_ip); } !all_match } } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/packet_hash.rs
huginn-net-http/src/packet_hash.rs
//! Packet hashing utilities for worker assignment in parallel processing.
//!
//! This module provides flow-based hashing for HTTP traffic, ensuring that all packets
//! from the same flow (src_ip, dst_ip, src_port, dst_port) are consistently routed to
//! the same worker thread for proper request/response tracking.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Computes worker assignment based on HTTP flow hash.
///
/// Hashes the complete flow (src_ip, dst_ip, src_port, dst_port) to ensure
/// all packets from the same connection go to the same worker for proper
/// request/response tracking. Malformed or truncated packets fall back to
/// hashing the raw bytes so they are still distributed without panicking.
pub fn hash_flow(packet: &[u8], num_workers: usize) -> usize {
    // Skip the 14-byte Ethernet header when the EtherType is IPv4 (0x0800)
    // or IPv6 (0x86DD); otherwise treat the buffer as a raw IP packet.
    let ip_start: usize = if packet.len() > 14
        && ((packet[12] == 0x08 && packet[13] == 0x00)
            || (packet[12] == 0x86 && packet[13] == 0xDD))
    {
        14
    } else {
        0 // Raw IP packet
    };

    // Minimum plausible size: IP header + TCP header.
    let min_length = ip_start.saturating_add(40);
    if packet.len() < min_length {
        // Packet too short, use fallback hash
        return fallback_hash(packet, num_workers);
    }

    let ip_packet = &packet[ip_start..];
    let version = (ip_packet[0] >> 4) & 0x0F;

    match version {
        4 => hash_ipv4_flow(ip_packet, num_workers),
        6 => hash_ipv6_flow(ip_packet, num_workers),
        _ => fallback_hash(packet, num_workers),
    }
}

/// Hashes the 4-tuple (src_ip, dst_ip, src_port, dst_port) and maps it to a worker.
///
/// Shared by the IPv4 and IPv6 paths so both hash the tuple identically.
fn hash_flow_tuple(
    src_ip: &[u8],
    dst_ip: &[u8],
    src_port: u16,
    dst_port: u16,
    num_workers: usize,
) -> usize {
    let mut hasher = DefaultHasher::new();
    src_ip.hash(&mut hasher);
    dst_ip.hash(&mut hasher);
    src_port.hash(&mut hasher);
    dst_port.hash(&mut hasher);
    // checked_rem guards against num_workers == 0.
    (hasher.finish() as usize)
        .checked_rem(num_workers)
        .unwrap_or(0)
}

/// Hashes IPv4 flow (src_ip, dst_ip, src_port, dst_port).
fn hash_ipv4_flow(ip_packet: &[u8], num_workers: usize) -> usize {
    if ip_packet.len() < 20 {
        return fallback_hash(ip_packet, num_workers);
    }

    // Check if protocol is TCP (6)
    let protocol = ip_packet[9];
    if protocol != 6 {
        // Not TCP, hash source IP only
        let src_ip = &ip_packet[12..16];
        return hash_bytes(src_ip).checked_rem(num_workers).unwrap_or(0);
    }

    // IPv4 header is variable length (IHL field). RFC 791 requires IHL >= 5
    // (20 bytes); a smaller value is malformed and would make us read IP
    // header bytes as TCP ports, so fall back instead.
    let ihl = (ip_packet[0] & 0x0F) as usize;
    if ihl < 5 {
        return fallback_hash(ip_packet, num_workers);
    }
    let ip_header_len = ihl.saturating_mul(4);

    if ip_packet.len() < ip_header_len.saturating_add(4) {
        // TCP ports not fully present, hash IP only
        let src_ip = &ip_packet[12..16];
        return hash_bytes(src_ip).checked_rem(num_workers).unwrap_or(0);
    }

    let tcp_header = &ip_packet[ip_header_len..];
    hash_flow_tuple(
        &ip_packet[12..16], // src_ip
        &ip_packet[16..20], // dst_ip
        u16::from_be_bytes([tcp_header[0], tcp_header[1]]),
        u16::from_be_bytes([tcp_header[2], tcp_header[3]]),
        num_workers,
    )
}

/// Hashes IPv6 flow (src_ip, dst_ip, src_port, dst_port).
fn hash_ipv6_flow(ip_packet: &[u8], num_workers: usize) -> usize {
    if ip_packet.len() < 40 {
        return fallback_hash(ip_packet, num_workers);
    }

    // Check if next header is TCP (6)
    let next_header = ip_packet[6];
    if next_header != 6 {
        // Not TCP, hash source IP only
        let src_ip = &ip_packet[8..24];
        return hash_bytes(src_ip).checked_rem(num_workers).unwrap_or(0);
    }

    if ip_packet.len() < 44 {
        // TCP ports not fully present, hash IP only
        let src_ip = &ip_packet[8..24];
        return hash_bytes(src_ip).checked_rem(num_workers).unwrap_or(0);
    }

    let tcp_header = &ip_packet[40..];
    hash_flow_tuple(
        &ip_packet[8..24],  // src_ip
        &ip_packet[24..40], // dst_ip
        u16::from_be_bytes([tcp_header[0], tcp_header[1]]),
        u16::from_be_bytes([tcp_header[2], tcp_header[3]]),
        num_workers,
    )
}

/// Hashes a byte slice using DefaultHasher.
fn hash_bytes(bytes: &[u8]) -> usize {
    let mut hasher = DefaultHasher::new();
    bytes.hash(&mut hasher);
    hasher.finish() as usize
}

/// Fallback hash for invalid packets.
///
/// Used when a packet is too short, malformed, or has an unknown IP version.
/// Instead of discarding the packet or crashing, we hash the entire packet contents
/// to distribute it to a worker. This sacrifices per-flow state consistency
/// for that specific packet, but ensures robustness in production environments
/// with corrupted traffic, fragmentation issues, or malicious crafted packets.
///
/// Note: This is specific to HTTP's hash-based flow routing.
fn fallback_hash(packet: &[u8], num_workers: usize) -> usize {
    hash_bytes(packet).checked_rem(num_workers).unwrap_or(0)
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http2_parser.rs
huginn-net-http/src/http2_parser.rs
use crate::http; use crate::http_common::{HeaderSource, HttpCookie, HttpHeader, ParsingMetadata}; use hpack_patched::Decoder; use std::cell::RefCell; use std::collections::HashMap; use std::time::Instant; pub const HTTP2_CONNECTION_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; #[derive(Debug, Clone, PartialEq)] #[repr(u8)] pub enum Http2FrameType { Data = 0x0, Headers = 0x1, Priority = 0x2, RstStream = 0x3, Settings = 0x4, PushPromise = 0x5, Ping = 0x6, GoAway = 0x7, WindowUpdate = 0x8, Continuation = 0x9, Unknown(u8), } impl From<u8> for Http2FrameType { fn from(frame_type: u8) -> Self { match frame_type { 0x0 => Http2FrameType::Data, 0x1 => Http2FrameType::Headers, 0x2 => Http2FrameType::Priority, 0x3 => Http2FrameType::RstStream, 0x4 => Http2FrameType::Settings, 0x5 => Http2FrameType::PushPromise, 0x6 => Http2FrameType::Ping, 0x7 => Http2FrameType::GoAway, 0x8 => Http2FrameType::WindowUpdate, 0x9 => Http2FrameType::Continuation, other => Http2FrameType::Unknown(other), } } } #[derive(Debug, Clone)] pub struct Http2Frame { pub frame_type: Http2FrameType, pub stream_id: u32, pub flags: u8, pub payload: Vec<u8>, pub length: u32, } impl Http2Frame { /// Creates a new HTTP/2 frame /// /// # Parameters /// - `frame_type_byte`: Raw frame type byte (0x0-0x9 for standard types) /// - `flags`: Frame flags byte /// - `stream_id`: Stream identifier /// - `payload`: Frame payload data /// /// # Example /// ```no_run /// use huginn_net_http::Http2Frame; /// /// // Create a SETTINGS frame (type 0x4) /// let frame = Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x00, 0x64]); /// ``` #[must_use] pub fn new(frame_type_byte: u8, flags: u8, stream_id: u32, payload: Vec<u8>) -> Self { let length = payload.len() as u32; Self { frame_type: Http2FrameType::from(frame_type_byte), stream_id, flags, payload, length, } } /// Returns the total size of the frame in bytes (header + payload) /// /// HTTP/2 frames have a 9-byte header (3 bytes length + 1 byte type + 1 byte 
flags + 4 bytes stream ID) /// followed by the payload. /// /// # Returns /// The total size of the frame: 9 bytes (header) + payload length /// /// # Example /// ```no_run /// use huginn_net_http::Http2Frame; /// /// let frame = Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x00, 0x64]); /// assert_eq!(frame.total_size(), 9 + 6); // 9 bytes header + 6 bytes payload /// ``` #[must_use] pub fn total_size(&self) -> usize { 9_usize.saturating_add(self.length as usize) } } #[derive(Debug, Clone, Default)] pub struct Http2Settings { pub header_table_size: Option<u32>, pub enable_push: Option<bool>, pub max_concurrent_streams: Option<u32>, pub initial_window_size: Option<u32>, pub max_frame_size: Option<u32>, pub max_header_list_size: Option<u32>, } #[derive(Debug, Clone)] pub struct Http2Stream { pub stream_id: u32, pub headers: Vec<HttpHeader>, pub method: Option<String>, pub path: Option<String>, pub authority: Option<String>, pub scheme: Option<String>, pub status: Option<u16>, } pub struct Http2Config { pub max_frame_size: u32, pub max_streams: u32, pub enable_hpack: bool, pub strict_parsing: bool, } impl Default for Http2Config { fn default() -> Self { Self { max_frame_size: 16384, max_streams: 100, enable_hpack: false, strict_parsing: false, } } } #[derive(Debug, Clone)] pub struct Http2Request { pub method: String, pub path: String, pub authority: Option<String>, pub scheme: Option<String>, pub version: http::Version, pub headers: Vec<HttpHeader>, pub cookies: Vec<HttpCookie>, pub referer: Option<String>, pub stream_id: u32, pub parsing_metadata: ParsingMetadata, pub frame_sequence: Vec<Http2FrameType>, pub settings: Http2Settings, } #[derive(Debug, Clone)] pub struct Http2Response { pub status: u16, pub version: http::Version, pub headers: Vec<HttpHeader>, pub stream_id: u32, pub parsing_metadata: ParsingMetadata, pub frame_sequence: Vec<Http2FrameType>, pub server: Option<String>, pub content_type: Option<String>, } #[derive(Debug, Clone)] pub enum 
Http2ParseError { InvalidPreface, InvalidFrameHeader, InvalidFrameLength(u32), InvalidStreamId(u32), FrameTooLarge(u32), MissingRequiredHeaders, InvalidPseudoHeader(String), IncompleteFrame, InvalidUtf8, UnsupportedFeature(String), HpackDecodingFailed, } impl std::fmt::Display for Http2ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::InvalidPreface => write!(f, "Invalid HTTP/2 connection preface"), Self::InvalidFrameHeader => write!(f, "Invalid HTTP/2 frame header"), Self::InvalidFrameLength(len) => write!(f, "Invalid frame length: {len}"), Self::InvalidStreamId(id) => write!(f, "Invalid stream ID: {id}"), Self::FrameTooLarge(size) => write!(f, "Frame too large: {size} bytes"), Self::MissingRequiredHeaders => write!(f, "Missing required pseudo-headers"), Self::InvalidPseudoHeader(name) => write!(f, "Invalid pseudo-header: {name}"), Self::IncompleteFrame => write!(f, "Incomplete HTTP/2 frame"), Self::InvalidUtf8 => write!(f, "Invalid UTF-8 in HTTP/2 data"), Self::UnsupportedFeature(feature) => write!(f, "Unsupported feature: {feature}"), Self::HpackDecodingFailed => write!(f, "HPACK decoding failed"), } } } impl std::error::Error for Http2ParseError {} /// HTTP/2 Protocol Parser /// /// Provides parsing capabilities for HTTP/2 requests and responses according to RFC 7540. /// Supports HPACK header compression and handles various frame types. /// /// # Thread Safety /// /// **This parser is NOT thread-safe.** Each thread should create its own instance. /// The internal HPACK decoder maintains state and uses `RefCell` for interior mutability. 
pub struct Http2Parser<'a> { config: Http2Config, hpack_decoder: RefCell<Decoder<'a>>, } impl<'a> Default for Http2Parser<'a> { fn default() -> Self { Self::new() } } impl<'a> Http2Parser<'a> { pub fn new() -> Self { Self { config: Http2Config::default(), hpack_decoder: RefCell::new(Decoder::new()) } } pub fn with_config(config: Http2Config) -> Self { Self { config, hpack_decoder: RefCell::new(Decoder::new()) } } /// Parse HTTP/2 request from binary data pub fn parse_request(&self, data: &[u8]) -> Result<Option<Http2Request>, Http2ParseError> { let start_time = Instant::now(); if !self.has_http2_preface(data) { return Err(Http2ParseError::InvalidPreface); } let frame_data = &data[HTTP2_CONNECTION_PREFACE.len()..]; let frames = self.parse_frames(frame_data)?; if frames.is_empty() { return Ok(None); } let Some(stream_id) = self.find_primary_stream(&frames) else { return Ok(None); }; let stream = self.build_stream(stream_id, &frames)?; let method = stream .method .ok_or(Http2ParseError::MissingRequiredHeaders)?; let path = stream.path.ok_or(Http2ParseError::MissingRequiredHeaders)?; let parsing_time = start_time.elapsed().as_nanos() as u64; let frame_sequence: Vec<Http2FrameType> = frames.iter().map(|f| f.frame_type.clone()).collect(); let mut headers = Vec::new(); let mut headers_map = HashMap::new(); let mut referer: Option<String> = None; let mut cookie_headers: Vec<&HttpHeader> = Vec::new(); for header in &stream.headers { let header_name_lower = header.name.to_lowercase(); if header_name_lower == "cookie" { cookie_headers.push(header); } else if header_name_lower == "referer" { if let Some(ref value) = header.value { referer = Some(value.clone()); } } else { if let Some(ref value) = header.value { headers_map.insert(header_name_lower, value.clone()); } // Clone the header to move into the filtered headers vec headers.push(header.clone()); } } let cookies = self.parse_cookies_from_headers(&cookie_headers); let metadata = ParsingMetadata { header_count: 
headers.len(), duplicate_headers: Vec::new(), case_variations: HashMap::new(), parsing_time_ns: parsing_time, has_malformed_headers: false, request_line_length: 0, total_headers_length: headers .iter() .map(|h| { h.name .len() .saturating_add(h.value.as_ref().map_or(0, |v| v.len())) }) .sum(), }; Ok(Some(Http2Request { method, path, authority: stream.authority, scheme: stream.scheme, version: http::Version::V20, headers, cookies, referer, stream_id, parsing_metadata: metadata, frame_sequence, settings: self.extract_settings(&frames), })) } /// Parse HTTP/2 response from binary data pub fn parse_response(&self, data: &[u8]) -> Result<Option<Http2Response>, Http2ParseError> { let start_time = Instant::now(); let frames = self.parse_frames(data)?; if frames.is_empty() { return Ok(None); } let Some(stream_id) = self.find_primary_stream(&frames) else { return Ok(None); }; let stream = self.build_stream(stream_id, &frames)?; let status = stream .status .ok_or(Http2ParseError::MissingRequiredHeaders)?; let parsing_time = start_time.elapsed().as_nanos() as u64; let frame_sequence: Vec<Http2FrameType> = frames.iter().map(|f| f.frame_type.clone()).collect(); let mut headers_map = HashMap::new(); for header in &stream.headers { if let Some(ref value) = header.value { headers_map.insert(header.name.to_lowercase(), value.clone()); } } let metadata = ParsingMetadata { header_count: stream.headers.len(), duplicate_headers: Vec::new(), case_variations: HashMap::new(), parsing_time_ns: parsing_time, has_malformed_headers: false, request_line_length: 0, total_headers_length: stream .headers .iter() .map(|h| { h.name .len() .saturating_add(h.value.as_ref().map_or(0, |v| v.len())) }) .sum(), }; Ok(Some(Http2Response { status, version: http::Version::V20, headers: stream.headers, stream_id, parsing_metadata: metadata, frame_sequence, server: headers_map.get("server").cloned(), content_type: headers_map.get("content-type").cloned(), })) } fn has_http2_preface(&self, data: &[u8]) -> bool 
{ data.starts_with(HTTP2_CONNECTION_PREFACE) } /// Parse HTTP/2 frames from raw data /// /// Parses all frames from the given data, handling connection preface if present. /// Returns a vector of parsed frames or an error if parsing fails. pub fn parse_frames(&self, data: &[u8]) -> Result<Vec<Http2Frame>, Http2ParseError> { let mut frames = Vec::new(); let mut remaining = data; while remaining.len() >= 9 { // Check if we have enough data for the complete frame let frame_length = u32::from_be_bytes([0, remaining[0], remaining[1], remaining[2]]); let frame_total_size = match usize::try_from(9_u32.saturating_add(frame_length)) { Ok(size) => size, Err(_) => break, // Frame too large, skip remaining data }; if remaining.len() < frame_total_size { // Incomplete frame at the end, stop parsing here break; } match self.parse_single_frame(remaining) { Ok((rest, frame)) => { frames.push(frame); remaining = rest; } Err(_) => { // Skip this frame and continue break; } } } Ok(frames) } /// Parse HTTP/2 frames from raw data and return the number of bytes consumed /// /// This is a convenience method that returns both the parsed frames and the number of bytes /// consumed from the input buffer. Useful for tracking parsing progress when processing /// incremental data. 
/// /// # Parameters /// - `data`: Raw HTTP/2 frame data (may include connection preface) /// /// # Returns /// - `Ok((frames, bytes_consumed))` on success /// - `Err(Http2ParseError)` on parsing failure /// /// # Example /// ```no_run /// use huginn_net_http::http2_parser::Http2Parser; /// /// let parser = Http2Parser::new(); /// let data = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x06\x04\x00\x00\x00\x00\x00"; /// match parser.parse_frames_with_offset(data) { /// Ok((frames, bytes_consumed)) => { /// println!("Parsed {} frames, consumed {} bytes", frames.len(), bytes_consumed); /// } /// Err(e) => eprintln!("Parsing error: {:?}", e), /// } /// ``` pub fn parse_frames_with_offset( &self, data: &[u8], ) -> Result<(Vec<Http2Frame>, usize), Http2ParseError> { let frames = self.parse_frames(data)?; let bytes_consumed: usize = frames.iter().map(|f| f.total_size()).sum(); Ok((frames, bytes_consumed)) } /// Parse HTTP/2 frames from raw data, automatically skipping the connection preface if present /// /// This is a convenience method that handles the HTTP/2 connection preface automatically, /// making it easier to parse frames from raw connection data. 
/// /// # Parameters /// - `data`: Raw HTTP/2 frame data (may include connection preface) /// /// # Returns /// - `Ok((frames, bytes_consumed))` on success, where `bytes_consumed` includes the preface if present /// - `Err(Http2ParseError)` on parsing failure /// /// # Example /// ```no_run /// use huginn_net_http::http2_parser::Http2Parser; /// /// let parser = Http2Parser::new(); /// let data = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x06\x04\x00\x00\x00\x00\x00"; /// match parser.parse_frames_skip_preface(data) { /// Ok((frames, bytes_consumed)) => { /// println!("Parsed {} frames, consumed {} bytes (including preface)", frames.len(), bytes_consumed); /// } /// Err(e) => eprintln!("Parsing error: {:?}", e), /// } /// ``` pub fn parse_frames_skip_preface( &self, data: &[u8], ) -> Result<(Vec<Http2Frame>, usize), Http2ParseError> { let start = if data.starts_with(HTTP2_CONNECTION_PREFACE) { HTTP2_CONNECTION_PREFACE.len() } else { 0 }; let (frames, bytes_consumed) = self.parse_frames_with_offset(&data[start..])?; Ok((frames, start.saturating_add(bytes_consumed))) } fn parse_single_frame<'b>( &self, data: &'b [u8], ) -> Result<(&'b [u8], Http2Frame), Http2ParseError> { if data.len() < 9 { return Err(Http2ParseError::IncompleteFrame); } let length = u32::from_be_bytes([0, data[0], data[1], data[2]]); let frame_type_byte = data[3]; let flags = data[4]; let stream_id = u32::from_be_bytes([data[5], data[6], data[7], data[8]]) & 0x7FFF_FFFF; if length > self.config.max_frame_size { return Err(Http2ParseError::FrameTooLarge(length)); } let frame_total_size = match usize::try_from(9_u32.saturating_add(length)) { Ok(size) => size, Err(_) => return Err(Http2ParseError::FrameTooLarge(length)), }; if data.len() < frame_total_size { return Err(Http2ParseError::IncompleteFrame); } let payload_start = 9; let payload_end = frame_total_size; let payload = data[payload_start..payload_end].to_vec(); let frame = Http2Frame { frame_type: Http2FrameType::from(frame_type_byte), 
stream_id, flags, payload, length, }; Ok((&data[payload_end..], frame)) } fn find_primary_stream(&self, frames: &[Http2Frame]) -> Option<u32> { for frame in frames { if frame.stream_id > 0 && frame.frame_type == Http2FrameType::Headers { return Some(frame.stream_id); } } None } fn build_stream( &self, stream_id: u32, frames: &[Http2Frame], ) -> Result<Http2Stream, Http2ParseError> { let mut headers = Vec::new(); let mut method = None; let mut path = None; let mut authority = None; let mut scheme = None; let mut status = None; let stream_frames: Vec<&Http2Frame> = frames.iter().filter(|f| f.stream_id == stream_id).collect(); for frame in stream_frames { match frame.frame_type { Http2FrameType::Headers | Http2FrameType::Continuation => { let frame_headers = self.parse_headers_payload(&frame.payload)?; for header in frame_headers { match header.name.as_str() { ":method" => method = Some(header.value.clone().unwrap_or_default()), ":path" => path = Some(header.value.clone().unwrap_or_default()), ":authority" => { authority = Some(header.value.clone().unwrap_or_default()) } ":scheme" => scheme = Some(header.value.clone().unwrap_or_default()), ":status" => { status = header.value.as_ref().and_then(|v| v.parse().ok()) } _ => headers.push(header), } } } _ => {} } } Ok(Http2Stream { stream_id, headers, method, path, authority, scheme, status }) } fn parse_headers_payload(&self, payload: &[u8]) -> Result<Vec<HttpHeader>, Http2ParseError> { let headers = self .hpack_decoder .borrow_mut() .decode(payload) .map_err(|_| Http2ParseError::HpackDecodingFailed)?; let mut http_headers = Vec::new(); for (position, (name, value)) in headers.iter().enumerate() { let name_str = String::from_utf8_lossy(name).to_string(); let value_str = String::from_utf8_lossy(value); let value_opt = if value_str.is_empty() { None } else { Some(value_str.to_string()) }; http_headers.push(HttpHeader { name: name_str, value: value_opt, position, source: HeaderSource::Http2Header, }); } Ok(http_headers) } fn 
extract_settings(&self, frames: &[Http2Frame]) -> Http2Settings { let mut settings = Http2Settings::default(); for frame in frames { if frame.frame_type == Http2FrameType::Settings { let payload = &frame.payload; for chunk in payload.chunks_exact(6) { if chunk.len() == 6 { let id = u16::from_be_bytes([chunk[0], chunk[1]]); let value = u32::from_be_bytes([chunk[2], chunk[3], chunk[4], chunk[5]]); match id { 1 => settings.header_table_size = Some(value), 2 => settings.enable_push = Some(value != 0), 3 => settings.max_concurrent_streams = Some(value), 4 => settings.initial_window_size = Some(value), 5 => settings.max_frame_size = Some(value), 6 => settings.max_header_list_size = Some(value), _ => {} } } } } } settings } /// HTTP/2 cookie parsing - handles multiple cookie headers according to RFC 7540 pub fn parse_cookies_from_headers(&self, cookie_headers: &[&HttpHeader]) -> Vec<HttpCookie> { let mut cookies = Vec::new(); let mut position = 0; for header in cookie_headers { if let Some(ref cookie_value) = header.value { // Each cookie header can contain multiple cookies separated by '; ' for cookie_str in cookie_value.split(';') { let cookie_str = cookie_str.trim(); if cookie_str.is_empty() { continue; } if let Some(eq_pos) = cookie_str.find('=') { let name = cookie_str[..eq_pos].trim().to_string(); let value = Some( cookie_str .get(eq_pos.saturating_add(1)..) .unwrap_or("") .trim() .to_string(), ); cookies.push(HttpCookie { name, value, position }); } else { cookies.push(HttpCookie { name: cookie_str.to_string(), value: None, position, }); } position = position.saturating_add(1); } } } cookies } } pub fn is_http2_traffic(data: &[u8]) -> bool { data.starts_with(HTTP2_CONNECTION_PREFACE) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/signature_matcher.rs
huginn-net-http/src/signature_matcher.rs
use crate::http; use crate::observable::{ObservableHttpRequest, ObservableHttpResponse}; use huginn_net_db::db_matching_trait::FingerprintDb; use huginn_net_db::{Database, Label}; pub struct SignatureMatcher<'a> { database: &'a Database, } impl<'a> SignatureMatcher<'a> { pub fn new(database: &'a Database) -> Self { Self { database } } pub fn matching_by_http_request( &self, signature: &ObservableHttpRequest, ) -> Option<(&'a Label, &'a http::Signature, f32)> { self.database .http_request .find_best_match(&signature.matching) } pub fn matching_by_http_response( &self, signature: &ObservableHttpResponse, ) -> Option<(&'a Label, &'a http::Signature, f32)> { self.database .http_response .find_best_match(&signature.matching) } pub fn matching_by_user_agent( &self, user_agent: String, ) -> Option<(&'a String, &'a Option<String>)> { for (ua, ua_family) in &self.database.ua_os { if user_agent.contains(ua) { return Some((ua, ua_family)); } } None } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http2_process.rs
huginn-net-http/src/http2_process.rs
use crate::error::HuginnNetHttpError; use crate::http::Header; use crate::http_common::HttpProcessor; use crate::observable::{ObservableHttpRequest, ObservableHttpResponse}; use crate::{http, http2_parser, http_common, http_languages}; use tracing::debug; /// HTTP/2 Protocol Processor /// /// Implements the HttpProcessor trait for HTTP/2 protocol. /// Handles both request and response processing with proper protocol detection. /// Contains a parser instance that is created once and reused. pub struct Http2Processor { parser: http2_parser::Http2Parser<'static>, } impl Http2Processor { pub fn new() -> Self { Self { parser: http2_parser::Http2Parser::new() } } } impl Default for Http2Processor { fn default() -> Self { Self::new() } } impl HttpProcessor for Http2Processor { fn can_process_request(&self, data: &[u8]) -> bool { // VERY SPECIFIC: HTTP/2 requests MUST start with exact connection preface if data.len() < 24 { // Minimum for preface return false; } // SPECIFIC: Must start with exact HTTP/2 connection preface http2_parser::is_http2_traffic(data) } fn can_process_response(&self, data: &[u8]) -> bool { // VERY SPECIFIC: HTTP/2 responses are frame-based, not text-based if data.len() < 9 { // Minimum frame header size return false; } // SPECIFIC: Must NOT look like HTTP/1.x first let data_str = String::from_utf8_lossy(&data[..data.len().min(20)]); if data_str.starts_with("HTTP/1.") { return false; } // SPECIFIC: Must look like valid HTTP/2 frame looks_like_http2_response(data) } fn has_complete_data(&self, data: &[u8]) -> bool { has_complete_data(data) } fn process_request( &self, data: &[u8], ) -> Result<Option<ObservableHttpRequest>, HuginnNetHttpError> { parse_http2_request(data, &self.parser) } fn process_response( &self, data: &[u8], ) -> Result<Option<ObservableHttpResponse>, HuginnNetHttpError> { parse_http2_response(data, &self.parser) } fn supported_version(&self) -> http::Version { http::Version::V20 } fn name(&self) -> &'static str { "HTTP/2" } } pub fn 
convert_http2_request_to_observable( req: http2_parser::Http2Request, ) -> ObservableHttpRequest { // Create map once for all lookups (only headers with values) let mut headers_map = std::collections::HashMap::new(); for header in &req.headers { if let Some(ref value) = header.value { headers_map.insert(header.name.to_lowercase(), value.as_str()); } } let lang = headers_map .get("accept-language") .and_then(|accept_language| { http_languages::get_highest_quality_language(accept_language.to_string()) }); let headers_in_order = convert_http2_headers_to_http_format(&req.headers, true); let headers_absent = build_absent_headers_from_http2(&req.headers, true); let user_agent = headers_map.get("user-agent").map(|s| s.to_string()); ObservableHttpRequest { matching: huginn_net_db::observable_signals::HttpRequestObservation { version: req.version, horder: headers_in_order, habsent: headers_absent, expsw: extract_traffic_classification(user_agent.as_deref()), }, lang, user_agent, headers: req.headers, cookies: req.cookies.clone(), referer: req.referer.clone(), method: Some(req.method), uri: Some(req.path), } } pub fn convert_http2_response_to_observable( res: http2_parser::Http2Response, ) -> ObservableHttpResponse { let headers_in_order = convert_http2_headers_to_http_format(&res.headers, false); let headers_absent = build_absent_headers_from_http2(&res.headers, false); ObservableHttpResponse { matching: huginn_net_db::observable_signals::HttpResponseObservation { version: res.version, horder: headers_in_order, habsent: headers_absent, expsw: extract_traffic_classification(res.server.as_deref()), }, headers: res.headers, status_code: Some(res.status), } } fn convert_http2_headers_to_http_format( headers: &[http_common::HttpHeader], is_request: bool, ) -> Vec<Header> { let mut headers_in_order: Vec<Header> = Vec::new(); let optional_list = if is_request { http::request_optional_headers() } else { http::response_optional_headers() }; let skip_value_list = if is_request { 
http::request_skip_value_headers() } else { http::response_skip_value_headers() }; for header in headers { let header_name_lower = header.name.to_lowercase(); if optional_list.contains(&header_name_lower.as_str()) { headers_in_order.push(http::Header::new(&header.name).optional()); } else if skip_value_list.contains(&header_name_lower.as_str()) { headers_in_order.push(http::Header::new(&header.name)); } else { headers_in_order .push(http::Header::new(&header.name).with_optional_value(header.value.clone())); } } headers_in_order } fn build_absent_headers_from_http2( headers: &[http_common::HttpHeader], is_request: bool, ) -> Vec<Header> { let mut headers_absent: Vec<Header> = Vec::new(); let common_list: Vec<&str> = if is_request { http::request_common_headers() } else { http::response_common_headers() }; let current_headers: Vec<String> = headers.iter().map(|h| h.name.to_lowercase()).collect(); for header in &common_list { if !current_headers.contains(&header.to_lowercase()) { headers_absent.push(http::Header::new(header)); } } headers_absent } /// Parse HTTP/2 request and convert to ObservableHttpRequest /// /// This function parses HTTP/2 request data and converts it to an ObservableHttpRequest /// that can be used for fingerprinting and analysis. 
pub fn parse_http2_request( data: &[u8], parser: &http2_parser::Http2Parser, ) -> Result<Option<ObservableHttpRequest>, HuginnNetHttpError> { match parser.parse_request(data) { Ok(Some(req)) => { let observable = convert_http2_request_to_observable(req); Ok(Some(observable)) } Ok(None) => { debug!("Incomplete HTTP/2 request data"); Ok(None) } Err(e) => { debug!("Failed to parse HTTP/2 request: {}", e); Err(HuginnNetHttpError::Parse(format!("Failed to parse HTTP/2 request: {e}"))) } } } fn parse_http2_response( data: &[u8], parser: &http2_parser::Http2Parser, ) -> Result<Option<ObservableHttpResponse>, HuginnNetHttpError> { match parser.parse_response(data) { Ok(Some(res)) => { let observable = convert_http2_response_to_observable(res); Ok(Some(observable)) } Ok(None) => { debug!("Incomplete HTTP/2 response data"); Ok(None) } Err(e) => { debug!("Failed to parse HTTP/2 response: {}", e); Err(HuginnNetHttpError::Parse(format!("Failed to parse HTTP/2 response: {e}"))) } } } pub fn extract_traffic_classification(value: Option<&str>) -> String { value.unwrap_or("???").to_string() } /// Check if data looks like HTTP/2 response (frames without preface) pub fn looks_like_http2_response(data: &[u8]) -> bool { if data.len() < 9 { return false; } // HTTP/2 frame format: 3 bytes length + 1 byte type + 1 byte flags + 4 bytes stream_id let frame_length = u32::from_be_bytes([0, data[0], data[1], data[2]]); let frame_type = data[3]; // Check if frame length is more than the default max frame size if frame_length > 16384 { return false; } // Check if frame type is valid HTTP/2 frame type // Common response frame types: HEADERS(1), DATA(0), SETTINGS(4), WINDOW_UPDATE(8) matches!(frame_type, 0..=10) } /// Check if HTTP/2 data has complete frames for parsing pub fn has_complete_data(data: &[u8]) -> bool { // For requests: Must have at least the connection preface if data.starts_with(crate::http2_parser::HTTP2_CONNECTION_PREFACE) { let frame_data = 
&data[crate::http2_parser::HTTP2_CONNECTION_PREFACE.len()..]; return has_complete_frames(frame_data); } // For responses: No preface, check frames directly has_complete_frames(data) } /// Check if we have complete HTTP/2 frames (at least HEADERS frame) fn has_complete_frames(data: &[u8]) -> bool { let mut remaining = data; while remaining.len() >= 9 { // Parse frame header (9 bytes) let length = u32::from_be_bytes([0, remaining[0], remaining[1], remaining[2]]); let frame_type_byte = remaining[3]; let _flags = remaining[4]; let stream_id = u32::from_be_bytes([remaining[5], remaining[6], remaining[7], remaining[8]]) & 0x7FFF_FFFF; // Check if frame is complete let frame_total_size = match usize::try_from(9_u32.saturating_add(length)) { Ok(size) => size, Err(_) => return false, // Frame too large }; if remaining.len() < frame_total_size { return false; // Incomplete frame } // Check if this is a HEADERS frame (type 0x1) with a valid stream ID if frame_type_byte == 0x1 && stream_id > 0 { // We need at least one complete HEADERS frame return true; } // Move to next frame remaining = &remaining[frame_total_size..]; } false }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/error.rs
huginn-net-http/src/error.rs
use thiserror::Error; #[derive(Error, Debug)] pub enum HuginnNetHttpError { #[error("Parse error: {0}")] Parse(String), #[error("Unsupported protocol: {0}")] UnsupportedProtocol(String), #[error("Unacceptable configuration: {0}")] Misconfiguration(String), }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/parallel.rs
huginn-net-http/src/parallel.rs
//! Parallel processing support for HTTP analysis using worker pool architecture. //! //! This module provides multi-threaded packet processing with hash-based worker assignment //! to maintain HTTP flow consistency (request/response tracking). Unlike TCP which hashes //! only the source IP, HTTP hashes the complete flow (src_ip, dst_ip, src_port, dst_port) //! to ensure requests and responses from the same connection are processed by the same worker. use crate::error::HuginnNetHttpError; use crate::filter::FilterConfig; use crate::http_process::{FlowKey, HttpProcessors, TcpFlow}; use crate::packet_hash; use crate::raw_filter; use crate::{HttpAnalysisResult, SignatureMatcher}; use crossbeam_channel::{bounded, Sender}; use huginn_net_db as db; use std::fmt; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; use tracing::debug; use ttl_cache::TtlCache; /// Worker configuration parameters struct WorkerConfig { batch_size: usize, timeout_ms: u64, max_connections: usize, } /// Worker pool for parallel HTTP packet processing. pub struct WorkerPool { packet_senders: Arc<Vec<Sender<Vec<u8>>>>, result_sender: Arc<Mutex<Option<std::sync::mpsc::Sender<HttpAnalysisResult>>>>, shutdown_flag: Arc<AtomicBool>, dispatched_count: Arc<AtomicU64>, dropped_count: Arc<AtomicU64>, worker_dropped: Vec<Arc<AtomicU64>>, num_workers: usize, pub batch_size: usize, pub timeout_ms: u64, } /// Statistics for a single worker thread. #[derive(Debug, Clone)] pub struct WorkerStats { pub id: usize, pub queue_size: usize, pub dropped: u64, } /// Pool-level statistics. #[derive(Debug, Clone)] pub struct PoolStats { pub total_dispatched: u64, pub total_dropped: u64, pub workers: Vec<WorkerStats>, } /// Result of dispatching a packet to a worker. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum DispatchResult { /// Packet successfully queued for processing Queued, /// Worker queue full, packet dropped Dropped, } impl WorkerPool { /// Creates a new worker pool for HTTP analysis. /// /// # Parameters /// - `num_workers`: Number of worker threads /// - `queue_size`: Size of each worker's packet queue /// - `batch_size`: Maximum packets to process in one batch /// - `timeout_ms`: Worker receive timeout in milliseconds /// - `result_sender`: Channel to send analysis results /// - `database`: Optional signature database for matching /// - `max_connections`: Maximum HTTP flows to track per worker /// - `filter_config`: Optional filter configuration for packet filtering /// /// # Returns /// A new `WorkerPool` or an error if creation fails. #[allow(clippy::too_many_arguments)] pub fn new( num_workers: usize, queue_size: usize, batch_size: usize, timeout_ms: u64, result_sender: std::sync::mpsc::Sender<HttpAnalysisResult>, database: Option<Arc<db::Database>>, max_connections: usize, filter_config: Option<FilterConfig>, ) -> Result<Arc<Self>, HuginnNetHttpError> { if num_workers == 0 { return Err(HuginnNetHttpError::Misconfiguration( "Worker count must be at least 1".to_string(), )); } let mut packet_senders = Vec::with_capacity(num_workers); let mut worker_dropped = Vec::with_capacity(num_workers); let shutdown_flag = Arc::new(AtomicBool::new(false)); for worker_id in 0..num_workers { let (tx, rx) = bounded::<Vec<u8>>(queue_size); packet_senders.push(tx); let result_sender_clone = result_sender.clone(); let db_clone = database.clone(); let dropped = Arc::new(AtomicU64::new(0)); worker_dropped.push(Arc::clone(&dropped)); let shutdown_flag_clone = Arc::clone(&shutdown_flag); let worker_filter = filter_config.clone(); thread::Builder::new() .name(format!("http-worker-{worker_id}")) .spawn(move || { Self::worker_loop( worker_id, rx, result_sender_clone, db_clone, dropped, shutdown_flag_clone, WorkerConfig { 
batch_size, timeout_ms, max_connections }, worker_filter, ) }) .map_err(|e| { HuginnNetHttpError::Misconfiguration(format!( "Failed to spawn worker thread {worker_id}: {e}" )) })?; } Ok(Arc::new(Self { packet_senders: Arc::new(packet_senders), result_sender: Arc::new(Mutex::new(Some(result_sender))), shutdown_flag, dispatched_count: Arc::new(AtomicU64::new(0)), dropped_count: Arc::new(AtomicU64::new(0)), worker_dropped, num_workers, batch_size, timeout_ms, })) } /// Worker thread main loop with batching support. #[allow(clippy::too_many_arguments)] fn worker_loop( worker_id: usize, rx: crossbeam_channel::Receiver<Vec<u8>>, result_sender: std::sync::mpsc::Sender<HttpAnalysisResult>, database: Option<Arc<db::Database>>, dropped: Arc<AtomicU64>, shutdown_flag: Arc<AtomicBool>, config: WorkerConfig, filter_config: Option<FilterConfig>, ) { use crossbeam_channel::RecvTimeoutError; use std::time::Duration; debug!("HTTP worker {} started", worker_id); let matcher = database .as_ref() .map(|db| SignatureMatcher::new(db.as_ref())); let mut http_flows = TtlCache::new(config.max_connections); let http_processors = HttpProcessors::new(); let timeout = Duration::from_millis(config.timeout_ms); let mut batch = Vec::with_capacity(config.batch_size); loop { if shutdown_flag.load(Ordering::Relaxed) { debug!("HTTP worker {} received shutdown signal", worker_id); break; } // Receive first packet with timeout (blocking) match rx.recv_timeout(timeout) { Ok(packet) => { batch.push(packet); // Try to fill the batch with additional packets (non-blocking) while batch.len() < config.batch_size { match rx.try_recv() { Ok(packet) => batch.push(packet), Err(_) => break, } } // Process all packets in the batch for packet in batch.drain(..) 
{ match Self::process_packet( &packet, &mut http_flows, &http_processors, matcher.as_ref(), filter_config.as_ref(), ) { Ok(result) => { if result_sender.send(result).is_err() { debug!("HTTP worker {} result channel closed", worker_id); return; } } Err(_) => { // Packet processing error, increment dropped count dropped.fetch_add(1, Ordering::Relaxed); } } } } Err(RecvTimeoutError::Timeout) => { if shutdown_flag.load(Ordering::Relaxed) { debug!("HTTP worker {} received shutdown signal", worker_id); break; } continue; } Err(RecvTimeoutError::Disconnected) => { debug!("HTTP worker {} channel disconnected", worker_id); break; } } } debug!("HTTP worker {} stopped", worker_id); } /// Processes a single packet within a worker thread. fn process_packet( packet: &[u8], http_flows: &mut TtlCache<FlowKey, TcpFlow>, http_processors: &HttpProcessors, matcher: Option<&SignatureMatcher>, filter: Option<&FilterConfig>, ) -> Result<HttpAnalysisResult, HuginnNetHttpError> { if let Some(filter_cfg) = filter { if !raw_filter::apply(packet, filter_cfg) { debug!("Filtered out packet before parsing"); return Ok(HttpAnalysisResult { http_request: None, http_response: None }); } } use crate::packet_parser::{parse_packet, IpPacket}; use crate::process; match parse_packet(packet) { IpPacket::Ipv4(ipv4) => { process::process_ipv4_packet(&ipv4, http_flows, http_processors, matcher) } IpPacket::Ipv6(ipv6) => { process::process_ipv6_packet(&ipv6, http_flows, http_processors, matcher) } IpPacket::None => Ok(HttpAnalysisResult { http_request: None, http_response: None }), } } pub fn dispatch(&self, packet: Vec<u8>) -> DispatchResult { // Don't accept new packets if shutting down if self.shutdown_flag.load(Ordering::Relaxed) { self.dropped_count.fetch_add(1, Ordering::Relaxed); return DispatchResult::Dropped; } let worker_id = packet_hash::hash_flow(&packet, self.num_workers); self.dispatched_count.fetch_add(1, Ordering::Relaxed); if let Some(sender) = self.packet_senders.get(worker_id) { match 
sender.try_send(packet) { Ok(()) => DispatchResult::Queued, Err(_) => { self.dropped_count.fetch_add(1, Ordering::Relaxed); self.worker_dropped[worker_id].fetch_add(1, Ordering::Relaxed); DispatchResult::Dropped } } } else { self.dropped_count.fetch_add(1, Ordering::Relaxed); DispatchResult::Dropped } } pub fn stats(&self) -> PoolStats { let workers = self .packet_senders .iter() .enumerate() .map(|(id, sender)| WorkerStats { id, queue_size: sender.len(), dropped: self.worker_dropped[id].load(Ordering::Relaxed), }) .collect(); PoolStats { total_dispatched: self.dispatched_count.load(Ordering::Relaxed), total_dropped: self.dropped_count.load(Ordering::Relaxed), workers, } } /// Initiates graceful shutdown of the worker pool. pub fn shutdown(&self) { // Set shutdown flag to stop workers on next timeout self.shutdown_flag.store(true, Ordering::Relaxed); // Drop result sender to signal workers if let Ok(mut sender) = self.result_sender.lock() { *sender = None; } } } impl fmt::Display for PoolStats { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "HTTP Worker Pool Statistics:")?; writeln!(f, " Total dispatched: {}", self.total_dispatched)?; writeln!(f, " Total dropped: {}", self.total_dropped)?; writeln!(f, " Workers: {}", self.workers.len())?; for worker in &self.workers { writeln!(f, " {worker}")?; } Ok(()) } } impl fmt::Display for WorkerStats { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Worker {}: queue_size={}, dropped={}", self.id, self.queue_size, self.dropped ) } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/display.rs
huginn-net-http/src/display.rs
use crate::http_common::HttpHeader; use crate::observable::{ObservableHttpRequest, ObservableHttpResponse}; use core::fmt; use huginn_net_db::display::http::HttpDisplayFormat; use huginn_net_db::http::{Header, Version}; use std::fmt::Formatter; impl HttpDisplayFormat for ObservableHttpRequest { fn get_version(&self) -> Version { self.matching.version } fn get_horder(&self) -> &[Header] { &self.matching.horder } fn get_habsent(&self) -> &[Header] { &self.matching.habsent } fn get_expsw(&self) -> &str { &self.matching.expsw } } impl HttpDisplayFormat for ObservableHttpResponse { fn get_version(&self) -> Version { self.matching.version } fn get_horder(&self) -> &[Header] { &self.matching.horder } fn get_habsent(&self) -> &[Header] { &self.matching.habsent } fn get_expsw(&self) -> &str { &self.matching.expsw } } impl fmt::Display for ObservableHttpRequest { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { self.format_http_display(f) } } impl fmt::Display for ObservableHttpResponse { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { self.format_http_display(f) } } impl fmt::Display for HttpHeader { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { if let Some(ref value) = self.value { write!(f, "{}={}", self.name, value) } else { write!(f, "{}", self.name) } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http1_parser.rs
huginn-net-http/src/http1_parser.rs
use crate::http; use crate::http_common::{HeaderSource, HttpCookie, HttpHeader, ParsingMetadata}; use std::collections::HashMap; use std::time::Instant; pub struct Http1Config { pub max_headers: usize, pub max_request_line_length: usize, pub max_header_length: usize, pub preserve_header_order: bool, pub parse_cookies: bool, pub strict_parsing: bool, } impl Default for Http1Config { fn default() -> Self { Self { max_headers: 100, max_request_line_length: 8192, max_header_length: 8192, preserve_header_order: true, parse_cookies: true, strict_parsing: false, } } } #[derive(Debug, Clone)] pub struct Http1Request { pub method: String, pub uri: String, pub version: http::Version, pub headers: Vec<HttpHeader>, pub cookies: Vec<HttpCookie>, pub referer: Option<String>, pub content_length: Option<usize>, pub transfer_encoding: Option<String>, pub connection: Option<String>, pub host: Option<String>, pub user_agent: Option<String>, pub accept_language: Option<String>, pub raw_request_line: String, pub parsing_metadata: ParsingMetadata, } #[derive(Debug, Clone)] pub struct Http1Response { pub version: http::Version, pub status_code: u16, pub reason_phrase: String, pub headers: Vec<HttpHeader>, pub content_length: Option<usize>, pub transfer_encoding: Option<String>, pub server: Option<String>, pub content_type: Option<String>, pub raw_status_line: String, pub parsing_metadata: ParsingMetadata, } #[derive(Debug, Clone)] pub enum Http1ParseError { InvalidRequestLine(String), InvalidStatusLine(String), InvalidVersion(String), InvalidMethod(String), InvalidStatusCode(String), HeaderTooLong(usize), TooManyHeaders(usize), MalformedHeader(String), IncompleteData, InvalidUtf8, } impl std::fmt::Display for Http1ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::InvalidRequestLine(line) => write!(f, "Invalid request line: {line}"), Self::InvalidStatusLine(line) => write!(f, "Invalid status line: {line}"), Self::InvalidVersion(version) 
=> write!(f, "Invalid HTTP version: {version}"), Self::InvalidMethod(method) => write!(f, "Invalid HTTP method: {method}"), Self::InvalidStatusCode(code) => write!(f, "Invalid status code: {code}"), Self::HeaderTooLong(len) => write!(f, "Header too long: {len} bytes"), Self::TooManyHeaders(count) => write!(f, "Too many headers: {count}"), Self::MalformedHeader(header) => write!(f, "Malformed header: {header}"), Self::IncompleteData => write!(f, "Incomplete HTTP data"), Self::InvalidUtf8 => write!(f, "Invalid UTF-8 in HTTP data"), } } } impl std::error::Error for Http1ParseError {} /// HTTP/1.x Protocol Parser /// /// Provides parsing capabilities for HTTP/1.0 and HTTP/1.1 requests and responses according to RFC 7230. /// Supports header parsing, cookie extraction, and various configuration options for security and performance. /// /// # Thread Safety /// /// **This parser is thread-safe.** Unlike the HTTP/2 parser, this parser does not maintain internal state /// and can be safely shared between threads or used concurrently. 
pub struct Http1Parser { config: Http1Config, } impl Http1Parser { pub fn new() -> Self { Self { config: Http1Config::default() } } pub fn with_config(config: Http1Config) -> Self { Self { config } } pub fn parse_request(&self, data: &[u8]) -> Result<Option<Http1Request>, Http1ParseError> { let start_time = Instant::now(); let data_str = std::str::from_utf8(data).map_err(|_| Http1ParseError::InvalidUtf8)?; if !data_str.contains("\r\n\r\n") && !data_str.contains("\n\n") { return Ok(None); } let lines: Vec<&str> = if data_str.contains("\r\n") { data_str.split("\r\n").collect() } else { data_str.split('\n').collect() }; if lines.is_empty() { return Err(Http1ParseError::IncompleteData); } let (method, uri, version) = self.parse_request_line(lines[0])?; let header_end = lines .iter() .position(|line| line.is_empty()) .unwrap_or(lines.len()); let header_lines = &lines[1..header_end]; let (all_headers, parsing_metadata) = self.parse_headers(header_lines)?; let mut headers = Vec::new(); let mut headers_map = HashMap::new(); let mut cookie_header_value: Option<String> = None; let mut referer: Option<String> = None; for header in all_headers { let header_name_lower = header.name.to_lowercase(); if header_name_lower == "cookie" { if let Some(ref value) = header.value { cookie_header_value = Some(value.clone()); } } else if header_name_lower == "referer" { if let Some(ref value) = header.value { referer = Some(value.clone()); } } else { if let Some(ref value) = header.value { headers_map .entry(header_name_lower) .or_insert(value.clone()); } headers.push(header); } } let cookies = if self.config.parse_cookies { if let Some(cookie_header) = cookie_header_value { self.parse_cookies(&cookie_header) } else { Vec::new() } } else { Vec::new() }; let content_length = headers_map .get("content-length") .and_then(|v| v.parse().ok()); let parsing_time = start_time.elapsed().as_nanos() as u64; let mut final_metadata = parsing_metadata; final_metadata.parsing_time_ns = parsing_time; 
final_metadata.request_line_length = lines[0].len(); Ok(Some(Http1Request { method, uri, version, headers, cookies, referer, content_length, transfer_encoding: headers_map.get("transfer-encoding").cloned(), connection: headers_map.get("connection").cloned(), host: headers_map.get("host").cloned(), user_agent: headers_map.get("user-agent").cloned(), accept_language: headers_map.get("accept-language").cloned(), raw_request_line: lines[0].to_string(), parsing_metadata: final_metadata, })) } pub fn parse_response(&self, data: &[u8]) -> Result<Option<Http1Response>, Http1ParseError> { let start_time = Instant::now(); let data_str = std::str::from_utf8(data).map_err(|_| Http1ParseError::InvalidUtf8)?; if !data_str.contains("\r\n\r\n") && !data_str.contains("\n\n") { return Ok(None); } let lines: Vec<&str> = if data_str.contains("\r\n") { data_str.split("\r\n").collect() } else { data_str.split('\n').collect() }; if lines.is_empty() { return Err(Http1ParseError::IncompleteData); } let (version, status_code, reason_phrase) = self.parse_status_line(lines[0])?; let header_end = lines .iter() .position(|line| line.is_empty()) .unwrap_or(lines.len()); let header_lines = &lines[1..header_end]; let (headers, parsing_metadata) = self.parse_headers(header_lines)?; let mut headers_map = HashMap::new(); for header in &headers { if let Some(ref value) = header.value { headers_map .entry(header.name.to_lowercase()) .or_insert(value.clone()); } } let content_length = headers_map .get("content-length") .and_then(|v| v.parse().ok()); let parsing_time = start_time.elapsed().as_nanos() as u64; let mut final_metadata = parsing_metadata; final_metadata.parsing_time_ns = parsing_time; Ok(Some(Http1Response { version, status_code, reason_phrase, headers, content_length, transfer_encoding: headers_map.get("transfer-encoding").cloned(), server: headers_map.get("server").cloned(), content_type: headers_map.get("content-type").cloned(), raw_status_line: lines[0].to_string(), parsing_metadata: 
final_metadata, })) } fn parse_request_line( &self, line: &str, ) -> Result<(String, String, http::Version), Http1ParseError> { if line.len() > self.config.max_request_line_length { return Err(Http1ParseError::InvalidRequestLine(format!( "Request line too long: {} bytes", line.len() ))); } let parts: Vec<&str> = line.split_whitespace().collect(); if parts.len() != 3 { return Err(Http1ParseError::InvalidRequestLine(line.to_string())); } let method = parts[0].to_string(); let uri = parts[1].to_string(); let version = http::Version::parse(parts[2]) .ok_or_else(|| Http1ParseError::InvalidVersion(parts[2].to_string()))?; // HTTP/1.x parser should only accept HTTP/1.0 and HTTP/1.1 if !matches!(version, http::Version::V10 | http::Version::V11) { return Err(Http1ParseError::InvalidVersion(parts[2].to_string())); } if !self.is_valid_method(&method) { return Err(Http1ParseError::InvalidMethod(method)); } Ok((method, uri, version)) } fn parse_status_line( &self, line: &str, ) -> Result<(http::Version, u16, String), Http1ParseError> { let parts: Vec<&str> = line.splitn(3, ' ').collect(); if parts.len() < 2 { return Err(Http1ParseError::InvalidStatusLine(line.to_string())); } let version = http::Version::parse(parts[0]) .ok_or_else(|| Http1ParseError::InvalidVersion(parts[0].to_string()))?; // HTTP/1.x parser should only accept HTTP/1.0 and HTTP/1.1 if !matches!(version, http::Version::V10 | http::Version::V11) { return Err(Http1ParseError::InvalidVersion(parts[0].to_string())); } let status_code: u16 = parts[1] .parse() .map_err(|_| Http1ParseError::InvalidStatusCode(parts[1].to_string()))?; let reason_phrase = parts.get(2).unwrap_or(&"").to_string(); Ok((version, status_code, reason_phrase)) } fn parse_headers( &self, lines: &[&str], ) -> Result<(Vec<HttpHeader>, ParsingMetadata), Http1ParseError> { if lines.len() > self.config.max_headers { return Err(Http1ParseError::TooManyHeaders(lines.len())); } let mut headers = Vec::new(); let mut duplicate_headers = Vec::new(); let 
mut case_variations: HashMap<String, Vec<String>> = HashMap::new(); let mut has_malformed = false; let mut total_length: usize = 0; for (position, line) in lines.iter().enumerate() { if line.is_empty() { break; } total_length = total_length.saturating_add(line.len()); if line.len() > self.config.max_header_length { return Err(Http1ParseError::HeaderTooLong(line.len())); } if let Some(colon_pos) = line.find(':') { let name = line[..colon_pos].trim().to_string(); let value = line .get(colon_pos.saturating_add(1)..) .map(|v| v.trim().to_string()); if name.is_empty() { has_malformed = true; if self.config.strict_parsing { return Err(Http1ParseError::MalformedHeader(line.to_string())); } continue; } let name_lower = name.to_lowercase(); case_variations .entry(name_lower.clone()) .or_default() .push(name.clone()); if headers .iter() .any(|h: &HttpHeader| h.name.to_lowercase() == name_lower) { duplicate_headers.push(name_lower.clone()); } headers.push(HttpHeader { name, value, position, source: HeaderSource::Http1Line }); } else { has_malformed = true; if self.config.strict_parsing { return Err(Http1ParseError::MalformedHeader(line.to_string())); } } } let metadata = ParsingMetadata { header_count: headers.len(), duplicate_headers, case_variations, parsing_time_ns: 0, has_malformed_headers: has_malformed, request_line_length: 0, total_headers_length: total_length, }; Ok((headers, metadata)) } /// HTTP/1.x cookie parsing - single cookie header with '; ' separation according to RFC 6265 pub fn parse_cookies(&self, cookie_header: &str) -> Vec<HttpCookie> { let mut cookies = Vec::new(); let mut position = 0; for cookie_str in cookie_header.split(';') { let cookie_str = cookie_str.trim(); if cookie_str.is_empty() { continue; } if let Some(eq_pos) = cookie_str.find('=') { let name = cookie_str[..eq_pos].trim().to_string(); let value = Some( cookie_str .get(eq_pos.saturating_add(1)..) 
.unwrap_or("") .trim() .to_string(), ); cookies.push(HttpCookie { name, value, position }); } else { cookies.push(HttpCookie { name: cookie_str.to_string(), value: None, position }); } position = position.saturating_add(1); } cookies } fn is_valid_method(&self, method: &str) -> bool { matches!( method, "GET" | "POST" | "PUT" | "DELETE" | "HEAD" | "OPTIONS" | "PATCH" | "TRACE" | "CONNECT" | "PROPFIND" | "PROPPATCH" | "MKCOL" | "COPY" | "MOVE" | "LOCK" | "UNLOCK" | "MKCALENDAR" | "REPORT" ) } } impl Default for Http1Parser { fn default() -> Self { Self::new() } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/raw_filter.rs
huginn-net-http/src/raw_filter.rs
use crate::filter::FilterConfig; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use tracing::debug; /// Apply raw filter check on raw packet bytes /// /// Extracts only IPs and ports without creating full packet structures. /// This is much faster than parsing the entire packet first. /// /// # Returns /// /// - `true`: Packet should be processed (passed filter or no filter) /// - `false`: Packet should be dropped (failed filter) pub fn apply(packet: &[u8], filter: &FilterConfig) -> bool { if let Some((src_ip, dst_ip, src_port, dst_port)) = extract_quick_info(packet) { filter.should_process(&src_ip, &dst_ip, src_port, dst_port) } else { debug!("Could not extract quick info from packet"); true } } /// Extract IPs and ports without full parsing /// /// Tries multiple datalink formats (Ethernet, Raw IP, NULL) to find IP header. /// Only extracts the minimum fields needed for filtering. fn extract_quick_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // Try Ethernet (most common) if let Some(info) = try_ethernet(packet) { return Some(info); } // Try Raw IP if let Some(info) = try_raw_ip(packet) { return Some(info); } // Try NULL/Loopback if let Some(info) = try_null_datalink(packet) { return Some(info); } None } /// Try to extract from Ethernet frame fn try_ethernet(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { if packet.len() < 14 { return None; } // EtherType at offset 12-13 let ethertype = u16::from_be_bytes([packet[12], packet[13]]); match ethertype { 0x0800 => extract_ipv4_info(&packet[14..]), // IPv4 0x86DD => extract_ipv6_info(&packet[14..]), // IPv6 _ => None, } } /// Try to extract from Raw IP fn try_raw_ip(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { if packet.is_empty() { return None; } // Check IP version (first 4 bits) let version = packet[0] >> 4; match version { 4 => extract_ipv4_info(packet), 6 => extract_ipv6_info(packet), _ => None, } } /// Try to extract from NULL/Loopback datalink fn try_null_datalink(packet: &[u8]) -> 
Option<(IpAddr, IpAddr, u16, u16)> { if packet.len() < 4 { return None; } // NULL datalink has 4-byte header with address family // AF_INET = 2, AF_INET6 = 30 (on most systems) let family = u32::from_ne_bytes([packet[0], packet[1], packet[2], packet[3]]); match family { 2 => extract_ipv4_info(&packet[4..]), // AF_INET 30 | 28 => extract_ipv6_info(&packet[4..]), // AF_INET6 (varies by OS) _ => None, } } /// Extract IPv4 src/dst IPs and TCP ports (minimal parsing) fn extract_ipv4_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // IPv4 header minimum: 20 bytes if packet.len() < 20 { return None; } // Check protocol (offset 9): must be TCP (6) if packet[9] != 6 { return None; } // Extract source IP (offset 12-15) let src_ip = IpAddr::V4(Ipv4Addr::new(packet[12], packet[13], packet[14], packet[15])); // Extract destination IP (offset 16-19) let dst_ip = IpAddr::V4(Ipv4Addr::new(packet[16], packet[17], packet[18], packet[19])); // Get IP header length (first 4 bits of byte 0, in 32-bit words) let ihl = (packet[0] & 0x0F) as usize; let ip_header_len = ihl.saturating_mul(4); // TCP header starts after IP header let tcp_offset = ip_header_len; if packet.len() < tcp_offset.saturating_add(4) { return None; } // Extract TCP ports (first 4 bytes of TCP header) let src_port = u16::from_be_bytes([packet[tcp_offset], packet[tcp_offset.saturating_add(1)]]); let dst_port = u16::from_be_bytes([ packet[tcp_offset.saturating_add(2)], packet[tcp_offset.saturating_add(3)], ]); Some((src_ip, dst_ip, src_port, dst_port)) } /// Extract IPv6 src/dst IPs and TCP ports (minimal parsing) fn extract_ipv6_info(packet: &[u8]) -> Option<(IpAddr, IpAddr, u16, u16)> { // IPv6 header: 40 bytes minimum if packet.len() < 40 { return None; } // Check next header (offset 6): must be TCP (6) if packet[6] != 6 { return None; } // Extract source IP (offset 8-23) let src_ip = IpAddr::V6(Ipv6Addr::new( u16::from_be_bytes([packet[8], packet[9]]), u16::from_be_bytes([packet[10], packet[11]]), 
u16::from_be_bytes([packet[12], packet[13]]), u16::from_be_bytes([packet[14], packet[15]]), u16::from_be_bytes([packet[16], packet[17]]), u16::from_be_bytes([packet[18], packet[19]]), u16::from_be_bytes([packet[20], packet[21]]), u16::from_be_bytes([packet[22], packet[23]]), )); // Extract destination IP (offset 24-39) let dst_ip = IpAddr::V6(Ipv6Addr::new( u16::from_be_bytes([packet[24], packet[25]]), u16::from_be_bytes([packet[26], packet[27]]), u16::from_be_bytes([packet[28], packet[29]]), u16::from_be_bytes([packet[30], packet[31]]), u16::from_be_bytes([packet[32], packet[33]]), u16::from_be_bytes([packet[34], packet[35]]), u16::from_be_bytes([packet[36], packet[37]]), u16::from_be_bytes([packet[38], packet[39]]), )); // TCP header starts at offset 40 (IPv6 header is fixed 40 bytes) if packet.len() < 44 { return None; } // Extract TCP ports let src_port = u16::from_be_bytes([packet[40], packet[41]]); let dst_port = u16::from_be_bytes([packet[42], packet[43]]); Some((src_ip, dst_ip, src_port, dst_port)) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http_common.rs
huginn-net-http/src/http_common.rs
use crate::http;
use std::collections::HashMap;
use std::time::Instant;

/// Where a header was observed, by protocol and header class.
#[derive(Debug, Clone, PartialEq)]
pub enum HeaderSource {
    /// Header from an HTTP/1.x request/status line section.
    Http1Line,
    /// HTTP/2 pseudo-header (e.g. `:method`, `:path`).
    Http2PseudoHeader,
    /// Regular HTTP/2 header.
    Http2Header,
    /// HTTP/3 header.
    Http3Header,
}

/// Represents an HTTP header with metadata
#[derive(Debug, Clone, PartialEq)]
pub struct HttpHeader {
    pub name: String,
    /// Header value; `None` when the header was present without a value.
    pub value: Option<String>,
    /// Position in the original header sequence (0-based)
    pub position: usize,
    /// Source protocol/type of this header
    pub source: HeaderSource,
}

impl HttpHeader {
    /// Builds an owned header from borrowed parts.
    pub fn new(name: &str, value: Option<&str>, position: usize, source: HeaderSource) -> Self {
        Self { name: name.to_string(), value: value.map(String::from), position, source }
    }
}

/// Represents an HTTP cookie
#[derive(Debug, Clone, PartialEq)]
pub struct HttpCookie {
    pub name: String,
    /// Cookie value; `None` for a bare cookie name.
    pub value: Option<String>,
    /// Position in the cookie header (0-based)
    pub position: usize,
}

/// Advanced parsing metadata for fingerprinting
#[derive(Debug, Clone)]
pub struct ParsingMetadata {
    // Total number of headers counted during parsing.
    pub header_count: usize,
    // Header names that appeared more than once.
    pub duplicate_headers: Vec<String>,
    // Maps a canonical header name to the spelling variants observed.
    pub case_variations: HashMap<String, Vec<String>>,
    // Wall-clock time spent parsing, in nanoseconds (set by `with_timing`).
    pub parsing_time_ns: u64,
    pub has_malformed_headers: bool,
    pub request_line_length: usize,
    pub total_headers_length: usize,
}

impl ParsingMetadata {
    /// Creates zeroed/empty metadata; equivalent to `Default::default()`.
    pub fn new() -> Self {
        Self {
            header_count: 0,
            duplicate_headers: Vec::new(),
            case_variations: HashMap::new(),
            parsing_time_ns: 0,
            has_malformed_headers: false,
            request_line_length: 0,
            total_headers_length: 0,
        }
    }

    /// Runs `f`, records its elapsed wall-clock time into `parsing_time_ns`,
    /// and returns the closure's result together with the updated metadata.
    pub fn with_timing<F, R>(mut self, f: F) -> (R, Self)
    where
        F: FnOnce() -> R,
    {
        let start = Instant::now();
        let result = f();
        // `as u64` truncates the u128 nanosecond count; safe for any
        // realistic parsing duration (u64 ns ≈ 584 years).
        self.parsing_time_ns = start.elapsed().as_nanos() as u64;
        (result, self)
    }
}

impl Default for ParsingMetadata {
    fn default() -> Self {
        Self::new()
    }
}

use crate::observable::{ObservableHttpRequest, ObservableHttpResponse};

/// Common trait for all HTTP parsers across different versions
pub trait HttpParser {
    /// Get the HTTP version this parser supports
    fn supported_version(&self) -> http::Version;

    /// Check if this parser can handle the given data
    fn can_parse(&self, data: &[u8]) -> bool;

    /// Get a human-readable name for this parser
    fn name(&self) -> &'static str;

    /// Parse HTTP request data into observable signals
    /// Returns None if data cannot be parsed by this parser
    fn parse_request(&self, data: &[u8]) -> Option<ObservableHttpRequest>;

    /// Parse HTTP response data into observable signals
    /// Returns None if data cannot be parsed by this parser
    fn parse_response(&self, data: &[u8]) -> Option<ObservableHttpResponse>;
}

/// Common trait for HTTP protocol processors
pub trait HttpProcessor {
    /// Check if this processor can handle the given request data
    fn can_process_request(&self, data: &[u8]) -> bool;

    /// Check if this processor can handle the given response data
    fn can_process_response(&self, data: &[u8]) -> bool;

    /// Check if the data appears to be complete for this protocol
    fn has_complete_data(&self, data: &[u8]) -> bool;

    /// Process HTTP request data and return observable request
    fn process_request(
        &self,
        data: &[u8],
    ) -> Result<Option<ObservableHttpRequest>, crate::error::HuginnNetHttpError>;

    /// Process HTTP response data and return observable response
    fn process_response(
        &self,
        data: &[u8],
    ) -> Result<Option<ObservableHttpResponse>, crate::error::HuginnNetHttpError>;

    /// Get the HTTP version this processor handles
    fn supported_version(&self) -> http::Version;

    /// Get a human-readable name for this processor
    fn name(&self) -> &'static str;
}

/// HTTP diagnostic function - determines the relationship between User-Agent and OS signature
///
/// This function analyzes the consistency between the reported User-Agent string and
/// the detected OS signature from TCP fingerprinting to identify potential spoofing.
///
/// # Arguments
/// * `user_agent` - Optional User-Agent string from HTTP headers
/// * `ua_matcher` - Optional tuple of (OS name, browser flavor) extracted from User-Agent
/// * `signature_os_matcher` - Optional OS label from TCP signature matching
///
/// # Returns
/// * `HttpDiagnosis::Anonymous` - No User-Agent provided
/// * `HttpDiagnosis::Generic` - User-Agent OS matches TCP signature OS
/// * `HttpDiagnosis::Dishonest` - User-Agent OS differs from TCP signature OS (potential spoofing)
/// * `HttpDiagnosis::None` - Insufficient data for comparison
pub fn get_diagnostic(
    user_agent: Option<String>,
    ua_matcher: Option<(&String, &Option<String>)>,
    signature_os_matcher: Option<&huginn_net_db::Label>,
) -> http::HttpDiagnosis {
    match user_agent {
        None => http::HttpDiagnosis::Anonymous,
        Some(_ua) => match (ua_matcher, signature_os_matcher) {
            (Some((ua_name_db, _ua_flavor_db)), Some(signature_label_db)) => {
                // OS names are compared case-insensitively (ASCII only).
                if ua_name_db.eq_ignore_ascii_case(&signature_label_db.name) {
                    http::HttpDiagnosis::Generic
                } else {
                    http::HttpDiagnosis::Dishonest
                }
            }
            // Either side missing: nothing to compare.
            _ => http::HttpDiagnosis::None,
        },
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http_process.rs
huginn-net-http/src/http_process.rs
use crate::error::HuginnNetHttpError; use crate::http_common::HttpProcessor; use crate::observable::{ObservableHttpRequest, ObservableHttpResponse}; use crate::{http1_process, http2_process}; use pnet::packet::ip::IpNextHeaderProtocols; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use pnet::packet::tcp::TcpPacket; use pnet::packet::Packet; use std::net::IpAddr; use std::time::Duration; use tracing::debug; use ttl_cache::TtlCache; /// FlowKey: (Client IP, Server IP, Client Port, Server Port) pub type FlowKey = (IpAddr, IpAddr, u16, u16); use crate::http_common::HttpParser; /// HTTP parser that automatically detects and processes different HTTP versions pub struct HttpProcessors { parsers: Vec<Box<dyn HttpParser>>, } impl HttpProcessors { pub fn new() -> Self { Self { parsers: vec![Box::new(Http1ParserAdapter::new()), Box::new(Http2ParserAdapter::new())], } } /// Parse HTTP request data using the appropriate parser pub fn parse_request(&self, data: &[u8]) -> Option<ObservableHttpRequest> { for parser in &self.parsers { if parser.can_parse(data) { if let Some(result) = parser.parse_request(data) { return Some(result); } } } None } /// Parse HTTP response data using the appropriate parser pub fn parse_response(&self, data: &[u8]) -> Option<ObservableHttpResponse> { for parser in &self.parsers { if parser.can_parse(data) { if let Some(result) = parser.parse_response(data) { return Some(result); } } } None } /// Get all supported HTTP versions pub fn supported_versions(&self) -> Vec<crate::http::Version> { self.parsers.iter().map(|p| p.supported_version()).collect() } } /// Adapter that bridges HTTP/1.x processor to the unified HttpParser interface struct Http1ParserAdapter { processor: http1_process::Http1Processor, } impl Http1ParserAdapter { fn new() -> Self { Self { processor: http1_process::Http1Processor::new() } } } impl HttpParser for Http1ParserAdapter { fn supported_version(&self) -> crate::http::Version { crate::http::Version::V11 } 
fn can_parse(&self, data: &[u8]) -> bool { self.processor.can_process_request(data) || self.processor.can_process_response(data) } fn name(&self) -> &'static str { "HTTP/1.x" } fn parse_request(&self, data: &[u8]) -> Option<ObservableHttpRequest> { self.processor.process_request(data).ok().flatten() } fn parse_response(&self, data: &[u8]) -> Option<ObservableHttpResponse> { self.processor.process_response(data).ok().flatten() } } /// Adapter that bridges HTTP/2 processor to the unified HttpParser interface struct Http2ParserAdapter { processor: http2_process::Http2Processor, } impl Http2ParserAdapter { fn new() -> Self { Self { processor: http2_process::Http2Processor::new() } } } impl HttpParser for Http2ParserAdapter { fn supported_version(&self) -> crate::http::Version { crate::http::Version::V20 } fn can_parse(&self, data: &[u8]) -> bool { self.processor.can_process_request(data) || self.processor.can_process_response(data) } fn name(&self) -> &'static str { "HTTP/2" } fn parse_request(&self, data: &[u8]) -> Option<ObservableHttpRequest> { self.processor.process_request(data).ok().flatten() } fn parse_response(&self, data: &[u8]) -> Option<ObservableHttpResponse> { self.processor.process_response(data).ok().flatten() } } impl Default for HttpProcessors { fn default() -> Self { Self::new() } } pub struct ObservableHttpPackage { pub http_request: Option<ObservableHttpRequest>, pub http_response: Option<ObservableHttpResponse>, } #[derive(Clone)] struct TcpData { sequence: u32, data: Vec<u8>, } pub struct TcpFlow { client_ip: IpAddr, server_ip: IpAddr, client_port: u16, server_port: u16, client_data: Vec<TcpData>, server_data: Vec<TcpData>, client_http_parsed: bool, server_http_parsed: bool, } /// Quick check if HTTP data is complete for parsing (supports HTTP/1.x and HTTP/2) fn has_complete_http_data(data: &[u8], processors: &HttpProcessors) -> bool { // Strategy: Don't make early decisions about protocol due to TCP fragmentation // Wait until we have enough data 
to make a reliable determination if data.len() < 4 { // Not enough data yet, wait for more TCP packets return false; } // Try to parse with any available parser - if successful, data is complete processors.parse_request(data).is_some() || processors.parse_response(data).is_some() } impl TcpFlow { fn init( src_ip: IpAddr, src_port: u16, dst_ip: IpAddr, dst_port: u16, tcp_data: TcpData, ) -> TcpFlow { TcpFlow { client_ip: src_ip, server_ip: dst_ip, client_port: src_port, server_port: dst_port, client_data: vec![tcp_data], server_data: Vec::new(), client_http_parsed: false, server_http_parsed: false, } } /// Traversing all the data in sequence in the correct order to build the full data /// /// # Parameters /// - `is_client`: If the data comes from the client. fn get_full_data(&self, is_client: bool) -> Vec<u8> { let data: &Vec<TcpData> = if is_client { &self.client_data } else { &self.server_data }; let mut sorted_data = data.clone(); sorted_data.sort_by_key(|tcp_data| tcp_data.sequence); let mut full_data = Vec::new(); for tcp_data in sorted_data { full_data.extend_from_slice(&tcp_data.data); } full_data } } pub fn process_http_ipv4( packet: &Ipv4Packet, http_flows: &mut TtlCache<FlowKey, TcpFlow>, processors: &HttpProcessors, ) -> Result<ObservableHttpPackage, HuginnNetHttpError> { if packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { return Err(HuginnNetHttpError::UnsupportedProtocol("IPv4".to_string())); } if let Some(tcp) = TcpPacket::new(packet.payload()) { process_tcp_packet( http_flows, tcp, IpAddr::V4(packet.get_source()), IpAddr::V4(packet.get_destination()), processors, ) } else { Ok(ObservableHttpPackage { http_request: None, http_response: None }) } } pub fn process_http_ipv6( packet: &Ipv6Packet, http_flows: &mut TtlCache<FlowKey, TcpFlow>, processors: &HttpProcessors, ) -> Result<ObservableHttpPackage, HuginnNetHttpError> { if packet.get_next_header() != IpNextHeaderProtocols::Tcp { return 
Err(HuginnNetHttpError::UnsupportedProtocol("IPv6".to_string())); } if let Some(tcp) = TcpPacket::new(packet.payload()) { process_tcp_packet( http_flows, tcp, IpAddr::V6(packet.get_source()), IpAddr::V6(packet.get_destination()), processors, ) } else { Ok(ObservableHttpPackage { http_request: None, http_response: None }) } } fn process_tcp_packet( http_flows: &mut TtlCache<FlowKey, TcpFlow>, tcp: TcpPacket, src_ip: IpAddr, dst_ip: IpAddr, processors: &HttpProcessors, ) -> Result<ObservableHttpPackage, HuginnNetHttpError> { let src_port: u16 = tcp.get_source(); let dst_port: u16 = tcp.get_destination(); let mut observable_http_package = ObservableHttpPackage { http_request: None, http_response: None }; let flow_key: FlowKey = (src_ip, dst_ip, src_port, dst_port); let (tcp_flow, is_client) = { if let Some(flow) = http_flows.get_mut(&flow_key) { (Some(flow), true) } else { let reversed_key: FlowKey = (dst_ip, src_ip, dst_port, src_port); if let Some(flow) = http_flows.get_mut(&reversed_key) { (Some(flow), false) } else { (None, false) } } }; if let Some(flow) = tcp_flow { if !tcp.payload().is_empty() { let tcp_data = TcpData { sequence: tcp.get_sequence(), data: Vec::from(tcp.payload()) }; if is_client && src_ip == flow.client_ip && src_port == flow.client_port { // Only add data and parse if not already parsed if !flow.client_http_parsed { flow.client_data.push(tcp_data); let full_data = flow.get_full_data(is_client); // Quick check before expensive parsing (supports HTTP/1.x and HTTP/2) if has_complete_http_data(&full_data, processors) { match parse_http_request(&full_data, processors) { Ok(Some(http_request_parsed)) => { observable_http_package.http_request = Some(http_request_parsed); flow.client_http_parsed = true; } Ok(None) => {} Err(_e) => {} } } } else { debug!("CLIENT: HTTP already parsed, discarding additional data"); } } else if src_ip == flow.server_ip && src_port == flow.server_port { // Only add data and parse if not already parsed if 
!flow.server_http_parsed { flow.server_data.push(tcp_data); let full_data = flow.get_full_data(is_client); // Quick check before expensive parsing (supports HTTP/1.x and HTTP/2) if has_complete_http_data(&full_data, processors) { match parse_http_response(&full_data, processors) { Ok(Some(http_response_parsed)) => { observable_http_package.http_response = Some(http_response_parsed); flow.server_http_parsed = true; } Ok(None) => {} Err(_e) => {} } } else { debug!("SERVER: Data not complete yet, waiting for more"); } } else { debug!("SERVER: HTTP already parsed, discarding additional data"); } } // Remove from http_flows if both request and response are parsed if flow.client_http_parsed && flow.server_http_parsed { debug!("Both HTTP request and response parsed, removing from http_flows early"); http_flows.remove(&flow_key); return Ok(observable_http_package); } // Clean up on connection close if tcp.get_flags() & (pnet::packet::tcp::TcpFlags::FIN | pnet::packet::tcp::TcpFlags::RST) != 0 { debug!("Connection closed or reset"); http_flows.remove(&flow_key); } } } else if tcp.get_flags() & pnet::packet::tcp::TcpFlags::SYN != 0 { let tcp_data: TcpData = TcpData { sequence: tcp.get_sequence(), data: Vec::from(tcp.payload()) }; let flow: TcpFlow = TcpFlow::init(src_ip, src_port, dst_ip, dst_port, tcp_data); http_flows.insert(flow_key, flow, Duration::new(60, 0)); } Ok(observable_http_package) } fn parse_http_request( data: &[u8], processors: &HttpProcessors, ) -> Result<Option<ObservableHttpRequest>, HuginnNetHttpError> { match processors.parse_request(data) { Some(request) => { debug!("Successfully parsed HTTP request using polymorphic parser"); Ok(Some(request)) } None => { debug!("No HTTP parser could handle request data"); Ok(None) } } } fn parse_http_response( data: &[u8], processors: &HttpProcessors, ) -> Result<Option<ObservableHttpResponse>, HuginnNetHttpError> { match processors.parse_response(data) { Some(response) => { debug!("Successfully parsed HTTP response 
using polymorphic parser"); Ok(Some(response)) } None => { debug!("No HTTP parser could handle response data"); Ok(None) } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http_languages.rs
huginn-net-http/src/http_languages.rs
use lazy_static::lazy_static; use std::collections::HashMap; lazy_static! { static ref LANGUAGES: HashMap<String, String> = { let mut map: HashMap<String, String> = HashMap::new(); map.insert("ro".to_string(), "Romanian".to_string()); map.insert("sw".to_string(), "Swahili".to_string()); map.insert("ne".to_string(), "Nepali".to_string()); map.insert("nl".to_string(), "Dutch".to_string()); map.insert("sn".to_string(), "Shona".to_string()); map.insert("ln".to_string(), "Lingala".to_string()); map.insert("en".to_string(), "English".to_string()); map.insert("ie".to_string(), "Interlingue".to_string()); map.insert("bg".to_string(), "Bulgarian".to_string()); map.insert("ha".to_string(), "Hausa".to_string()); map.insert("cs".to_string(), "Czech".to_string()); map.insert("ko".to_string(), "Korean".to_string()); map.insert("gv".to_string(), "Manx".to_string()); map.insert("vi".to_string(), "Vietnamese".to_string()); map.insert("mt".to_string(), "Maltese".to_string()); map.insert("bo".to_string(), "Tibetan".to_string()); map.insert("de".to_string(), "German".to_string()); map.insert("pa".to_string(), "Panjabi".to_string()); map.insert("lg".to_string(), "Ganda".to_string()); map.insert("tk".to_string(), "Turkmen".to_string()); map.insert("gl".to_string(), "Galician".to_string()); map.insert("yo".to_string(), "Yoruba".to_string()); map.insert("sc".to_string(), "Sardinian".to_string()); map.insert("or".to_string(), "Oriya".to_string()); map.insert("fr".to_string(), "French".to_string()); map.insert("ae".to_string(), "Avestan".to_string()); map.insert("am".to_string(), "Amharic".to_string()); map.insert("mh".to_string(), "Marshallese".to_string()); map.insert("hr".to_string(), "Croatian".to_string()); map.insert("sg".to_string(), "Sango".to_string()); map.insert("ps".to_string(), "Pushto".to_string()); map.insert("to".to_string(), "Tonga".to_string()); map.insert("kj".to_string(), "Kuanyama".to_string()); map.insert("kv".to_string(), "Komi".to_string()); 
map.insert("li".to_string(), "Limburgan".to_string()); map.insert("ng".to_string(), "Ndonga".to_string()); map.insert("lu".to_string(), "Luba-Katanga".to_string()); map.insert("nn".to_string(), "Norwegian Nynorsk".to_string()); map.insert("es".to_string(), "Spanish".to_string()); map.insert("gn".to_string(), "Guarani".to_string()); map.insert("pl".to_string(), "Polish".to_string()); map.insert("om".to_string(), "Oromo".to_string()); map.insert("lb".to_string(), "Luxembourgish".to_string()); map.insert("se".to_string(), "Northern Sami".to_string()); map.insert("ab".to_string(), "Abkhazian".to_string()); map.insert("ar".to_string(), "Arabic".to_string()); map.insert("az".to_string(), "Azerbaijani".to_string()); map.insert("si".to_string(), "Sinhala".to_string()); map.insert("ba".to_string(), "Bashkir".to_string()); map.insert("sr".to_string(), "Serbian".to_string()); map.insert("vo".to_string(), "Volapuk".to_string()); map.insert("kl".to_string(), "Kalaallisut".to_string()); map.insert("th".to_string(), "Thai".to_string()); map.insert("cu".to_string(), "Church Slavic".to_string()); map.insert("ja".to_string(), "Japanese".to_string()); map.insert("fy".to_string(), "Western Frisian".to_string()); map.insert("ch".to_string(), "Chamorro".to_string()); map.insert("hy".to_string(), "Armenian".to_string()); map.insert("ht".to_string(), "Haitian".to_string()); map.insert("fo".to_string(), "Faroese".to_string()); map.insert("fj".to_string(), "Fijian".to_string()); map.insert("gd".to_string(), "Scottish Gaelic".to_string()); map.insert("ig".to_string(), "Igbo".to_string()); map.insert("is".to_string(), "Icelandic".to_string()); map.insert("bi".to_string(), "Bislama".to_string()); map.insert("za".to_string(), "Zhuang".to_string()); map.insert("eu".to_string(), "Basque".to_string()); map.insert("id".to_string(), "Indonesian".to_string()); map.insert("ks".to_string(), "Kashmiri".to_string()); map.insert("cr".to_string(), "Cree".to_string()); map.insert("ga".to_string(), 
"Irish".to_string()); map.insert("gu".to_string(), "Gujarati".to_string()); map.insert("st".to_string(), "Southern Sotho".to_string()); map.insert("ur".to_string(), "Urdu".to_string()); map.insert("ce".to_string(), "Chechen".to_string()); map.insert("kg".to_string(), "Kongo".to_string()); map.insert("he".to_string(), "Hebrew".to_string()); map.insert("dv".to_string(), "Dhivehi".to_string()); map.insert("ru".to_string(), "Russian".to_string()); map.insert("ts".to_string(), "Tsonga".to_string()); map.insert("bn".to_string(), "Bengali".to_string()); map.insert("sv".to_string(), "Swedish".to_string()); map.insert("ug".to_string(), "Uighur".to_string()); map.insert("bs".to_string(), "Bosnian".to_string()); map.insert("wa".to_string(), "Walloon".to_string()); map.insert("ho".to_string(), "Hiri Motu".to_string()); map.insert("ii".to_string(), "Sichuan Yi".to_string()); map.insert("sk".to_string(), "Slovak".to_string()); map.insert("nb".to_string(), "Norwegian Bokmal".to_string()); map.insert("co".to_string(), "Corsican".to_string()); map.insert("lt".to_string(), "Lithuanian".to_string()); map.insert("ms".to_string(), "Malay".to_string()); map.insert("da".to_string(), "Danish".to_string()); map.insert("ny".to_string(), "Nyanja".to_string()); map.insert("ik".to_string(), "Inupiaq".to_string()); map.insert("iu".to_string(), "Inuktitut".to_string()); map.insert("sd".to_string(), "Sindhi".to_string()); map.insert("rw".to_string(), "Kinyarwanda".to_string()); map.insert("ki".to_string(), "Kikuyu".to_string()); map.insert("uk".to_string(), "Ukrainian".to_string()); map.insert("la".to_string(), "Latin".to_string()); map.insert("nr".to_string(), "South Ndebele".to_string()); map.insert("oc".to_string(), "Occitan".to_string()); map.insert("ml".to_string(), "Malayalam".to_string()); map.insert("ku".to_string(), "Kurdish".to_string()); map.insert("rn".to_string(), "Rundi".to_string()); map.insert("kn".to_string(), "Kannada".to_string()); map.insert("ta".to_string(), 
"Tamil".to_string()); map.insert("pi".to_string(), "Pali".to_string()); map.insert("sm".to_string(), "Samoan".to_string()); map.insert("tw".to_string(), "Twi".to_string()); map.insert("nd".to_string(), "North Ndebele".to_string()); map.insert("oj".to_string(), "Ojibwa".to_string()); map.insert("tl".to_string(), "Tagalog".to_string()); map.insert("aa".to_string(), "Afar".to_string()); map.insert("ay".to_string(), "Aymara".to_string()); map.insert("te".to_string(), "Telugu".to_string()); map.insert("eo".to_string(), "Esperanto".to_string()); map.insert("ia".to_string(), "Interlingua".to_string()); map.insert("xh".to_string(), "Xhosa".to_string()); map.insert("jv".to_string(), "Javanese".to_string()); map.insert("ty".to_string(), "Tahitian".to_string()); map.insert("os".to_string(), "Ossetian".to_string()); map.insert("et".to_string(), "Estonian".to_string()); map.insert("cy".to_string(), "Welsh".to_string()); map.insert("so".to_string(), "Somali".to_string()); map.insert("sq".to_string(), "Albanian".to_string()); map.insert("pt".to_string(), "Portuguese".to_string()); map.insert("tn".to_string(), "Tswana".to_string()); map.insert("zu".to_string(), "Zulu".to_string()); map.insert("bh".to_string(), "Bihari".to_string()); map.insert("mn".to_string(), "Mongolian".to_string()); map.insert("uz".to_string(), "Uzbek".to_string()); map.insert("lo".to_string(), "Lao".to_string()); map.insert("ee".to_string(), "Ewe".to_string()); map.insert("mg".to_string(), "Malagasy".to_string()); map.insert("lv".to_string(), "Latvian".to_string()); map.insert("fi".to_string(), "Finnish".to_string()); map.insert("af".to_string(), "Afrikaans".to_string()); map.insert("an".to_string(), "Aragonese".to_string()); map.insert("av".to_string(), "Avaric".to_string()); map.insert("hi".to_string(), "Hindi".to_string()); map.insert("ff".to_string(), "Fulah".to_string()); map.insert("nv".to_string(), "Navajo".to_string()); map.insert("fa".to_string(), "Persian".to_string()); map.insert("yi".to_string(), 
"Yiddish".to_string()); map.insert("kw".to_string(), "Cornish".to_string()); map.insert("tg".to_string(), "Tajik".to_string()); map.insert("be".to_string(), "Belarusian".to_string()); map.insert("na".to_string(), "Nauru".to_string()); map.insert("qu".to_string(), "Quechua".to_string()); map.insert("sh".to_string(), "Serbo-Croatian".to_string()); map.insert("dz".to_string(), "Dzongkha".to_string()); map.insert("kk".to_string(), "Kazakh".to_string()); map.insert("cv".to_string(), "Chuvash".to_string()); map.insert("kr".to_string(), "Kanuri".to_string()); map.insert("br".to_string(), "Breton".to_string()); map.insert("bm".to_string(), "Bambara".to_string()); map.insert("ss".to_string(), "Swati".to_string()); map.insert("tr".to_string(), "Turkish".to_string()); map.insert("mi".to_string(), "Maori".to_string()); map.insert("no".to_string(), "Norwegian".to_string()); map.insert("ak".to_string(), "Akan".to_string()); map.insert("as".to_string(), "Assamese".to_string()); map.insert("it".to_string(), "Italian".to_string()); map.insert("ca".to_string(), "Catalan".to_string()); map.insert("km".to_string(), "Central Khmer".to_string()); map.insert("mk".to_string(), "Macedonian".to_string()); map.insert("tt".to_string(), "Tatar".to_string()); map.insert("rm".to_string(), "Romansh".to_string()); map.insert("io".to_string(), "Ido".to_string()); map.insert("sl".to_string(), "Slovenian".to_string()); map.insert("hz".to_string(), "Herero".to_string()); map.insert("ka".to_string(), "Georgian".to_string()); map.insert("ky".to_string(), "Kirghiz".to_string()); map.insert("ve".to_string(), "Venda".to_string()); map.insert("el".to_string(), "Modern Greek".to_string()); map.insert("sa".to_string(), "Sanskrit".to_string()); map.insert("wo".to_string(), "Wolof".to_string()); map.insert("mr".to_string(), "Marathi".to_string()); map.insert("zh".to_string(), "Chinese".to_string()); map.insert("su".to_string(), "Sundanese".to_string()); map.insert("my".to_string(), "Burmese".to_string()); 
map.insert("hu".to_string(), "Hungarian".to_string()); map.insert("ti".to_string(), "Tigrinya".to_string()); map }; } pub fn get_highest_quality_language(accept_language: String) -> Option<String> { accept_language .split(',') .enumerate() .filter_map(|(index, part)| { let mut lang_and_quality = part.split(';'); let full_language = lang_and_quality.next()?.trim(); // Skip empty language parts (malformed entries like ",," or ";q=0.5") if full_language.is_empty() { return None; } let language = full_language.split('-').next().unwrap_or("").to_string(); let quality: f32 = lang_and_quality .next() .and_then(|q| q.trim_start_matches("q=").parse::<f32>().ok()) .unwrap_or(1.0); LANGUAGES .get(&language) .map(|lang_name| (quality, index, lang_name.clone())) }) .max_by(|a, b| { // First compare by quality (higher is better) match a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal) { std::cmp::Ordering::Equal => { // In case of tie, prefer earlier index (lower is better) b.1.cmp(&a.1) // Reverse order for index (earlier = smaller index) } other => other, } }) .map(|(_, _, language_name)| language_name) }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/output.rs
huginn-net-http/src/output.rs
use huginn_net_db::{Label, MatchQualityType, Type};
use std::fmt;
use std::fmt::Formatter;

/// Represents the output from HTTP analysis.
///
/// This struct contains various optional outputs that can be derived
/// from analyzing HTTP packets.
#[derive(Debug)]
pub struct HttpAnalysisResult {
    /// Information derived from HTTP request packets.
    pub http_request: Option<HttpRequestOutput>,
    /// Information derived from HTTP response packets.
    pub http_response: Option<HttpResponseOutput>,
}

/// An (IP address, port) endpoint pair.
#[derive(Debug, Clone, PartialEq)]
pub struct IpPort {
    pub ip: std::net::IpAddr,
    pub port: u16,
}

impl IpPort {
    pub fn new(ip: std::net::IpAddr, port: u16) -> Self {
        Self { ip, port }
    }
}

use crate::observable::{ObservableHttpRequest, ObservableHttpResponse};
use huginn_net_db::http::HttpDiagnosis;

/// A browser match together with the quality of that match.
#[derive(Debug)]
pub struct BrowserQualityMatched {
    /// The matched browser, if any signature matched.
    pub browser: Option<Browser>,
    /// How good the match is.
    pub quality: MatchQualityType,
}

/// Represents a browser.
///
/// This struct contains the name, family, variant, and kind of browser.
/// Examples:
/// - name: "", family: "chrome", variant: "11.x to 26.x", kind: Type::Specified
/// - name: "", family: "firefox", variant: "3.x", kind: Type::Specified
#[derive(Debug)]
pub struct Browser {
    pub name: String,
    pub family: Option<String>,
    pub variant: Option<String>,
    pub kind: Type,
}

impl From<&Label> for Browser {
    // Maps database label fields: class -> family, flavor -> variant.
    fn from(label: &Label) -> Self {
        Browser {
            name: label.name.clone(),
            family: label.class.clone(),
            variant: label.flavor.clone(),
            kind: label.ty.clone(),
        }
    }
}

/// Holds information derived from analyzing HTTP request headers.
///
/// This structure contains details about the client, the detected application
/// (if any), the preferred language, diagnostic parameters related to HTTP behavior,
/// and the raw HTTP signature.
#[derive(Debug)]
pub struct HttpRequestOutput {
    /// The source IP address and port of the client making the request.
    pub source: IpPort,
    /// The destination IP address and port of the server receiving the request.
    pub destination: IpPort,
    /// The preferred language indicated in the `Accept-Language` header, if present.
    pub lang: Option<String>,
    /// Diagnostic information about potential HTTP specification violations or common practices.
    pub diagnosis: HttpDiagnosis,
    /// The browser with the highest quality that matches the HTTP request.
    pub browser_matched: BrowserQualityMatched,
    /// The raw signature representing the HTTP headers and their order.
    pub sig: ObservableHttpRequest,
}

impl fmt::Display for HttpRequestOutput {
    // Missing fields render as "???".
    // NOTE(review): unlike HttpResponseOutput's Display, `browser.name` is
    // never used here, only family:variant - confirm whether that asymmetry
    // is intentional.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "[HTTP Request] {}:{} → {}:{}\n\
            Browser: {}\n\
            Lang: {}\n\
            Params: {}\n\
            Sig: {}\n",
            self.source.ip,
            self.source.port,
            self.destination.ip,
            self.destination.port,
            self.browser_matched
                .browser
                .as_ref()
                .map_or("???".to_string(), |browser| {
                    format!(
                        "{}:{}",
                        browser.family.as_deref().unwrap_or("???"),
                        browser.variant.as_deref().unwrap_or("???")
                    )
                }),
            self.lang.as_deref().unwrap_or("???"),
            self.diagnosis,
            self.sig,
        )
    }
}

/// A web-server match together with the quality of that match.
#[derive(Debug)]
pub struct WebServerQualityMatched {
    /// The matched web server, if any signature matched.
    pub web_server: Option<WebServer>,
    /// How good the match is.
    pub quality: MatchQualityType,
}

/// Represents a web server.
///
/// This struct contains the name, family, variant, and kind of browser.
/// Examples:
/// - name: "", family: "apache", variant: "2.x", kind: Type::Specified
/// - name: "", family: "nginx", variant: "1.x", kind: Type::Specified
#[derive(Debug)]
pub struct WebServer {
    pub name: String,
    pub family: Option<String>,
    pub variant: Option<String>,
    pub kind: Type,
}

impl From<&Label> for WebServer {
    // Maps database label fields: class -> family, flavor -> variant.
    fn from(label: &Label) -> Self {
        WebServer {
            name: label.name.clone(),
            family: label.class.clone(),
            variant: label.flavor.clone(),
            kind: label.ty.clone(),
        }
    }
}

/// Holds information derived from analyzing HTTP response headers.
///
/// This structure contains details about the server, the detected application
/// (if any), diagnostic parameters related to HTTP behavior, and the raw HTTP signature.
#[derive(Debug)]
pub struct HttpResponseOutput {
    /// The source IP address and port of the server sending the response.
    pub source: IpPort,
    /// The destination IP address and port of the client receiving the response.
    pub destination: IpPort,
    /// Diagnostic information about potential HTTP specification violations or common practices.
    pub diagnosis: HttpDiagnosis,
    /// The label identifying the likely server application (e.g., Apache, Nginx) and the quality.
    pub web_server_matched: WebServerQualityMatched,
    /// The raw signature representing the HTTP headers and their order.
    pub sig: ObservableHttpResponse,
}

impl fmt::Display for HttpResponseOutput {
    // Missing fields render as "???"; a non-empty server name takes
    // precedence over the family:variant form.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "[HTTP Response] {}:{} → {}:{}\n\
            Server: {}\n\
            Params: {}\n\
            Sig: {}\n",
            self.source.ip,
            self.source.port,
            self.destination.ip,
            self.destination.port,
            self.web_server_matched
                .web_server
                .as_ref()
                .map_or("???".to_string(), |web_server| {
                    if !web_server.name.is_empty() {
                        web_server.name.clone()
                    } else {
                        format!(
                            "{}:{}",
                            web_server.family.as_deref().unwrap_or("???"),
                            web_server.variant.as_deref().unwrap_or("???")
                        )
                    }
                }),
            self.diagnosis,
            self.sig,
        )
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/akamai.rs
huginn-net-http/src/akamai.rs
use std::fmt;

/// HTTP/2 Setting parameter ID
// `#[repr(u16)]` with explicit discriminants on the fieldless variants and a
// data-carrying `Unknown` variant requires Rust >= 1.66
// (arbitrary_enum_discriminant).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u16)]
pub enum SettingId {
    HeaderTableSize = 1,
    EnablePush = 2,
    MaxConcurrentStreams = 3,
    InitialWindowSize = 4,
    MaxFrameSize = 5,
    MaxHeaderListSize = 6,
    NoRfc7540Priorities = 9,
    /// Any setting ID not listed above; carries the raw wire value.
    Unknown(u16),
}

impl From<u16> for SettingId {
    fn from(id: u16) -> Self {
        match id {
            1 => Self::HeaderTableSize,
            2 => Self::EnablePush,
            3 => Self::MaxConcurrentStreams,
            4 => Self::InitialWindowSize,
            5 => Self::MaxFrameSize,
            6 => Self::MaxHeaderListSize,
            9 => Self::NoRfc7540Priorities,
            other => Self::Unknown(other),
        }
    }
}

impl fmt::Display for SettingId {
    // Renders the RFC-style SETTINGS parameter name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::HeaderTableSize => write!(f, "HEADER_TABLE_SIZE"),
            Self::EnablePush => write!(f, "ENABLE_PUSH"),
            Self::MaxConcurrentStreams => write!(f, "MAX_CONCURRENT_STREAMS"),
            Self::InitialWindowSize => write!(f, "INITIAL_WINDOW_SIZE"),
            Self::MaxFrameSize => write!(f, "MAX_FRAME_SIZE"),
            Self::MaxHeaderListSize => write!(f, "MAX_HEADER_LIST_SIZE"),
            Self::NoRfc7540Priorities => write!(f, "NO_RFC7540_PRIORITIES"),
            Self::Unknown(id) => write!(f, "UNKNOWN_{id}"),
        }
    }
}

impl SettingId {
    /// Convert to numeric ID for fingerprint generation
    // Inverse of `From<u16>`; kept as an explicit match so it can be `const`.
    #[must_use]
    pub const fn as_u16(self) -> u16 {
        match self {
            Self::HeaderTableSize => 1,
            Self::EnablePush => 2,
            Self::MaxConcurrentStreams => 3,
            Self::InitialWindowSize => 4,
            Self::MaxFrameSize => 5,
            Self::MaxHeaderListSize => 6,
            Self::NoRfc7540Priorities => 9,
            Self::Unknown(id) => id,
        }
    }
}

/// HTTP/2 Setting parameter (ID and value)
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SettingParameter {
    pub id: SettingId,
    pub value: u32,
}

impl fmt::Display for SettingParameter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}: {}", self.id, self.value)
    }
}

/// HTTP/2 Priority information
///
/// Weight in HTTP/2 spec is 0-255, but represents 1-256 (add 1 to value)
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Http2Priority {
    pub stream_id: u32,
    pub exclusive: bool,
    pub depends_on: u32,
    pub weight: u8, // 0-255 in frame, represents 1-256
}

impl fmt::Display for Http2Priority {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "stream={}, exclusive={}, depends_on={}, weight={}",
            self.stream_id,
            self.exclusive,
            self.depends_on,
            self.weight.saturating_add(1) // Display as 1-256
        )
    }
}

/// Pseudo-header order in HTTP/2 HEADERS frame
///
/// Common orders:
/// - Chrome: :method, :path, :authority, :scheme
/// - Firefox: :method, :path, :authority, :scheme
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PseudoHeader {
    Method,
    Path,
    Authority,
    Scheme,
    Status,
    /// Any other pseudo-header; carries the raw name.
    Unknown(String),
}

impl From<&str> for PseudoHeader {
    fn from(s: &str) -> Self {
        match s {
            ":method" => Self::Method,
            ":path" => Self::Path,
            ":authority" => Self::Authority,
            ":scheme" => Self::Scheme,
            ":status" => Self::Status,
            other => Self::Unknown(other.to_string()),
        }
    }
}

impl fmt::Display for PseudoHeader {
    // Single-letter codes used by the Akamai fingerprint format (m,p,a,s).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Method => write!(f, "m"),
            Self::Path => write!(f, "p"),
            Self::Authority => write!(f, "a"),
            Self::Scheme => write!(f, "s"),
            Self::Status => write!(f, "st"),
            Self::Unknown(name) => write!(f, "?{name}"),
        }
    }
}

/// Akamai HTTP/2 Fingerprint
///
/// Based on: https://www.blackhat.com/docs/eu-17/materials/eu-17-Shuster-Passive-Fingerprinting-Of-HTTP2-Clients-wp.pdf
///
/// Format: `S[;]|WU|P[,]|PS[,]`
/// - S: Settings parameters (id:value;...)
/// - WU: Window Update value
/// - P: Priority frames (stream:exclusive:depends_on:weight,...)
/// - PS: Pseudo-header order (m,p,a,s) /// /// Example: `1:65536;2:0;3:1000;4:6291456;5:16384;6:262144|15663105|0|m,p,a,s` #[derive(Debug, Clone, PartialEq, Eq)] pub struct AkamaiFingerprint { /// SETTINGS frame parameters (order matters) pub settings: Vec<SettingParameter>, /// WINDOW_UPDATE initial value pub window_update: u32, /// PRIORITY frames pub priority_frames: Vec<Http2Priority>, /// Pseudo-header order from HEADERS frame pub pseudo_header_order: Vec<PseudoHeader>, /// Fingerprint string representation pub fingerprint: String, /// Hash of the fingerprint (for database lookup) pub hash: String, } impl AkamaiFingerprint { /// Generate the Akamai fingerprint string /// /// Format: `settings|window_update|priorities|pseudo_headers` #[must_use] pub fn generate_fingerprint_string( settings: &[SettingParameter], window_update: u32, priority_frames: &[Http2Priority], pseudo_header_order: &[PseudoHeader], ) -> String { // Settings: id:value;id:value;... let settings_str = if settings.is_empty() { String::new() } else { settings .iter() .map(|s| format!("{}:{}", s.id.as_u16(), s.value)) .collect::<Vec<_>>() .join(";") }; // Window Update: value or "00" if not present let window_str = if window_update == 0 { "00".to_string() } else { window_update.to_string() }; // Priority: stream:exclusive:depends_on:weight,... 
let priority_str = if priority_frames.is_empty() { "0".to_string() } else { priority_frames .iter() .map(|p| { format!( "{}:{}:{}:{}", p.stream_id, u8::from(p.exclusive), p.depends_on, u16::from(p.weight).saturating_add(1) // Weight is 1-256, RFC 7540 says byte+1 ) }) .collect::<Vec<_>>() .join(",") }; // Pseudo-headers: m,p,a,s let pseudo_str = pseudo_header_order .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(","); format!("{settings_str}|{window_str}|{priority_str}|{pseudo_str}") } /// Create a new Akamai fingerprint /// /// # Parameters /// - `settings`: SETTINGS frame parameters (order matters) /// - `window_update`: WINDOW_UPDATE value /// - `priority_frames`: PRIORITY frames /// - `pseudo_header_order`: Order in which pseudo-headers (`:method`, `:path`, `:authority`, `:scheme`) appear in the HEADERS frame. This order is extracted from the first HEADERS frame with stream_id > 0 and is critical for fingerprint accuracy. #[must_use] pub fn new( settings: Vec<SettingParameter>, window_update: u32, priority_frames: Vec<Http2Priority>, pseudo_header_order: Vec<PseudoHeader>, ) -> Self { let fingerprint = Self::generate_fingerprint_string( &settings, window_update, &priority_frames, &pseudo_header_order, ); let hash = Self::hash_fingerprint(&fingerprint); Self { settings, window_update, priority_frames, pseudo_header_order, fingerprint, hash } } /// Hash the fingerprint for database lookup (SHA-256 truncated) #[must_use] pub fn hash_fingerprint(fingerprint: &str) -> String { use sha2::{Digest, Sha256}; let mut hasher = Sha256::new(); hasher.update(fingerprint.as_bytes()); let result = hasher.finalize(); // Truncate to first 16 bytes (32 hex chars) format!("{result:x}").chars().take(32).collect::<String>() } } impl fmt::Display for AkamaiFingerprint { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "Akamai HTTP/2 Fingerprint:")?; writeln!(f, " Fingerprint: {}", self.fingerprint)?; writeln!(f, " Hash: {}", self.hash)?; 
writeln!(f)?; writeln!(f, " SETTINGS:")?; for setting in &self.settings { writeln!(f, " {setting}")?; } writeln!(f)?; writeln!(f, " WINDOW_UPDATE: {}", self.window_update)?; writeln!(f)?; if self.priority_frames.is_empty() { writeln!(f, " PRIORITY: none")?; } else { writeln!(f, " PRIORITY:")?; for priority in &self.priority_frames { writeln!(f, " {priority}")?; } } writeln!(f)?; writeln!( f, " Pseudo-headers: {}", self.pseudo_header_order .iter() .map(ToString::to_string) .collect::<Vec<_>>() .join(", ") ) } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/http2_fingerprint_extractor.rs
huginn-net-http/src/http2_fingerprint_extractor.rs
use crate::akamai::AkamaiFingerprint; use crate::akamai_extractor::extract_akamai_fingerprint; use crate::http2_parser::{Http2ParseError, Http2Parser, HTTP2_CONNECTION_PREFACE}; /// HTTP/2 fingerprint extractor with incremental parsing support /// /// This struct manages buffering, parsing, and fingerprint extraction for HTTP/2 connections, /// handling incremental data arrival and connection preface automatically. /// /// # Example /// ```no_run /// use huginn_net_http::http2_fingerprint_extractor::Http2FingerprintExtractor; /// /// let mut extractor = Http2FingerprintExtractor::new(); /// /// // Add data incrementally /// extractor.add_bytes(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); /// extractor.add_bytes(b"\x00\x00\x06\x04\x00\x00\x00\x00\x00"); /// /// // Check if fingerprint is ready /// if let Some(fingerprint) = extractor.get_fingerprint() { /// println!("Akamai: {}", fingerprint.fingerprint); /// } /// ``` pub struct Http2FingerprintExtractor { parser: Http2Parser<'static>, buffer: Vec<u8>, parsed_offset: usize, fingerprint: Option<AkamaiFingerprint>, } impl Http2FingerprintExtractor { /// Create a new HTTP/2 fingerprint extractor /// /// # Returns /// A new `Http2FingerprintExtractor` instance ready to process HTTP/2 data #[must_use] pub fn new() -> Self { Self { parser: Http2Parser::new(), buffer: Vec::with_capacity(64 * 1024), parsed_offset: 0, fingerprint: None, } } /// Add bytes to the buffer and attempt to extract fingerprint /// /// This method handles incremental data arrival, automatically skipping the HTTP/2 /// connection preface and parsing frames as they become available. 
/// /// # Parameters /// - `data`: New bytes to add to the buffer /// /// # Returns /// - `Ok(Some(AkamaiFingerprint))` if fingerprint was successfully extracted /// - `Ok(None)` if more data is needed or fingerprint already extracted /// - `Err(Http2ParseError)` if parsing fails /// /// # Example /// ```no_run /// use huginn_net_http::http2_fingerprint_extractor::Http2FingerprintExtractor; /// /// let mut extractor = Http2FingerprintExtractor::new(); /// match extractor.add_bytes(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\x00\x00\x06\x04\x00\x00\x00\x00\x00") { /// Ok(Some(fingerprint)) => println!("Got fingerprint: {}", fingerprint.fingerprint), /// Ok(None) => println!("Need more data"), /// Err(e) => eprintln!("Error: {:?}", e), /// } /// ``` pub fn add_bytes(&mut self, data: &[u8]) -> Result<Option<AkamaiFingerprint>, Http2ParseError> { // If fingerprint already extracted, don't process more data if self.fingerprint.is_some() { return Ok(None); } self.buffer.extend_from_slice(data); // Skip HTTP/2 connection preface let start_offset = if self.parsed_offset == 0 && self.buffer.starts_with(HTTP2_CONNECTION_PREFACE) { HTTP2_CONNECTION_PREFACE.len() } else { self.parsed_offset }; let frame_data = &self.buffer[start_offset..]; if frame_data.len() >= 9 { match self.parser.parse_frames_with_offset(frame_data) { Ok((frames, bytes_consumed)) => { if !frames.is_empty() { // Update parsed_offset based on actual bytes consumed self.parsed_offset = start_offset.saturating_add(bytes_consumed); if let Some(fingerprint) = extract_akamai_fingerprint(&frames) { self.fingerprint = Some(fingerprint.clone()); return Ok(Some(fingerprint)); } } } Err(e) => { // Parsing error, might need more data return Err(e); } } } Ok(None) } /// Get the extracted fingerprint if available /// /// # Returns /// - `Some(AkamaiFingerprint)` if fingerprint has been extracted /// - `None` if fingerprint not yet available #[must_use] pub fn get_fingerprint(&self) -> Option<&AkamaiFingerprint> { 
self.fingerprint.as_ref() } /// Check if fingerprint has been extracted /// /// # Returns /// `true` if fingerprint is available, `false` otherwise #[must_use] pub fn fingerprint_extracted(&self) -> bool { self.fingerprint.is_some() } /// Reset the extractor to process a new connection /// /// Clears the buffer and resets parsing state, allowing the extractor to be reused. pub fn reset(&mut self) { self.buffer.clear(); self.parsed_offset = 0; self.fingerprint = None; } } impl Default for Http2FingerprintExtractor { fn default() -> Self { Self::new() } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/src/packet_parser.rs
huginn-net-http/src/packet_parser.rs
/// Packet parsing utilities for different network packet formats /// /// This module provides unified parsing for various network packet formats /// from both live network capture and PCAP files: /// - Ethernet frames (most common in network interfaces) /// - Raw IP packets (tunnels, loopback interfaces) /// - NULL datalink packets (specialized capture tools) /// - Future packet formats can be added here use pnet::packet::ethernet::{EtherTypes, EthernetPacket}; use pnet::packet::ipv4::Ipv4Packet; use pnet::packet::ipv6::Ipv6Packet; use tracing::debug; /// Represents the result of IP packet parsing #[derive(Debug)] pub enum IpPacket<'a> { Ipv4(Ipv4Packet<'a>), Ipv6(Ipv6Packet<'a>), None, } /// Datalink format types supported #[derive(Debug, Clone, Copy, PartialEq)] pub enum DatalinkFormat { /// Standard Ethernet frame (14-byte header) Ethernet, /// Raw IP packet (no datalink header) RawIp, /// NULL datalink with 4-byte header (0x1e 0x00 ...) Null, } /// Parse a network packet using multiple format strategies /// /// Tries different parsing strategies in order of likelihood: /// 1. Ethernet (most common in network interfaces and PCAPs) /// 2. Raw IP (tunnels, loopback interfaces, some PCAPs) /// 3. NULL datalink (specialized capture tools) /// /// Works with packets from both live network capture and PCAP files. 
/// /// # Arguments /// * `packet` - Raw packet bytes from network interface or PCAP file /// /// # Returns /// * `IpPacket` - The parsed IP packet or None if no valid format found pub fn parse_packet(packet: &[u8]) -> IpPacket<'_> { // Strategy 1: Try Ethernet first (most common) if let Some(parsed) = try_ethernet_format(packet) { return parsed; } // Strategy 2: Try Raw IP (no Ethernet header) if let Some(parsed) = try_raw_ip_format(packet) { return parsed; } // Strategy 3: Try NULL datalink (skip 4-byte header) if let Some(parsed) = try_null_datalink_format(packet) { return parsed; } IpPacket::None } /// Try parsing as Ethernet frame fn try_ethernet_format(packet: &[u8]) -> Option<IpPacket<'_>> { // Ethernet header is 14 bytes: [6B dst][6B src][2B ethertype] if packet.len() < 14 { return None; } let ethernet = EthernetPacket::new(packet)?; let ip_data = &packet[14..]; // Skip 14-byte Ethernet header match ethernet.get_ethertype() { EtherTypes::Ipv4 => { if let Some(ipv4) = Ipv4Packet::new(ip_data) { debug!("Parsed Ethernet IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } EtherTypes::Ipv6 => { if let Some(ipv6) = Ipv6Packet::new(ip_data) { debug!("Parsed Ethernet IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Try parsing as Raw IP (no datalink header) fn try_raw_ip_format(packet: &[u8]) -> Option<IpPacket<'_>> { if packet.len() < 20 { return None; } // Check IP version in first 4 bits let version = (packet[0] & 0xF0) >> 4; match version { 4 => { if let Some(ipv4) = Ipv4Packet::new(packet) { debug!("Parsed Raw IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } 6 => { if let Some(ipv6) = Ipv6Packet::new(packet) { debug!("Parsed Raw IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Try parsing as NULL datalink format (4-byte header) fn try_null_datalink_format(packet: &[u8]) -> Option<IpPacket<'_>> { // Check for NULL datalink signature and minimum size if packet.len() < 24 || packet[0] != 0x1e || packet[1] 
!= 0x00 { return None; } let ip_data = &packet[4..]; // Skip 4-byte NULL header let version = (ip_data[0] & 0xF0) >> 4; match version { 4 => { if let Some(ipv4) = Ipv4Packet::new(ip_data) { debug!("Parsed NULL datalink IPv4 packet"); return Some(IpPacket::Ipv4(ipv4)); } } 6 => { if let Some(ipv6) = Ipv6Packet::new(ip_data) { debug!("Parsed NULL datalink IPv6 packet"); return Some(IpPacket::Ipv6(ipv6)); } } _ => {} } None } /// Detect the datalink format of a packet without full parsing /// /// Useful for statistics or format validation pub fn detect_datalink_format(packet: &[u8]) -> Option<DatalinkFormat> { // Check NULL datalink first (most specific signature) if packet.len() >= 24 && packet[0] == 0x1e && packet[1] == 0x00 { let ip_data = &packet[4..]; let version = (ip_data[0] & 0xF0) >> 4; if version == 4 || version == 6 { return Some(DatalinkFormat::Null); } } // Check Raw IP (check if it starts with valid IP version) if packet.len() >= 20 { let version = (packet[0] & 0xF0) >> 4; if version == 4 || version == 6 { // Additional validation for IPv4 if version == 4 { let ihl = (packet[0] & 0x0F).saturating_mul(4); if ihl >= 20 && packet.len() >= usize::from(ihl) { return Some(DatalinkFormat::RawIp); } } // Additional validation for IPv6 else if version == 6 && packet.len() >= 40 { return Some(DatalinkFormat::RawIp); } } } // Check Ethernet (least specific - needs valid EtherType) if packet.len() >= 14 { if let Some(ethernet) = EthernetPacket::new(packet) { let ethertype = ethernet.get_ethertype(); // Only consider it Ethernet if it has a valid IP EtherType if ethertype == EtherTypes::Ipv4 || ethertype == EtherTypes::Ipv6 { let ip_data = &packet[14..]; if !ip_data.is_empty() { let version = (ip_data[0] & 0xF0) >> 4; if (ethertype == EtherTypes::Ipv4 && version == 4) || (ethertype == EtherTypes::Ipv6 && version == 6) { return Some(DatalinkFormat::Ethernet); } } } } } None }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/akamai_paper_golden_tests.rs
huginn-net-http/tests/akamai_paper_golden_tests.rs
//! Golden tests based on Blackhat EU 2017 Paper //! //! These tests validate against the REAL fingerprints published in the //! original research paper by Shuster, et al. //! //! Paper: https://blackhat.com/docs/eu-17/materials/eu-17-Shuster-Passive-Fingerprinting-Of-HTTP2-Clients-wp.pdf use huginn_net_http::{extract_akamai_fingerprint, Http2Frame}; use serde::{Deserialize, Serialize}; use std::fs; #[derive(Serialize, Deserialize, Debug, Clone)] struct PaperTestCase { name: String, description: String, paper_reference: String, frames: Vec<FrameSnapshot>, expected_fingerprint: Option<ExpectedFingerprint>, } #[derive(Serialize, Deserialize, Debug, Clone)] struct FrameSnapshot { frame_type: u8, flags: u8, stream_id: u32, payload: Vec<u8>, } #[derive(Serialize, Deserialize, Debug, Clone)] struct ExpectedFingerprint { signature: String, hash: String, settings_count: usize, window_update: u32, priority_frames_count: usize, pseudo_headers_count: usize, } impl From<FrameSnapshot> for Http2Frame { fn from(snapshot: FrameSnapshot) -> Self { Http2Frame::new(snapshot.frame_type, snapshot.flags, snapshot.stream_id, snapshot.payload) } } fn load_paper_test_cases() -> Vec<PaperTestCase> { let test_data = match fs::read_to_string("tests/snapshots/akamai_paper_cases.json") { Ok(data) => data, Err(e) => panic!("Failed to read akamai_paper_cases.json: {e}"), }; match serde_json::from_str(&test_data) { Ok(cases) => cases, Err(e) => panic!("Failed to parse paper test cases JSON: {e}"), } } struct ActualFingerprint<'a> { signature: &'a str, hash: &'a str, settings_count: usize, window_update: u32, priority_count: usize, pseudo_headers_count: usize, } fn assert_paper_fingerprint_matches( actual: &ActualFingerprint, expected: &ExpectedFingerprint, test_name: &str, ) { assert_eq!( actual.signature, expected.signature, "[{test_name}] Signature mismatch (vs Paper)" ); assert_eq!( actual.settings_count, expected.settings_count, "[{test_name}] Settings count mismatch" ); assert_eq!( 
actual.window_update, expected.window_update, "[{test_name}] Window update mismatch" ); assert_eq!( actual.priority_count, expected.priority_frames_count, "[{test_name}] Priority frames count mismatch" ); assert_eq!( actual.pseudo_headers_count, expected.pseudo_headers_count, "[{test_name}] Pseudo-headers count mismatch" ); // Hash validation is optional here since paper doesn't provide hashes if !expected.hash.starts_with("PAPER_") { assert_eq!(actual.hash, expected.hash, "[{test_name}] Hash mismatch"); } } /// Main test: Validates against Blackhat EU 2017 paper examples #[test] fn test_paper_golden_snapshots() { let test_cases = load_paper_test_cases(); for test_case in test_cases { println!("📄 Running Paper Test: {}", test_case.name); println!(" Description: {}", test_case.description); println!(" Reference: {}", test_case.paper_reference); let frames: Vec<Http2Frame> = test_case.frames.into_iter().map(Http2Frame::from).collect(); let fingerprint = extract_akamai_fingerprint(&frames); match (&fingerprint, &test_case.expected_fingerprint) { (Some(actual_fp), Some(expected)) => { let actual = ActualFingerprint { signature: &actual_fp.fingerprint, hash: &actual_fp.hash, settings_count: actual_fp.settings.len(), window_update: actual_fp.window_update, priority_count: actual_fp.priority_frames.len(), pseudo_headers_count: actual_fp.pseudo_header_order.len(), }; assert_paper_fingerprint_matches(&actual, expected, &test_case.name); } (None, None) => { /* expected */ } (Some(actual), None) => { panic!( "[{}] Expected no fingerprint, but got: {}", test_case.name, actual.fingerprint ); } (None, Some(_)) => { panic!( "[{}] Expected fingerprint from paper, but none was generated", test_case.name ); } } } } #[test] fn test_paper_chrome_61() { // Chrome 61 has the most PRIORITY frames in the paper example let frames = vec![ Http2Frame::new(0x4, 0x0, 0, vec![0, 3, 0, 0, 0, 100, 0, 4, 0, 96, 0, 0, 0, 2, 0, 0, 0, 0]), Http2Frame::new(0x8, 0x0, 0, vec![0, 238, 255, 1]), 
Http2Frame::new(0x2, 0x0, 3, vec![0, 0, 0, 0, 200]), Http2Frame::new(0x2, 0x0, 5, vec![0, 0, 0, 0, 100]), Http2Frame::new(0x2, 0x0, 7, vec![0, 0, 0, 0, 0]), Http2Frame::new(0x2, 0x0, 9, vec![0, 0, 0, 7, 0]), Http2Frame::new(0x2, 0x0, 11, vec![0, 0, 0, 3, 0]), Http2Frame::new(0x2, 0x0, 13, vec![0, 0, 0, 0, 240]), ]; let fingerprint = if let Some(fp) = extract_akamai_fingerprint(&frames) { fp } else { panic!("Failed to extract Chrome 61 fingerprint from paper"); }; // Verify structure assert_eq!(fingerprint.settings.len(), 3, "Chrome 61 should have 3 SETTINGS"); assert_eq!(fingerprint.window_update, 15662849, "Chrome 61 WINDOW_UPDATE"); assert_eq!(fingerprint.priority_frames.len(), 6, "Chrome 61 should have 6 PRIORITY frames"); // Verify signature format (values may differ slightly from paper due to exact byte values) assert!(fingerprint.fingerprint.contains("3:100")); assert!(fingerprint.fingerprint.contains("4:6291456")); assert!(fingerprint.fingerprint.contains("|15662849|")); // Actual value from payload } #[test] fn test_paper_firefox_55() { let frames = vec![ Http2Frame::new(0x4, 0x0, 0, vec![0, 3, 0, 0, 16, 0, 0, 4, 0, 2, 0, 0, 0, 1, 0, 1, 0, 0]), Http2Frame::new(0x8, 0x0, 0, vec![0, 190, 255, 1]), ]; let fingerprint = if let Some(fp) = extract_akamai_fingerprint(&frames) { fp } else { panic!("Failed to extract Firefox 55 fingerprint from paper"); }; assert_eq!(fingerprint.settings.len(), 3, "Firefox 55 should have 3 SETTINGS"); assert_eq!(fingerprint.window_update, 12517121, "Firefox 55 WINDOW_UPDATE"); assert_eq!(fingerprint.priority_frames.len(), 0, "Firefox 55 has no PRIORITY frames"); } #[test] fn test_paper_safari_11() { let frames = vec![ Http2Frame::new( 0x4, 0x0, 0, vec![0, 3, 0, 0, 0, 100, 0, 4, 0, 0, 255, 255, 0, 2, 0, 0, 0, 1], ), Http2Frame::new(0x8, 0x0, 0, vec![0, 255, 0, 1]), Http2Frame::new(0x2, 0x0, 3, vec![128, 0, 0, 0, 255]), Http2Frame::new(0x2, 0x0, 5, vec![128, 0, 0, 3, 255]), ]; let fingerprint = if let Some(fp) = 
extract_akamai_fingerprint(&frames) { fp } else { panic!("Failed to extract Safari 11 fingerprint from paper"); }; assert_eq!(fingerprint.settings.len(), 3, "Safari 11 should have 3 SETTINGS"); assert_eq!(fingerprint.window_update, 16711681, "Safari 11 WINDOW_UPDATE"); assert_eq!(fingerprint.priority_frames.len(), 2, "Safari 11 should have 2 PRIORITY frames"); // Safari uses exclusive bit (0x80) assert!( fingerprint.priority_frames.iter().all(|p| p.exclusive), "Safari 11 PRIORITY frames should be exclusive" ); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/raw_filter_tests.rs
huginn-net-http/tests/raw_filter_tests.rs
use std::net::IpAddr; use huginn_net_http::filter::{FilterConfig, FilterMode, IpFilter, PortFilter}; /// Helper to create an IPv4 TCP packet (minimal: IP header + TCP ports) fn create_ipv4_tcp_packet( src_ip: [u8; 4], dst_ip: [u8; 4], src_port: u16, dst_port: u16, ) -> Vec<u8> { let mut packet = vec![0u8; 40]; packet[0] = 0x45; // IPv4, IHL=5 packet[9] = 6; // TCP protocol packet[12..16].copy_from_slice(&src_ip); packet[16..20].copy_from_slice(&dst_ip); packet[20..22].copy_from_slice(&src_port.to_be_bytes()); packet[22..24].copy_from_slice(&dst_port.to_be_bytes()); packet } /// Helper to create an Ethernet frame with IPv4 TCP packet fn create_ethernet_ipv4_tcp_packet( src_ip: [u8; 4], dst_ip: [u8; 4], src_port: u16, dst_port: u16, ) -> Vec<u8> { let mut packet = vec![0u8; 54]; packet[12..14].copy_from_slice(&[0x08, 0x00]); // IPv4 EtherType packet[14] = 0x45; // IPv4, IHL=5 packet[23] = 6; // TCP protocol packet[26..30].copy_from_slice(&src_ip); packet[30..34].copy_from_slice(&dst_ip); packet[34..36].copy_from_slice(&src_port.to_be_bytes()); packet[36..38].copy_from_slice(&dst_port.to_be_bytes()); packet } #[test] fn test_raw_filter_ipv4_raw_packet() { let packet = create_ipv4_tcp_packet([192, 168, 1, 100], [8, 8, 8, 8], 12345, 443); assert_eq!(packet.len(), 40); assert_eq!(packet[0], 0x45); // IPv4, IHL=5 assert_eq!(packet[9], 6); // TCP protocol } #[test] fn test_raw_filter_ethernet_frame() { let packet = create_ethernet_ipv4_tcp_packet([192, 168, 1, 100], [8, 8, 8, 8], 12345, 443); assert_eq!(packet.len(), 54); assert_eq!(&packet[12..14], &[0x08, 0x00]); // IPv4 EtherType assert_eq!(packet[14], 0x45); // IPv4, IHL=5 assert_eq!(packet[23], 6); // TCP protocol } #[test] fn test_raw_filter_allows_matching_destination_port() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" 
.parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_blocks_non_matching_destination_port() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 80)); } #[test] fn test_raw_filter_allows_matching_source_ip() { let filter = FilterConfig::new().mode(FilterMode::Allow).with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_blocks_non_matching_source_ip() { let filter = FilterConfig::new().mode(FilterMode::Allow).with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_combined_filters() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)) .with_ip_filter( IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IP: {e}")) .source_only(), ); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); 
assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 80)); let wrong_src: IpAddr = "10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&wrong_src, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_deny_mode() { let filter = FilterConfig::new() .mode(FilterMode::Deny) .with_port_filter(PortFilter::new().destination(22)); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(!filter.should_process(&src_ip, &dst_ip, 12345, 22)); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); } #[test] fn test_raw_filter_no_filter_allows_all() { let filter = FilterConfig::new(); let src_ip: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); let dst_ip: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IP: {e}")); assert!(filter.should_process(&src_ip, &dst_ip, 12345, 443)); assert!(filter.should_process(&src_ip, &dst_ip, 54321, 80)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/akamai_extractor.rs
huginn-net-http/tests/akamai_extractor.rs
use huginn_net_http::akamai_extractor::{ parse_priority_payload, parse_settings_payload, parse_window_update_payload, }; use huginn_net_http::{SettingId, SettingParameter}; #[test] fn test_parse_settings_payload() { let payload = vec![ 0x00, 0x01, // ID: HEADER_TABLE_SIZE (1) 0x00, 0x00, 0x10, 0x00, // Value: 4096 0x00, 0x02, // ID: ENABLE_PUSH (2) 0x00, 0x00, 0x00, 0x00, // Value: 0 ]; let settings = parse_settings_payload(&payload); assert_eq!(settings.len(), 2); assert_eq!(settings[0].id, SettingId::HeaderTableSize); assert_eq!(settings[0].value, 4096); assert_eq!(settings[1].id, SettingId::EnablePush); assert_eq!(settings[1].value, 0); } #[test] fn test_parse_settings_payload_chrome() { let payload = vec![ 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, // HEADER_TABLE_SIZE: 65536 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, // ENABLE_PUSH: 0 0x00, 0x03, 0x00, 0x00, 0x03, 0xE8, // MAX_CONCURRENT_STREAMS: 1000 0x00, 0x04, 0x00, 0x60, 0x00, 0x00, // INITIAL_WINDOW_SIZE: 6291456 (0x600000) 0x00, 0x05, 0x00, 0x00, 0x40, 0x00, // MAX_FRAME_SIZE: 16384 0x00, 0x06, 0x00, 0x04, 0x00, 0x00, // MAX_HEADER_LIST_SIZE: 262144 ]; let settings = parse_settings_payload(&payload); assert_eq!(settings.len(), 6); assert_eq!(settings[0], SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }); assert_eq!(settings[1], SettingParameter { id: SettingId::EnablePush, value: 0 }); assert_eq!( settings[2], SettingParameter { id: SettingId::MaxConcurrentStreams, value: 1000 } ); assert_eq!( settings[3], SettingParameter { id: SettingId::InitialWindowSize, value: 6291456 } ); assert_eq!(settings[4], SettingParameter { id: SettingId::MaxFrameSize, value: 16384 }); assert_eq!( settings[5], SettingParameter { id: SettingId::MaxHeaderListSize, value: 262144 } ); } #[test] fn test_parse_window_update_payload() { let payload = vec![0x00, 0xEF, 0x00, 0x01]; // 15663105 = 0xEF0001 let result = parse_window_update_payload(&payload); assert!(result.is_some(), "Failed to parse valid WINDOW_UPDATE payload"); if 
let Some(increment) = result { assert_eq!(increment, 15663105); } } #[test] fn test_parse_window_update_payload_firefox() { let payload = vec![0x00, 0xBF, 0x00, 0x01]; // 12517377 = 0xBF0001 let result = parse_window_update_payload(&payload); assert!(result.is_some(), "Failed to parse valid Firefox WINDOW_UPDATE payload"); if let Some(increment) = result { assert_eq!(increment, 12517377); } } #[test] fn test_parse_window_update_payload_too_short() { let payload = vec![0x00, 0xEE, 0xFC]; let result = parse_window_update_payload(&payload); assert!(result.is_none()); } #[test] fn test_parse_priority_payload() { let payload = vec![ 0x00, 0x00, 0x00, 0x00, // depends_on: 0 (no exclusive bit) 220, // weight: 220 ]; let result = parse_priority_payload(1, &payload); assert!(result.is_some(), "Failed to parse valid PRIORITY payload"); if let Some(priority) = result { assert_eq!(priority.stream_id, 1); assert!(!priority.exclusive); assert_eq!(priority.depends_on, 0); assert_eq!(priority.weight, 220); } } #[test] fn test_parse_priority_payload_exclusive() { let payload = vec![ 0x80, 0x00, 0x00, 0x03, // depends_on: 3 with exclusive bit set 200, // weight: 200 ]; let result = parse_priority_payload(5, &payload); assert!(result.is_some(), "Failed to parse valid PRIORITY payload with exclusive bit"); if let Some(priority) = result { assert_eq!(priority.stream_id, 5); assert!(priority.exclusive); assert_eq!(priority.depends_on, 3); assert_eq!(priority.weight, 200); } } #[test] fn test_parse_priority_payload_too_short() { let payload = vec![0x00, 0x00, 0x00]; let result = parse_priority_payload(1, &payload); assert!(result.is_none()); } #[test] fn test_parse_settings_empty_payload() { let payload = vec![]; let settings = parse_settings_payload(&payload); assert!(settings.is_empty()); } #[test] fn test_parse_settings_incomplete_setting() { let payload = vec![ 0x00, 0x01, 0x00, 0x00, 0x10, 0x00, // Complete setting 0x00, 0x02, 0x00, // Incomplete setting (missing 3 bytes) ]; let 
settings = parse_settings_payload(&payload); assert_eq!(settings.len(), 1); assert_eq!(settings[0].id, SettingId::HeaderTableSize); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/filter_tests.rs
huginn-net-http/tests/filter_tests.rs
use huginn_net_http::{FilterConfig, FilterMode, IpFilter, PortFilter, SubnetFilter}; use std::net::IpAddr; #[test] fn test_port_filter_destination() { let filter = PortFilter::new().destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_source() { let filter = PortFilter::new().source(12345); assert!(filter.matches(12345, 80)); assert!(!filter.matches(54321, 80)); } #[test] fn test_port_filter_list() { let filter = PortFilter::new().destination_list(vec![80, 443, 8080]); assert!(filter.matches(12345, 80)); assert!(filter.matches(12345, 443)); assert!(filter.matches(12345, 8080)); assert!(!filter.matches(12345, 22)); } #[test] fn test_port_filter_range() { let filter = PortFilter::new().destination_range(8000..9000); assert!(filter.matches(12345, 8000)); assert!(filter.matches(12345, 8500)); assert!(filter.matches(12345, 8999)); assert!(!filter.matches(12345, 9000)); assert!(!filter.matches(12345, 7999)); } #[test] fn test_port_filter_any_port() { let filter = PortFilter::new().destination(443).any_port(); assert!(filter.matches(12345, 443)); assert!(filter.matches(443, 80)); assert!(!filter.matches(12345, 80)); } #[test] fn test_port_filter_combined() { let filter = PortFilter::new().source(12345).destination(443); assert!(filter.matches(12345, 443)); assert!(!filter.matches(12345, 80)); assert!(!filter.matches(54321, 443)); } #[test] fn test_ip_filter_v4() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches( &ip_other, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_ip_filter_v6() { let 
filter = IpFilter::new() .allow("2001:db8::1") .unwrap_or_else(|e| panic!("Invalid IPv6 address: {e}")); let ip_match: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_other: IpAddr = "2001:db8::2" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches( &ip_other, &"2001:db8::3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_ip_filter_source_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .source_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_match, &ip_other)); assert!(!filter.matches(&ip_other, &ip_match)); } #[test] fn test_ip_filter_destination_only() { let filter = IpFilter::new() .allow("192.168.1.100") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")) .destination_only(); let ip_match: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.200" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_other, &ip_match)); assert!(!filter.matches(&ip_match, &ip_other)); } #[test] fn test_subnet_filter_v4() { let filter = SubnetFilter::new() .allow("192.168.1.0/24") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "192.168.2.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_subnet_filter_v6() { let filter = SubnetFilter::new() .allow("2001:db8::/32") 
.unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")); let ip_in: IpAddr = "2001:db8::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); let ip_out: IpAddr = "2001:db9::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")); assert!(filter.matches(&ip_in, &ip_out)); assert!(!filter.matches( &ip_out, &"2001:dba::1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv6: {e}")) )); } #[test] fn test_subnet_filter_multiple() { let filter = SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")); let ip1: IpAddr = "10.1.2.3" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "172.16.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip3: IpAddr = "192.168.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.matches(&ip1, &ip_out)); assert!(filter.matches(&ip2, &ip_out)); assert!(filter.matches(&ip3, &ip_out)); assert!(!filter.matches( &ip_out, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")) )); } #[test] fn test_combined_filter_allow() { let filter = FilterConfig::new() .mode(FilterMode::Allow) .with_port_filter(PortFilter::new().destination(443)) .with_subnet_filter( SubnetFilter::new() .allow("192.168.0.0/16") .unwrap_or_else(|e| panic!("Invalid CIDR notation: {e}")), ); let ip_in: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_out: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_in, &ip_out, 12345, 443)); assert!(!filter.should_process(&ip_in, &ip_out, 12345, 80)); assert!(!filter.should_process( &ip_out, &"10.0.0.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_combined_filter_deny() { let filter = FilterConfig::new() 
.mode(FilterMode::Deny) .with_subnet_filter( SubnetFilter::new() .allow_list(vec!["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"]) .unwrap_or_else(|e| panic!("Invalid CIDR notations: {e}")), ); let ip_private: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_public: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(!filter.should_process(&ip_private, &ip_public, 12345, 443)); assert!(filter.should_process(&ip_public, &ip_public, 12345, 443)); } #[test] fn test_no_filters() { let filter = FilterConfig::new(); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(filter.should_process(&ip2, &ip1, 80, 12345)); } #[test] fn test_port_only_filter() { let filter = FilterConfig::new().with_port_filter(PortFilter::new().destination(443)); let ip1: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip2: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip1, &ip2, 12345, 443)); assert!(!filter.should_process(&ip1, &ip2, 12345, 80)); } #[test] fn test_ip_only_filter() { let filter = FilterConfig::new().with_ip_filter( IpFilter::new() .allow("8.8.8.8") .unwrap_or_else(|e| panic!("Invalid IPv4 address: {e}")), ); let ip_match: IpAddr = "8.8.8.8" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); let ip_other: IpAddr = "192.168.1.100" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")); assert!(filter.should_process(&ip_match, &ip_other, 12345, 443)); assert!(filter.should_process(&ip_other, &ip_match, 12345, 443)); assert!(!filter.should_process( &ip_other, &"1.1.1.1" .parse() .unwrap_or_else(|e| panic!("Invalid IPv4: {e}")), 12345, 443 )); } #[test] fn test_invalid_ip() { let result = 
IpFilter::new().allow("not-an-ip"); assert!(result.is_err()); } #[test] fn test_invalid_cidr() { let result = SubnetFilter::new().allow("192.168.1.0/99"); assert!(result.is_err()); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http1_process.rs
huginn-net-http/tests/http1_process.rs
use huginn_net_db::http; use huginn_net_db::http::Header; use huginn_net_http::http1_process::{ has_complete_headers, parse_http1_request, parse_http1_response, }; use huginn_net_http::{http1_parser, http_common}; #[test] fn test_parse_http1_request() { let valid_request = b"GET / HTTP/1.1\r\n\ Host: example.com\r\n\ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\r\n\ Accept-Language: en-US,en;q=0.9,es;q=0.8\r\n\ Cache-Control: max-age=0\r\n\ Connection: keep-alive\r\n\ If-Modified-Since: Thu, 17 Oct 2019 07:18:26 GMT\r\n\ If-None-Match: \"3147526947\"\r\n\ Upgrade-Insecure-Requests: 1\r\n\ User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36\r\n\ \r\n"; let parser = http1_parser::Http1Parser::new(); match parse_http1_request(valid_request, &parser) { Ok(Some(request)) => { assert_eq!(request.lang, Some("English".to_string())); assert_eq!(request.user_agent, Some("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36".to_string())); assert_eq!(request.matching.version, http::Version::V11); let expected_horder = vec![ http::Header::new("Host"), http::Header::new("Accept").with_value("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"), http::Header::new("Accept-Language").with_value("en-US,en;q=0.9,es;q=0.8"), http::Header::new("Cache-Control").optional(), http::Header::new("Connection").with_value("keep-alive"), http::Header::new("If-Modified-Since").optional(), Header::new("If-None-Match").optional(), http::Header::new("Upgrade-Insecure-Requests").with_value("1"), http::Header::new("User-Agent"), ]; assert_eq!(request.matching.horder, expected_horder); let expected_habsent = vec![ http::Header::new("Accept-Encoding"), http::Header::new("Accept-Charset"), 
http::Header::new("Keep-Alive"), ]; assert_eq!(request.matching.habsent, expected_habsent); assert_eq!(request.matching.expsw, "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"); } Ok(None) => panic!("Incomplete HTTP request"), Err(e) => panic!("Failed to parse HTTP request: {e}"), } } #[test] fn test_parse_http1_response() { let valid_response = b"HTTP/1.1 200 OK\r\n\ Server: Apache\r\n\ Content-Type: text/html; charset=UTF-8\r\n\ Content-Length: 112\r\n\ Connection: keep-alive\r\n\ \r\n\ <html><body><h1>It works!</h1></body></html>"; let parser = http1_parser::Http1Parser::new(); match parse_http1_response(valid_response, &parser) { Ok(Some(response)) => { assert_eq!(response.matching.expsw, "Apache"); assert_eq!(response.matching.version, http::Version::V11); let expected_horder = vec![ http::Header::new("Server"), http::Header::new("Content-Type"), http::Header::new("Content-Length").optional(), http::Header::new("Connection").with_value("keep-alive"), ]; assert_eq!(response.matching.horder, expected_horder); let expected_absent = vec![ http::Header::new("Keep-Alive"), http::Header::new("Accept-Ranges"), http::Header::new("Date"), ]; assert_eq!(response.matching.habsent, expected_absent); } Ok(None) => panic!("Incomplete HTTP response"), Err(e) => panic!("Failed to parse HTTP response: {e}"), } } #[test] fn test_get_diagnostic_for_empty_sw() { let diagnosis: http::HttpDiagnosis = http_common::get_diagnostic(None, None, None); assert_eq!(diagnosis, http::HttpDiagnosis::Anonymous); } #[test] fn test_get_diagnostic_with_existing_signature_matcher() { let user_agent: Option<String> = Some("Mozilla/5.0".to_string()); let os = "Linux".to_string(); let browser = Some("Firefox".to_string()); let ua_matcher: Option<(&String, &Option<String>)> = Some((&os, &browser)); let label = huginn_net_db::Label { ty: huginn_net_db::Type::Specified, class: None, name: "Linux".to_string(), flavor: None, }; let 
signature_os_matcher: Option<&huginn_net_db::Label> = Some(&label); let diagnosis = http_common::get_diagnostic(user_agent, ua_matcher, signature_os_matcher); assert_eq!(diagnosis, http::HttpDiagnosis::Generic); } #[test] fn test_get_diagnostic_with_dishonest_user_agent() { let user_agent = Some("Mozilla/5.0".to_string()); let os = "Windows".to_string(); let browser = Some("Firefox".to_string()); let ua_matcher: Option<(&String, &Option<String>)> = Some((&os, &browser)); let label = huginn_net_db::Label { ty: huginn_net_db::Type::Specified, class: None, name: "Linux".to_string(), flavor: None, }; let signature_os_matcher: Option<&huginn_net_db::Label> = Some(&label); let diagnosis = http_common::get_diagnostic(user_agent, ua_matcher, signature_os_matcher); assert_eq!(diagnosis, http::HttpDiagnosis::Dishonest); } #[test] fn test_get_diagnostic_without_user_agent_and_signature_matcher() { let user_agent = Some("Mozilla/5.0".to_string()); let diagnosis = http_common::get_diagnostic(user_agent, None, None); assert_eq!(diagnosis, http::HttpDiagnosis::None); } #[test] fn test_incomplete_headers() { let data = b"GET /path HTTP/1.1\r\nHost: example.com\r\n"; assert!(!has_complete_headers(data)); } #[test] fn test_complete_headers() { let data = b"GET /path HTTP/1.1\r\nHost: example.com\r\nCookie: session=abc\r\n\r\n"; assert!(has_complete_headers(data)); } #[test] fn test_complete_headers_with_body() { let data = b"GET /path HTTP/1.1\r\nHost: example.com\r\n\r\nbody data here"; assert!(has_complete_headers(data)); } #[test] fn test_empty_data() { let data = b""; assert!(!has_complete_headers(data)); } #[test] fn test_too_short_data() { let data = b"GET"; assert!(!has_complete_headers(data)); } #[test] fn test_response_headers() { let data = b"HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nSet-Cookie: id=123\r\n\r\n"; assert!(has_complete_headers(data)); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/packet_hash.rs
huginn-net-http/tests/packet_hash.rs
use huginn_net_http::packet_hash::*; #[test] fn test_hash_ipv4_tcp_flow() { // Ethernet + IPv4 + TCP packet let mut packet = vec![0u8; 54]; // Ethernet header packet[12] = 0x08; // IPv4 EtherType packet[13] = 0x00; // IPv4 header (starts at byte 14) packet[14] = 0x45; // Version 4, IHL 5 (20 bytes) packet[23] = 6; // Protocol: TCP // Source IP: 192.168.1.1 packet[26] = 192; packet[27] = 168; packet[28] = 1; packet[29] = 1; // Dest IP: 10.0.0.1 packet[30] = 10; packet[31] = 0; packet[32] = 0; packet[33] = 1; // TCP header (starts at byte 34) packet[34] = 0x00; // Source port: 80 packet[35] = 0x50; packet[36] = 0x1F; // Dest port: 8080 packet[37] = 0x90; let worker1 = hash_flow(&packet, 4); let worker2 = hash_flow(&packet, 4); // Same packet should always go to same worker assert_eq!(worker1, worker2); assert!(worker1 < 4); } #[test] fn test_hash_ipv4_different_flows() { let mut packet1 = vec![0u8; 54]; let mut packet2 = vec![0u8; 54]; // Setup both as valid IPv4 TCP packets for packet in [&mut packet1, &mut packet2] { packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; packet[23] = 6; } // Packet 1: 192.168.1.1:80 -> 10.0.0.1:8080 packet1[26..30].copy_from_slice(&[192, 168, 1, 1]); packet1[30..34].copy_from_slice(&[10, 0, 0, 1]); packet1[34..36].copy_from_slice(&[0x00, 0x50]); // port 80 packet1[36..38].copy_from_slice(&[0x1F, 0x90]); // port 8080 // Packet 2: 192.168.1.2:80 -> 10.0.0.1:8080 (different source IP) packet2[26..30].copy_from_slice(&[192, 168, 1, 2]); packet2[30..34].copy_from_slice(&[10, 0, 0, 1]); packet2[34..36].copy_from_slice(&[0x00, 0x50]); packet2[36..38].copy_from_slice(&[0x1F, 0x90]); let worker1 = hash_flow(&packet1, 4); let worker2 = hash_flow(&packet2, 4); // Different flows should likely go to different workers // (not guaranteed, but very likely with good hash function) assert!(worker1 < 4); assert!(worker2 < 4); } #[test] fn test_hash_fallback_on_short_packet() { let short_packet = vec![0u8; 10]; let worker = hash_flow(&short_packet, 
4); assert!(worker < 4); } #[test] fn test_hash_fallback_on_invalid_ip_version() { let mut packet = vec![0u8; 60]; packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x75; // Invalid IP version (7) let worker = hash_flow(&packet, 4); assert!(worker < 4); } #[test] fn test_hash_non_tcp_protocol() { let mut packet = vec![0u8; 54]; packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; packet[23] = 17; // UDP, not TCP packet[26..30].copy_from_slice(&[192, 168, 1, 1]); let worker = hash_flow(&packet, 4); assert!(worker < 4); } #[test] fn test_hash_ipv6_tcp_flow() { let mut packet = vec![0u8; 74]; // Ethernet header packet[12] = 0x86; // IPv6 EtherType packet[13] = 0xDD; // IPv6 header (starts at byte 14) packet[14] = 0x60; // Version 6 packet[20] = 6; // Next header: TCP // Source IP (16 bytes) packet[22..38].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, ]); // Dest IP (16 bytes) packet[38..54].copy_from_slice(&[ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, ]); // TCP header packet[54..56].copy_from_slice(&[0x00, 0x50]); // port 80 packet[56..58].copy_from_slice(&[0x1F, 0x90]); // port 8080 let worker1 = hash_flow(&packet, 4); let worker2 = hash_flow(&packet, 4); assert_eq!(worker1, worker2); assert!(worker1 < 4); } #[test] fn test_hash_consistency_across_workers() { let mut packet = vec![0u8; 54]; packet[12] = 0x08; packet[13] = 0x00; packet[14] = 0x45; packet[23] = 6; packet[26..30].copy_from_slice(&[192, 168, 1, 1]); packet[30..34].copy_from_slice(&[10, 0, 0, 1]); packet[34..36].copy_from_slice(&[0x00, 0x50]); packet[36..38].copy_from_slice(&[0x1F, 0x90]); // Same packet should map consistently regardless of worker count let worker_2 = hash_flow(&packet, 2); let worker_4 = hash_flow(&packet, 4); let worker_8 = hash_flow(&packet, 8); assert!(worker_2 < 2); assert!(worker_4 < 4); assert!(worker_8 < 8); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http2_parser.rs
huginn-net-http/tests/http2_parser.rs
use huginn_net_http::http2_parser::{ is_http2_traffic, Http2Config, Http2FrameType, Http2ParseError, Http2Parser, HTTP2_CONNECTION_PREFACE, }; fn assert_parser_error<T>( result: Result<Option<T>, Http2ParseError>, expected_discriminant: Http2ParseError, ) { match result { Err(actual) => assert_eq!( std::mem::discriminant(&actual), std::mem::discriminant(&expected_discriminant), "Expected error type {expected_discriminant:?} but got {actual:?}" ), Ok(_) => panic!("Expected error {expected_discriminant:?} but got Ok"), } } fn create_http2_frame(frame_type: u8, stream_id: u32, payload: &[u8]) -> Vec<u8> { let mut frame = Vec::new(); // Length (24 bits) let length = payload.len() as u32; frame.push(((length >> 16) & 0xFF) as u8); frame.push(((length >> 8) & 0xFF) as u8); frame.push((length & 0xFF) as u8); // Type (8 bits) frame.push(frame_type); // Flags (8 bits) frame.push(0x00); // Stream ID (32 bits, with R bit cleared) frame.extend_from_slice(&(stream_id & 0x7FFFFFFF).to_be_bytes()); // Payload frame.extend_from_slice(payload); frame } fn create_http2_request_with_preface(frames: &[Vec<u8>]) -> Vec<u8> { let mut data = Vec::from(HTTP2_CONNECTION_PREFACE); for frame in frames { data.extend_from_slice(frame); } data } #[test] fn test_http2_preface_detection() { let http2_data = HTTP2_CONNECTION_PREFACE; assert!(is_http2_traffic(http2_data)); let http1_data = b"GET / HTTP/1.1\r\n"; assert!(!is_http2_traffic(http1_data)); // Edge case: partial preface let partial_preface = &HTTP2_CONNECTION_PREFACE[..10]; assert!(!is_http2_traffic(partial_preface)); // Edge case: empty data assert!(!is_http2_traffic(&[])); } #[test] fn test_frame_type_conversion() { assert_eq!(Http2FrameType::from(0x0), Http2FrameType::Data); assert_eq!(Http2FrameType::from(0x1), Http2FrameType::Headers); assert_eq!(Http2FrameType::from(0x2), Http2FrameType::Priority); assert_eq!(Http2FrameType::from(0x3), Http2FrameType::RstStream); assert_eq!(Http2FrameType::from(0x4), Http2FrameType::Settings); 
assert_eq!(Http2FrameType::from(0x5), Http2FrameType::PushPromise); assert_eq!(Http2FrameType::from(0x6), Http2FrameType::Ping); assert_eq!(Http2FrameType::from(0x7), Http2FrameType::GoAway); assert_eq!(Http2FrameType::from(0x8), Http2FrameType::WindowUpdate); assert_eq!(Http2FrameType::from(0x9), Http2FrameType::Continuation); assert_eq!(Http2FrameType::from(0xFF), Http2FrameType::Unknown(0xFF)); } #[test] fn test_invalid_preface() { let parser = Http2Parser::new(); let invalid_data = b"GET / HTTP/1.1\r\n\r\n"; assert_parser_error(parser.parse_request(invalid_data), Http2ParseError::InvalidPreface); } #[test] fn test_empty_data() { let parser = Http2Parser::new(); // Empty data should fail preface check assert_parser_error(parser.parse_request(&[]), Http2ParseError::InvalidPreface); // Only preface, no frames let result = parser.parse_request(HTTP2_CONNECTION_PREFACE); match result { Ok(None) => {} // Expected: no request without frames Ok(Some(_)) => panic!("Should not return a request without frames"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_incomplete_frame_header() { let parser = Http2Parser::new(); // Preface + incomplete frame header (less than 9 bytes) let mut data = Vec::from(HTTP2_CONNECTION_PREFACE); data.extend_from_slice(&[0x00, 0x00, 0x00, 0x01]); // Only 4 bytes of frame header let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: incomplete data Ok(Some(_)) => panic!("Should not return a request with incomplete data"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_frame_too_large() { let parser = Http2Parser::new(); // Create frame with length exceeding max_frame_size (16384) let large_length = 20000u32; let mut frame = Vec::new(); // Length (24 bits) - exceeds max_frame_size frame.push(((large_length >> 16) & 0xFF) as u8); frame.push(((large_length >> 8) & 0xFF) as u8); frame.push((large_length & 0xFF) as u8); // Complete frame header frame.extend_from_slice(&[0x01, 0x00, 
0x00, 0x00, 0x00, 0x01]); // Headers frame, stream 1 let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: frame too large Ok(Some(_)) => panic!("Should not return a request with frame too large"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_incomplete_frame_payload() { let parser = Http2Parser::new(); // Frame header says 100 bytes payload, but we only provide 50 let mut frame = Vec::new(); frame.extend_from_slice(&[0x00, 0x00, 0x64]); // Length: 100 frame.extend_from_slice(&[0x01, 0x00, 0x00, 0x00, 0x00, 0x01]); // Headers frame, stream 1 frame.extend_from_slice(&[0x00; 50]); // Only 50 bytes instead of 100 let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: incomplete payload Ok(Some(_)) => panic!("Should not return a request with incomplete payload"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_zero_length_frame() { let parser = Http2Parser::new(); // Valid zero-length frame let frame = create_http2_frame(0x04, 0, &[]); // Settings frame with no payload let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: no headers frame Ok(Some(_)) => panic!("Should not return a request without headers frame"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_maximum_valid_frame_size() { let parser = Http2Parser::new(); // Frame with exactly max_frame_size (16384 bytes) let max_payload = vec![0x00; 16384]; let frame = create_http2_frame(0x00, 1, &max_payload); // Data frame let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: no headers frame, only data frame Ok(Some(_)) => panic!("Should not return a request without headers frame"), Err(e) => panic!("Should not error: 
{e:?}"), } } #[test] fn test_invalid_stream_id_zero_for_headers() { let parser = Http2Parser::new(); // Headers frame with stream ID 0 (invalid) let frame = create_http2_frame(0x01, 0, &[0x00]); // Headers frame, stream 0 let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); match result { Ok(None) => {} // Expected: invalid stream ID 0 Ok(Some(_)) => panic!("Should not return a request with invalid stream ID"), Err(e) => panic!("Should not error: {e:?}"), } } #[test] fn test_multiple_frames_parsing() { let parser = Http2Parser::new(); // Multiple frames: Settings + Headers let settings_frame = create_http2_frame(0x04, 0, &[]); // Settings frame let headers_frame = create_http2_frame(0x01, 1, &[0x00]); // Headers frame let data = create_http2_request_with_preface(&[settings_frame, headers_frame]); let result = parser.parse_request(&data); // Should handle gracefully - either Ok(None) or Err due to invalid HPACK match result { Ok(None) => { // Expected: no valid request parsed due to invalid HPACK } Err(Http2ParseError::HpackDecodingFailed) => { // Also expected: HPACK decoding failed } other => { panic!("Unexpected result: {other:?}"); } } } #[test] fn test_arithmetic_overflow_protection() { let parser = Http2Parser::new(); // Test frame length that would cause overflow when added to 9 let mut frame = Vec::new(); frame.extend_from_slice(&[0xFF, 0xFF, 0xFF]); // Maximum 24-bit length frame.extend_from_slice(&[0x01, 0x00, 0x00, 0x00, 0x00, 0x01]); // Headers frame let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should handle overflow gracefully without panicking assert!(result.is_ok()); } #[test] fn test_hpack_decoding_failure() { let parser = Http2Parser::new(); // Headers frame with invalid HPACK data let invalid_hpack = vec![0xFF; 10]; // Invalid HPACK data let frame = create_http2_frame(0x01, 1, &invalid_hpack); let data = create_http2_request_with_preface(&[frame]); 
let result = parser.parse_request(&data); // Should handle HPACK decoding failure gracefully match result { Ok(None) => {} // Expected: HPACK decoding failed or no valid request Ok(Some(_)) => panic!("Should not return a request with HPACK decoding failure"), Err(_) => {} // Also expected: HPACK decoding error } } #[test] fn test_missing_required_headers() { let parser = Http2Parser::new(); // This test would require valid HPACK encoding without required pseudo-headers // For now, we test the error path exists let frame = create_http2_frame(0x01, 1, &[0x00]); let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should either fail HPACK decoding or missing headers check match result { Ok(None) => {} // Expected: missing required headers or HPACK failure Ok(Some(_)) => panic!("Should not return a request with missing required headers"), Err(_) => {} // Also expected: HPACK decoding error } } #[test] fn test_response_parsing_without_preface() { let parser = Http2Parser::new(); // Response parsing doesn't require preface let frame = create_http2_frame(0x01, 1, &[0x00]); // Headers frame let result = parser.parse_response(&frame); // Should handle gracefully (likely HPACK failure) match result { Ok(None) => {} // Expected: HPACK failure or no valid response Ok(Some(_)) => panic!("Should not return a response with HPACK failure"), Err(_) => {} // Also expected: HPACK decoding error } } #[test] fn test_frame_parsing_edge_cases() { let parser = Http2Parser::new(); // Test various edge cases in frame parsing let test_cases = [ // Case 1: Frame with reserved bit set in stream ID { let mut frame = create_http2_frame(0x01, 1, &[0x00]); // Set reserved bit in stream ID frame[5] |= 0x80; frame }, // Case 2: Continuation frame (should be handled) create_http2_frame(0x09, 1, &[0x00]), // Case 3: Unknown frame type create_http2_frame(0xFF, 1, &[0x00]), ]; for (i, frame) in test_cases.iter().enumerate() { let data = 
create_http2_request_with_preface(std::slice::from_ref(frame)); let result = parser.parse_request(&data); // All should handle gracefully without panicking assert!(result.is_ok() || result.is_err(), "Test case {i} failed"); } } #[test] fn test_settings_frame_parsing() { let parser = Http2Parser::new(); // Valid settings frame with some settings let mut settings_payload = Vec::new(); // SETTINGS_HEADER_TABLE_SIZE = 4096 settings_payload.extend_from_slice(&[0x00, 0x01]); // ID settings_payload.extend_from_slice(&[0x00, 0x00, 0x10, 0x00]); // Value: 4096 // SETTINGS_ENABLE_PUSH = 0 settings_payload.extend_from_slice(&[0x00, 0x02]); // ID settings_payload.extend_from_slice(&[0x00, 0x00, 0x00, 0x00]); // Value: 0 let settings_frame = create_http2_frame(0x04, 0, &settings_payload); let headers_frame = create_http2_frame(0x01, 1, &[0x00]); let data = create_http2_request_with_preface(&[settings_frame, headers_frame]); let result = parser.parse_request(&data); // Should parse settings and attempt headers (likely fail on HPACK) assert!(result.is_ok() || result.is_err()); } #[test] fn test_cookie_parsing_edge_cases() { use huginn_net_http::http_common::HttpHeader; let parser = Http2Parser::new(); let test_cases = vec![ ("", 0), ("name=value", 1), ("name=", 1), ("name", 1), ("name=value; other=test", 2), (" name = value ", 1), ("name=value;", 1), (";name=value", 1), ("name=value;;other=test", 2), ]; for (cookie_str, expected_count) in test_cases { let headers = [HttpHeader { name: "cookie".to_string(), value: if cookie_str.is_empty() { None } else { Some(cookie_str.to_string()) }, position: 0, source: huginn_net_http::http_common::HeaderSource::Http2Header, }]; let cookie_headers: Vec<&HttpHeader> = headers .iter() .filter(|h| h.name.to_lowercase() == "cookie") .collect(); let cookies = parser.parse_cookies_from_headers(&cookie_headers); assert_eq!(cookies.len(), expected_count, "Failed for case: '{cookie_str}'"); match cookie_str { "" => { assert!(cookies.is_empty()); } 
"name=value" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); } "name=" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("".to_string())); } "name" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, None); } "name=value; other=test" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[1].name, "other"); assert_eq!(cookies[1].value, Some("test".to_string())); } " name = value " => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); } "name=value;" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); } ";name=value" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); } "name=value;;other=test" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[1].name, "other"); assert_eq!(cookies[1].value, Some("test".to_string())); } _ => {} } } } #[test] fn test_multiple_cookie_headers_http2() { use huginn_net_http::http_common::HttpHeader; let parser = Http2Parser::new(); // HTTP/2 can have multiple cookie headers according to RFC 7540 let headers = [ HttpHeader { name: "cookie".to_string(), value: Some("session_id=abc123".to_string()), position: 0, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: "cookie".to_string(), value: Some("user_id=456".to_string()), position: 1, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: "cookie".to_string(), value: Some("theme=dark; lang=en".to_string()), position: 2, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, ]; let cookie_headers: Vec<&HttpHeader> = headers .iter() .filter(|h| h.name.to_lowercase() == "cookie") .collect(); let cookies = parser.parse_cookies_from_headers(&cookie_headers); 
assert_eq!(cookies.len(), 4); assert_eq!(cookies[0].name, "session_id"); assert_eq!(cookies[0].value, Some("abc123".to_string())); assert_eq!(cookies[1].name, "user_id"); assert_eq!(cookies[1].value, Some("456".to_string())); assert_eq!(cookies[2].name, "theme"); assert_eq!(cookies[2].value, Some("dark".to_string())); assert_eq!(cookies[3].name, "lang"); assert_eq!(cookies[3].value, Some("en".to_string())); } #[test] fn test_security_malformed_frames() { let parser = Http2Parser::new(); // Test cases that could potentially cause security issues let malicious_cases = [ // Case 1: Frame with malformed length field vec![0xFF, 0xFF, 0xFF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01], // Case 2: Frame with zero stream ID for non-connection frames vec![0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF], // Case 3: Extremely large payload declaration vec![0x7F, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01], // Case 4: Invalid frame type with large payload vec![0x00, 0x10, 0x00, 0xFE, 0xFF, 0x80, 0x00, 0x00, 0x01], ]; for malicious_frame in malicious_cases.iter() { let data = create_http2_request_with_preface(std::slice::from_ref(malicious_frame)); let result = parser.parse_request(&data); // Should handle all malicious cases without panicking match result { Ok(_) | Err(_) => { // Both outcomes are acceptable as long as no panic occurs } } // Also test response parsing let response_result = parser.parse_response(malicious_frame); match response_result { Ok(_) | Err(_) => { // Both outcomes are acceptable as long as no panic occurs } } } } #[test] fn test_memory_exhaustion_protection() { let parser = Http2Parser::new(); // Test with many small frames to ensure no memory exhaustion let mut frames = Vec::new(); for i in 0..1000 { let frame = create_http2_frame(0x00, (i % 100) + 1, &[0x00]); // Data frames frames.push(frame); } let data = create_http2_request_with_preface(&frames); let result = parser.parse_request(&data); // Should handle large number of frames gracefully 
assert!(result.is_ok()); } #[test] fn test_stream_id_edge_cases() { let parser = Http2Parser::new(); let test_cases = vec![ (0x00000001, true), // Valid client stream ID (0x00000003, true), // Valid client stream ID (0x00000002, true), // Valid server stream ID (should still parse) (0x7FFFFFFF, true), // Maximum valid stream ID (0x80000001, true), // Stream ID with reserved bit (should be masked) (0x00000000, false), // Invalid for headers frame ]; for (stream_id, should_find_stream) in test_cases { let frame = create_http2_frame(0x01, stream_id, &[0x00]); // Headers frame let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); if should_find_stream { // Should attempt to parse (likely fail on HPACK but find the stream) match result { Ok(None) => {} // Expected: HPACK failure but stream found Ok(Some(_)) => panic!("Should not return a request with HPACK failure"), Err(_) => {} // Also expected: HPACK decoding error } } else { // Should return None (no valid stream found) match result { Ok(None) => {} // Expected: no valid stream Ok(Some(_)) => panic!("Should not return a request with invalid stream"), Err(e) => panic!("Should not error for invalid stream: {e:?}"), } } } } #[test] fn test_frame_flag_handling() { let parser = Http2Parser::new(); // Test different flag combinations let flag_cases = vec![ 0x00, // No flags 0x01, // END_STREAM 0x04, // END_HEADERS 0x05, // END_STREAM | END_HEADERS 0x08, // PADDED 0x20, // PRIORITY 0xFF, // All flags set ]; for flags in flag_cases { let mut frame = create_http2_frame(0x01, 1, &[0x00]); // Headers frame frame[4] = flags; // Set flags byte let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should handle all flag combinations without panicking assert!(result.is_ok() || result.is_err()); } } #[test] fn test_utf8_validation() { let parser = Http2Parser::new(); // Test cases with invalid UTF-8 sequences let invalid_utf8_cases = vec![ 
vec![0xFF, 0xFE, 0xFD], // Invalid UTF-8 start bytes vec![0x80, 0x80, 0x80], // Invalid continuation bytes vec![0xC0, 0x80], // Overlong encoding vec![0xED, 0xA0, 0x80], // Surrogate pair ]; for invalid_utf8 in invalid_utf8_cases { let frame = create_http2_frame(0x01, 1, &invalid_utf8); let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should handle invalid UTF-8 gracefully match result { Ok(_) => { // If it succeeds, the UTF-8 handling converted it safely } Err(Http2ParseError::HpackDecodingFailed) => { // Expected: HPACK decoder rejected invalid data } Err(Http2ParseError::InvalidUtf8) => { // Also acceptable: explicit UTF-8 validation } Err(_) => { // Other errors are also acceptable } } } } #[test] fn test_error_display_formatting() { // Test that all error types format correctly let errors = vec![ Http2ParseError::InvalidPreface, Http2ParseError::InvalidFrameHeader, Http2ParseError::InvalidFrameLength(12345), Http2ParseError::InvalidStreamId(67890), Http2ParseError::FrameTooLarge(999999), Http2ParseError::MissingRequiredHeaders, Http2ParseError::InvalidPseudoHeader(":invalid".to_string()), Http2ParseError::IncompleteFrame, Http2ParseError::InvalidUtf8, Http2ParseError::UnsupportedFeature("test".to_string()), Http2ParseError::HpackDecodingFailed, ]; for error in errors { let formatted = format!("{error}"); assert!(!formatted.is_empty()); assert!(!formatted.contains("Debug")); // Should be Display, not Debug } } #[test] fn test_config_edge_cases() { // Test parser with different configurations let config = Http2Config { max_frame_size: 1, // Very small max frame size strict_parsing: true, ..Default::default() }; let parser = Http2Parser::with_config(config); // Even small valid frames should be rejected let frame = create_http2_frame(0x01, 1, &[0x00, 0x00]); // 2 bytes > max_frame_size(1) let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should handle 
configuration limits gracefully assert!(result.is_ok()); } #[test] fn test_multiple_streams_handling() { let parser = Http2Parser::new(); // Create frames for multiple streams let frame1 = create_http2_frame(0x01, 1, &[0x00]); // Headers for stream 1 let frame2 = create_http2_frame(0x01, 3, &[0x00]); // Headers for stream 3 let frame3 = create_http2_frame(0x00, 1, &[0x48, 0x65, 0x6c, 0x6c, 0x6f]); // Data for stream 1 let data = create_http2_request_with_preface(&[frame1, frame2, frame3]); let result = parser.parse_request(&data); // Should handle multiple streams and pick the first valid one match result { Ok(Some(req)) => { assert_eq!(req.stream_id, 1); // Should pick stream 1 (first HEADERS frame) } Ok(None) => {} Err(_) => {} } } #[test] fn test_continuation_frames() { let parser = Http2Parser::new(); // Create HEADERS frame followed by CONTINUATION frame let headers_frame = create_http2_frame(0x01, 1, &[0x00, 0x01]); // Headers (incomplete) let continuation_frame = create_http2_frame(0x09, 1, &[0x02, 0x03]); // Continuation let data = create_http2_request_with_preface(&[headers_frame, continuation_frame]); let result = parser.parse_request(&data); // Should handle CONTINUATION frames properly match result { Ok(_) | Err(_) => {} // Both outcomes acceptable as long as no panic } } #[test] fn test_settings_frame_with_invalid_payload() { let parser = Http2Parser::new(); // Create SETTINGS frame with invalid payload (not multiple of 6 bytes) let invalid_settings = create_http2_frame(0x04, 0, &[0x00, 0x01, 0x00, 0x00, 0x10]); // 5 bytes instead of 6 let headers_frame = create_http2_frame(0x01, 1, &[0x00]); let data = create_http2_request_with_preface(&[invalid_settings, headers_frame]); let result = parser.parse_request(&data); // Should handle malformed SETTINGS gracefully match result { Ok(_) | Err(_) => {} // Both outcomes acceptable } } #[test] fn test_priority_frame_handling() { let parser = Http2Parser::new(); // Create PRIORITY frame followed by HEADERS let 
priority_frame = create_http2_frame(0x02, 1, &[0x00, 0x00, 0x00, 0x02, 0x10]); // Priority frame let headers_frame = create_http2_frame(0x01, 1, &[0x00]); let data = create_http2_request_with_preface(&[priority_frame, headers_frame]); let result = parser.parse_request(&data); // Should handle PRIORITY frames without issues match result { Ok(_) | Err(_) => {} // Both outcomes acceptable } } #[test] fn test_window_update_frame() { let parser = Http2Parser::new(); // Create WINDOW_UPDATE frame let window_update = create_http2_frame(0x08, 0, &[0x00, 0x00, 0x10, 0x00]); // Window update let headers_frame = create_http2_frame(0x01, 1, &[0x00]); let data = create_http2_request_with_preface(&[window_update, headers_frame]); let result = parser.parse_request(&data); // Should handle WINDOW_UPDATE frames match result { Ok(_) | Err(_) => {} // Both outcomes acceptable } } #[test] fn test_data_frame_without_headers() { let parser = Http2Parser::new(); // Create DATA frame without preceding HEADERS let data_frame = create_http2_frame(0x00, 1, &[0x48, 0x65, 0x6c, 0x6c, 0x6f]); // "Hello" let data = create_http2_request_with_preface(&[data_frame]); let result = parser.parse_request(&data); // Should return None (no valid request without HEADERS) match result { Ok(None) => {} // Expected Ok(Some(_)) => panic!("Should not return request without HEADERS frame"), Err(_) => {} // Also acceptable } } #[test] fn test_stream_id_zero_for_headers() { let parser = Http2Parser::new(); // Create HEADERS frame with stream ID 0 (invalid for HEADERS) let invalid_headers = create_http2_frame(0x01, 0, &[0x00]); // Stream ID 0 is invalid for HEADERS let data = create_http2_request_with_preface(&[invalid_headers]); let result = parser.parse_request(&data); // Should return None (no valid stream found) match result { Ok(None) => {} // Expected Ok(Some(_)) => panic!("Should not return request with invalid stream ID"), Err(_) => {} // Also acceptable } } #[test] fn test_mixed_frame_types_sequence() { 
let parser = Http2Parser::new(); // Create a realistic sequence of frames let settings_frame = create_http2_frame(0x04, 0, &[0x00, 0x02, 0x00, 0x00, 0x00, 0x01]); // SETTINGS let window_update = create_http2_frame(0x08, 0, &[0x00, 0x00, 0x10, 0x00]); // WINDOW_UPDATE let headers_frame = create_http2_frame(0x01, 1, &[0x00]); // HEADERS let data_frame = create_http2_frame(0x00, 1, &[0x48, 0x65, 0x6c, 0x6c, 0x6f]); // DATA let data = create_http2_request_with_preface(&[ settings_frame, window_update, headers_frame, data_frame, ]); let result = parser.parse_request(&data); // Should handle mixed frame sequence properly match result { Ok(Some(req)) => { assert_eq!(req.stream_id, 1); assert!(req.frame_sequence.len() >= 2); // Should have at least SETTINGS and HEADERS } Ok(None) => {} // Acceptable if HPACK fails Err(_) => {} // Also acceptable for HPACK errors } } #[test] fn test_response_with_invalid_status() { let parser = Http2Parser::new(); // Create response frame that would result in missing :status pseudo-header let headers_frame = create_http2_frame(0x01, 1, &[0x00]); // Headers without proper HPACK encoding let result = parser.parse_response(&headers_frame); // Should handle missing :status gracefully match result { Ok(None) => {} // Expected when :status is missing Err(Http2ParseError::MissingRequiredHeaders) => {} // Also expected Err(Http2ParseError::HpackDecodingFailed) => {} // HPACK might fail first other => panic!("Unexpected result: {other:?}"), } } #[test] fn test_frame_flags_handling() { let parser = Http2Parser::new(); // Create frame with various flags set let mut frame = create_http2_frame(0x01, 1, &[0x00]); frame[4] = 0x05; // Set END_HEADERS (0x04) and END_STREAM (0x01) flags let data = create_http2_request_with_preface(&[frame]); let result = parser.parse_request(&data); // Should handle frame flags properly match result { Ok(_) | Err(_) => {} // Both outcomes acceptable } } #[test] fn test_large_stream_id() { let parser = Http2Parser::new(); // 
Create frame with maximum valid stream ID (2^31 - 1) let max_stream_id = 0x7FFFFFFF; let headers_frame = create_http2_frame(0x01, max_stream_id, &[0x00]); let data = create_http2_request_with_preface(&[headers_frame]); let result = parser.parse_request(&data); // Should handle large stream IDs match result { Ok(Some(req)) => { assert_eq!(req.stream_id, max_stream_id); } Ok(None) => {} // Acceptable if HPACK fails Err(_) => {} // Also acceptable } } #[test] fn test_empty_payload_frames() { let parser = Http2Parser::new(); // Create frames with empty payloads let empty_headers = create_http2_frame(0x01, 1, &[]); // Empty HEADERS let empty_data = create_http2_frame(0x00, 1, &[]); // Empty DATA let data = create_http2_request_with_preface(&[empty_headers, empty_data]); let result = parser.parse_request(&data); // Should handle empty payloads gracefully match result { Ok(_) | Err(_) => {} // Both outcomes acceptable } } #[test] fn test_cookie_and_referer_excluded_from_headers_list_http2() { use huginn_net_http::http_common::HttpHeader; let parser = Http2Parser::new(); // Create HTTP/2 headers including cookie and referer let headers = [ HttpHeader { name: ":method".to_string(), value: Some("GET".to_string()), position: 0, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: ":path".to_string(), value: Some("/page".to_string()), position: 1, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: ":authority".to_string(), value: Some("example.com".to_string()), position: 2, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: "cookie".to_string(), value: Some("session=abc123".to_string()), position: 3, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader { name: "referer".to_string(), value: Some("https://google.com".to_string()), position: 4, source: huginn_net_http::http_common::HeaderSource::Http2Header, }, HttpHeader {
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
true
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/signature_matcher.rs
huginn-net-http/tests/signature_matcher.rs
use crate::http::Version as HttpVersion; use huginn_net_db::{http, Database, Type}; use huginn_net_http::{ObservableHttpRequest, ObservableHttpResponse, SignatureMatcher}; #[test] fn matching_firefox2_by_http_request() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; let firefox_signature = ObservableHttpRequest { matching: huginn_net_db::observable_signals::HttpRequestObservation { version: HttpVersion::V10, horder: vec![ http::Header::new("Host"), http::Header::new("User-Agent"), http::Header::new("Accept").with_value(",*/*;q="), http::Header::new("Accept-Language").optional(), http::Header::new("Accept-Encoding").with_value("gzip,deflate"), http::Header::new("Accept-Charset").with_value("utf-8;q=0.7,*;q=0.7"), http::Header::new("Keep-Alive").with_value("300"), http::Header::new("Connection").with_value("keep-alive"), ], habsent: vec![], expsw: "Firefox/".to_string(), }, lang: None, user_agent: None, headers: vec![], cookies: vec![], referer: None, method: Some("GET".to_string()), uri: Some("/".to_string()), }; let matcher = SignatureMatcher::new(&db); if let Some((label, _matched_db_sig, quality)) = matcher.matching_by_http_request(&firefox_signature) { assert_eq!(label.name, "Firefox"); assert_eq!(label.class, None); assert_eq!(label.flavor, Some("2.x".to_string())); assert_eq!(label.ty, Type::Specified); assert_eq!(quality, 1.0); } else { panic!("No match found for Firefox 2.x HTTP signature"); } } #[test] fn matching_apache_by_http_response() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; let apache_signature = ObservableHttpResponse { matching: huginn_net_db::observable_signals::HttpResponseObservation { version: HttpVersion::V11, horder: vec![ http::Header::new("Date"), http::Header::new("Server"), http::Header::new("Last-Modified").optional(), http::Header::new("Accept-Ranges") .optional() 
.with_value("bytes"), http::Header::new("Content-Length").optional(), http::Header::new("Content-Range").optional(), http::Header::new("Keep-Alive").with_value("timeout"), http::Header::new("Connection").with_value("Keep-Alive"), http::Header::new("Transfer-Encoding") .optional() .with_value("chunked"), http::Header::new("Content-Type"), ], habsent: vec![], expsw: "Apache".to_string(), }, headers: vec![], status_code: Some(200), }; let matcher = SignatureMatcher::new(&db); if let Some((label, _matched_db_sig, quality)) = matcher.matching_by_http_response(&apache_signature) { assert_eq!(label.name, "Apache"); assert_eq!(label.class, None); assert_eq!(label.flavor, Some("2.x".to_string())); assert_eq!(label.ty, Type::Specified); assert_eq!(quality, 1.0); } else { panic!("No match found for Apache 2.x HTTP response signature"); } } #[test] fn matching_android_chrome_by_http_request() { let db = match Database::load_default() { Ok(db) => db, Err(e) => { panic!("Failed to create default database: {e}"); } }; let android_chrome_signature = ObservableHttpRequest { matching: huginn_net_db::observable_signals::HttpRequestObservation { version: HttpVersion::V11, // HTTP/1.1 horder: vec![ http::Header::new("Host"), http::Header::new("Connection").with_value("keep-alive"), http::Header::new("User-Agent"), http::Header::new("Accept").with_value("image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8"), http::Header::new("Referer").optional(), // ?Referer http::Header::new("Accept-Encoding").with_value("gzip, deflate"), http::Header::new("Accept-Language").with_value("en-US,en;q=0.9,es;q=0.8"), ], habsent: vec![ http::Header::new("Accept-Charset"), http::Header::new("Keep-Alive"), ], expsw: "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Mobile Safari/537.36".to_string(), }, lang: Some("English".to_string()), user_agent: Some("Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Mobile 
Safari/537.36".to_string()), headers: vec![], cookies: vec![], referer: None, method: Some("GET".to_string()), uri: Some("/".to_string()), }; let matcher = SignatureMatcher::new(&db); match matcher.matching_by_http_request(&android_chrome_signature) { Some((label, _matched_db_sig, quality)) => { assert_eq!(label.name, "Chrome"); assert_eq!(label.class, None); assert_eq!(label.flavor, Some("11 or newer".to_string())); assert_eq!(label.ty, Type::Specified); assert_eq!(quality, 0.7); } None => { panic!("No HTTP match found for Android Chrome signature"); } } }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/golden_tests.rs
huginn-net-http/tests/golden_tests.rs
use huginn_net_db::Database; use huginn_net_http::{HttpAnalysisResult, HuginnNetHttp}; use serde::{Deserialize, Serialize}; use std::fs; use std::path::Path; use std::sync::mpsc::channel; use std::sync::Arc; // Snapshot structures for JSON serialization #[derive(Serialize, Deserialize, Debug)] struct PcapSnapshot { pcap_file: String, pcap_path: String, expected_connections: usize, connections: Vec<ConnectionSnapshot>, } #[derive(Serialize, Deserialize, Debug)] struct ConnectionSnapshot { source: EndpointSnapshot, destination: EndpointSnapshot, http_request: Option<HttpRequestSnapshot>, http_response: Option<HttpResponseSnapshot>, } #[derive(Serialize, Deserialize, Debug)] struct EndpointSnapshot { ip: String, port: u16, } #[derive(Serialize, Deserialize, Debug)] struct HttpRequestSnapshot { browser: Option<String>, lang: Option<String>, quality: String, user_agent: Option<String>, method: Option<String>, uri: Option<String>, } #[derive(Serialize, Deserialize, Debug)] struct HttpResponseSnapshot { web_server: Option<String>, quality: String, status_code: Option<u16>, headers_count: usize, } fn load_snapshot(pcap_file: &str) -> PcapSnapshot { let snapshot_path = format!("tests/snapshots/{pcap_file}.json"); let snapshot_content = fs::read_to_string(&snapshot_path) .unwrap_or_else(|_| panic!("Failed to read snapshot file: {snapshot_path}")); serde_json::from_str(&snapshot_content) .unwrap_or_else(|e| panic!("Failed to parse snapshot JSON: {e}")) } fn analyze_pcap_file(pcap_path: &str) -> Vec<HttpAnalysisResult> { assert!(Path::new(pcap_path).exists(), "PCAP file must exist: {pcap_path}"); let db = Database::load_default().unwrap_or_else(|e| panic!("Failed to load database: {e}")); let mut analyzer = HuginnNetHttp::new(Some(Arc::new(db)), 1000) .unwrap_or_else(|e| panic!("Failed to create analyzer: {e}")); let (sender, receiver) = channel(); // Run PCAP analysis in the same thread to avoid lifetime issues if let Err(e) = analyzer.analyze_pcap(pcap_path, sender, None) { 
        // NOTE(review): tail of analyze_pcap_file — its signature/head is above this chunk.
        panic!("PCAP analysis failed: {e}");
    }

    // Collect all results
    // Drain the channel non-blocking; try_recv returns Err once it is empty.
    let mut results = Vec::new();
    while let Ok(result) = receiver.try_recv() {
        // Only collect results that have meaningful HTTP data
        if has_meaningful_http_data(&result) {
            results.push(result);
        }
    }
    results
}

/// Returns true when the analysis result carries at least one HTTP artifact
/// (a parsed request or a parsed response); results with neither are noise.
fn has_meaningful_http_data(result: &HttpAnalysisResult) -> bool {
    result.http_request.is_some() || result.http_response.is_some()
}

/// Asserts that one analyzed connection matches its golden-snapshot entry.
///
/// Compares endpoints (IP/port), matcher output (browser / web server and
/// match quality), and selected signature fields. Optional snapshot fields
/// (`browser`, `lang`, `user_agent`, `method`, `uri`, `web_server`,
/// `status_code`) are only checked when present in the snapshot.
/// `connection_index` is embedded in every failure message so a mismatch can
/// be traced back to the snapshot entry. Panics on any mismatch, and when a
/// request/response expected by the snapshot is missing from the result.
fn assert_connection_matches_snapshot(
    actual: &HttpAnalysisResult,
    expected: &ConnectionSnapshot,
    connection_index: usize,
) {
    // Check HTTP Request
    if let Some(expected_request) = &expected.http_request {
        if let Some(actual_request) = &actual.http_request {
            // Check source and destination
            assert_eq!(
                actual_request.source.ip.to_string(),
                expected.source.ip,
                "Connection {connection_index}: Request source IP mismatch"
            );
            assert_eq!(
                actual_request.source.port, expected.source.port,
                "Connection {connection_index}: Request source port mismatch"
            );
            assert_eq!(
                actual_request.destination.ip.to_string(),
                expected.destination.ip,
                "Connection {connection_index}: Request destination IP mismatch"
            );
            assert_eq!(
                actual_request.destination.port, expected.destination.port,
                "Connection {connection_index}: Request destination port mismatch"
            );
            // Check browser matching
            // Only compared when BOTH sides have a browser match; an absent
            // actual match is tolerated here (quality is checked below).
            if let Some(expected_browser) = &expected_request.browser {
                if let Some(actual_browser) = &actual_request.browser_matched.browser {
                    assert_eq!(
                        actual_browser.name, *expected_browser,
                        "Connection {connection_index}: HTTP browser mismatch"
                    );
                }
            }
            // Check language
            if let Some(expected_lang) = &expected_request.lang {
                assert_eq!(
                    actual_request.lang.as_ref().unwrap_or_else(|| panic!(
                        "Connection {connection_index}: Expected language but found none"
                    )),
                    expected_lang,
                    "Connection {connection_index}: HTTP language mismatch"
                );
            }
            // Check quality — snapshots store the Debug rendering of the enum.
            assert_eq!(
                format!("{:?}", actual_request.browser_matched.quality),
                expected_request.quality,
                "Connection {connection_index}: HTTP request quality mismatch"
            );
            // Check user agent
            if let Some(expected_ua) = &expected_request.user_agent {
                assert_eq!(
                    actual_request
                        .sig
                        .user_agent
                        .as_ref()
                        .unwrap_or_else(|| panic!(
                            "Connection {connection_index}: Expected user agent but found none"
                        )),
                    expected_ua,
                    "Connection {connection_index}: User agent mismatch"
                );
            }
            // Check method
            if let Some(expected_method) = &expected_request.method {
                assert_eq!(
                    actual_request.sig.method.as_ref().unwrap_or_else(|| panic!(
                        "Connection {connection_index}: Expected method but found none"
                    )),
                    expected_method,
                    "Connection {connection_index}: HTTP method mismatch"
                );
            }
            // Check URI
            if let Some(expected_uri) = &expected_request.uri {
                assert_eq!(
                    actual_request.sig.uri.as_ref().unwrap_or_else(|| panic!(
                        "Connection {connection_index}: Expected URI but found none"
                    )),
                    expected_uri,
                    "Connection {connection_index}: HTTP URI mismatch"
                );
            }
        } else {
            panic!("Connection {connection_index}: Expected HTTP request but found none");
        }
    }

    // Check HTTP Response — mirrors the request checks above.
    if let Some(expected_response) = &expected.http_response {
        if let Some(actual_response) = &actual.http_response {
            // Check source and destination
            assert_eq!(
                actual_response.source.ip.to_string(),
                expected.source.ip,
                "Connection {connection_index}: Response source IP mismatch"
            );
            assert_eq!(
                actual_response.source.port, expected.source.port,
                "Connection {connection_index}: Response source port mismatch"
            );
            assert_eq!(
                actual_response.destination.ip.to_string(),
                expected.destination.ip,
                "Connection {connection_index}: Response destination IP mismatch"
            );
            assert_eq!(
                actual_response.destination.port, expected.destination.port,
                "Connection {connection_index}: Response destination port mismatch"
            );
            // Check web server matching
            if let Some(expected_server) = &expected_response.web_server {
                if let Some(actual_server) = &actual_response.web_server_matched.web_server {
                    assert_eq!(
                        actual_server.name, *expected_server,
                        "Connection {connection_index}: HTTP web server mismatch"
                    );
                }
            }
            // Check quality — Debug rendering, as for requests.
            assert_eq!(
                format!("{:?}", actual_response.web_server_matched.quality),
                expected_response.quality,
                "Connection {connection_index}: HTTP response quality mismatch"
            );
            // Check status code
            if let Some(expected_status) = expected_response.status_code {
                assert_eq!(
                    actual_response.sig.status_code.unwrap_or_else(|| panic!(
                        "Connection {connection_index}: Expected status code but found none"
                    )),
                    expected_status,
                    "Connection {connection_index}: HTTP status code mismatch"
                );
            }
            // Check headers count
            assert_eq!(
                actual_response.sig.headers.len(),
                expected_response.headers_count,
                "Connection {connection_index}: Headers count mismatch"
            );
        } else {
            panic!("Connection {connection_index}: Expected HTTP response but found none");
        }
    }
}

/// Golden test: compares PCAP analysis output against known-good JSON snapshots
///
/// Loads the snapshot named `pcap_file`, re-analyzes the referenced capture,
/// and requires (a) the exact expected connection count and (b) a per-entry
/// field match via `assert_connection_matches_snapshot`. Connections are
/// paired positionally (zip), so snapshot order must match analysis order.
fn test_pcap_with_snapshot(pcap_file: &str) {
    let snapshot = load_snapshot(pcap_file);
    let results = analyze_pcap_file(&snapshot.pcap_path);

    assert_eq!(
        results.len(),
        snapshot.expected_connections,
        "Expected {} connections in {}, found {}",
        snapshot.expected_connections,
        pcap_file,
        results.len()
    );

    for (i, (actual, expected)) in results.iter().zip(snapshot.connections.iter()).enumerate() {
        assert_connection_matches_snapshot(actual, expected, i);
    }
}

/// Entry point running every registered golden PCAP case.
#[test]
fn test_golden_http_snapshots() {
    let golden_test_cases = [
        "http-simple-get",
        // Add more PCAP files here as golden tests:
        // "http-post",
        // "http-headers",
    ];

    for pcap_file in golden_test_cases {
        println!("Running golden test for: {pcap_file}");
        test_pcap_with_snapshot(pcap_file);
    }
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http2_process.rs
huginn-net-http/tests/http2_process.rs
//! Tests for HTTP/2 detection, frame-completeness checks, and conversion of
//! parsed requests/responses into observables (`huginn_net_http::http2_process`).
//! Frame fixtures follow the 9-byte HTTP/2 frame header layout:
//! 3-byte length, 1-byte type, 1-byte flags, 4-byte stream id.

use huginn_net_db::http;
use huginn_net_http::http2_parser::HTTP2_CONNECTION_PREFACE;
use huginn_net_http::http2_process::{
    convert_http2_request_to_observable, convert_http2_response_to_observable,
    extract_traffic_classification, has_complete_data, looks_like_http2_response, Http2Processor,
};
use huginn_net_http::http_common::HttpProcessor;
use huginn_net_http::{http2_parser, http_common};

#[test]
fn test_http2_request_conversion() {
    // Create a mock HTTP/2 request
    let req = http2_parser::Http2Request {
        method: "GET".to_string(),
        path: "/test".to_string(),
        authority: Some("example.com".to_string()),
        scheme: Some("https".to_string()),
        version: http::Version::V20,
        headers: vec![],
        cookies: vec![],
        referer: None,
        stream_id: 1,
        parsing_metadata: http_common::ParsingMetadata {
            header_count: 0,
            duplicate_headers: vec![],
            case_variations: std::collections::HashMap::new(),
            parsing_time_ns: 0,
            has_malformed_headers: false,
            request_line_length: 0,
            total_headers_length: 0,
        },
        frame_sequence: vec![],
        settings: http2_parser::Http2Settings::default(),
    };

    // Method, URI, and version must survive the conversion unchanged.
    let observable = convert_http2_request_to_observable(req);
    assert_eq!(observable.matching.version, http::Version::V20);
    assert_eq!(observable.method, Some("GET".to_string()));
    assert_eq!(observable.uri, Some("/test".to_string()));
}

#[test]
fn test_http2_response_conversion() {
    let res = http2_parser::Http2Response {
        status: 200,
        version: http::Version::V20,
        headers: vec![],
        stream_id: 1,
        parsing_metadata: http_common::ParsingMetadata {
            header_count: 0,
            duplicate_headers: vec![],
            case_variations: std::collections::HashMap::new(),
            parsing_time_ns: 0,
            has_malformed_headers: false,
            request_line_length: 0,
            total_headers_length: 0,
        },
        frame_sequence: vec![],
        server: Some("nginx/1.20".to_string()),
        content_type: Some("text/html".to_string()),
    };

    let observable = convert_http2_response_to_observable(res);
    assert_eq!(observable.matching.version, http::Version::V20);
    assert_eq!(observable.status_code, Some(200));
    // The Server header value is surfaced as the expected-software field.
    assert_eq!(observable.matching.expsw, "nginx/1.20");
}

#[test]
fn test_get_diagnostic_for_http2() {
    // No user agent and no matcher input -> anonymous traffic.
    let diagnosis = http_common::get_diagnostic(None, None, None);
    assert_eq!(diagnosis, http::HttpDiagnosis::Anonymous);
}

#[test]
fn test_get_diagnostic_with_http2_user_agent() {
    // UA-declared OS agrees with the signature-matched OS -> Generic.
    let user_agent = Some("Mozilla/5.0 HTTP/2.0".to_string());
    let os = "Linux".to_string();
    let browser = Some("Firefox".to_string());
    let ua_matcher: Option<(&String, &Option<String>)> = Some((&os, &browser));
    let label = huginn_net_db::Label {
        ty: huginn_net_db::Type::Specified,
        class: None,
        name: "Linux".to_string(),
        flavor: None,
    };
    let signature_os_matcher: Option<&huginn_net_db::Label> = Some(&label);
    let diagnosis = http_common::get_diagnostic(user_agent, ua_matcher, signature_os_matcher);
    assert_eq!(diagnosis, http::HttpDiagnosis::Generic);
}

#[test]
fn test_no_preface() {
    // Plain HTTP/1.1 bytes are not complete HTTP/2 data.
    let data = b"GET /path HTTP/1.1\r\n";
    assert!(!has_complete_data(data));
}

#[test]
fn test_preface_only() {
    // The connection preface alone (no frames) is not complete.
    assert!(!has_complete_data(HTTP2_CONNECTION_PREFACE));
}

#[test]
fn test_incomplete_frame() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add incomplete frame header (only 5 bytes instead of 9)
    data.extend_from_slice(&[0x00, 0x00, 0x04, 0x01, 0x00]);
    assert!(!has_complete_data(&data));
}

#[test]
fn test_complete_settings_frame_no_headers() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add complete SETTINGS frame (type 0x4)
    data.extend_from_slice(&[
        0x00, 0x00, 0x00, // Length: 0
        0x04, // Type: SETTINGS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x00, // Stream ID: 0
    ]);
    assert!(!has_complete_data(&data)); // No HEADERS frame
}

#[test]
fn test_complete_headers_frame() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add complete HEADERS frame (type 0x1) with stream ID 1
    data.extend_from_slice(&[
        0x00, 0x00, 0x04, // Length: 4
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload (4 bytes)
    ]);
    assert!(has_complete_data(&data));
}

#[test]
fn test_incomplete_headers_frame() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add incomplete HEADERS frame (missing payload)
    data.extend_from_slice(&[
        0x00, 0x00, 0x04, // Length: 4
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, // Only 2 bytes of 4-byte payload
    ]);
    assert!(!has_complete_data(&data));
}

#[test]
fn test_headers_frame_stream_id_zero() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add HEADERS frame with stream ID 0 (invalid)
    data.extend_from_slice(&[
        0x00, 0x00, 0x00, // Length: 0
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x00, // Stream ID: 0 (invalid)
    ]);
    assert!(!has_complete_data(&data)); // Stream ID 0 is invalid for HEADERS
}

#[test]
fn test_multiple_frames_with_headers() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add SETTINGS frame first
    data.extend_from_slice(&[
        0x00, 0x00, 0x06, // Length: 6
        0x04, // Type: SETTINGS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x00, // Stream ID: 0
        0x00, 0x02, 0x00, 0x00, 0x00, 0x01, // Setting: ENABLE_PUSH = 1
    ]);
    // Add HEADERS frame
    data.extend_from_slice(&[
        0x00, 0x00, 0x04, // Length: 4
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload (4 bytes)
    ]);
    assert!(has_complete_data(&data)); // Should find the HEADERS frame
}

#[test]
fn test_response_frame_detection() {
    // Test response without preface (just frames)
    let response_data = [
        0x00, 0x00, 0x04, // Length: 4
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload (4 bytes)
    ];
    assert!(has_complete_data(&response_data));
}

#[test]
fn test_frame_too_large() {
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    // Add frame with extremely large length (will cause overflow)
    data.extend_from_slice(&[
        0xFF, 0xFF, 0xFF, // Length: 16777215 (max 24-bit)
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
    ]);
    assert!(!has_complete_data(&data)); // Should reject oversized frame
}

#[test]
fn test_can_process_request_detection() {
    let processor = Http2Processor::new();

    // Valid HTTP/2 request with preface
    let mut valid_data = HTTP2_CONNECTION_PREFACE.to_vec();
    valid_data.extend_from_slice(&[0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01]);
    assert!(processor.can_process_request(&valid_data));

    // HTTP/1.1 request
    let http1_data = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
    assert!(!processor.can_process_request(http1_data));

    // Too short data
    let short_data = b"PRI * HTTP/2.0";
    assert!(!processor.can_process_request(short_data));

    // Invalid preface
    let invalid_preface = b"GET * HTTP/2.0\r\n\r\nSM\r\n\r\n";
    assert!(!processor.can_process_request(invalid_preface));
}

#[test]
fn test_can_process_response_detection() {
    let processor = Http2Processor::new();

    // Valid HTTP/2 response frame
    let valid_response = [
        0x00, 0x00, 0x04, // Length: 4
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload
    ];
    assert!(processor.can_process_response(&valid_response));

    // HTTP/1.1 response
    let http1_response = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";
    assert!(!processor.can_process_response(http1_response));

    // Too short data
    let short_data = b"HTTP/2";
    assert!(!processor.can_process_response(short_data));

    // Invalid frame type
    let invalid_frame = [
        0x00, 0x00, 0x04, // Length: 4
        0xFF, // Type: Unknown (255)
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload
    ];
    assert!(!processor.can_process_response(&invalid_frame));
}

#[test]
fn test_looks_like_http2_response_edge_cases() {
    // Valid frame types (0-10)
    for frame_type in 0..=10 {
        let frame = [
            0x00, 0x00, 0x04, // Length: 4
            frame_type, // Type: variable
            0x00, // Flags: 0
            0x00, 0x00, 0x00, 0x01, // Stream ID: 1
            0x00, 0x00, 0x00, 0x00, // Payload
        ];
        assert!(looks_like_http2_response(&frame), "Frame type {frame_type} should be valid");
    }

    // Invalid frame type (11+)
    let invalid_frame = [
        0x00, 0x00, 0x04, // Length: 4
        11, // Type: Invalid
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
        0x00, 0x00, 0x00, 0x00, // Payload
    ];
    assert!(!looks_like_http2_response(&invalid_frame));

    // Frame too large (exceeds default max frame size)
    let large_frame = [
        0x00, 0x40, 0x01, // Length: 16385 (> 16384)
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
    ];
    assert!(!looks_like_http2_response(&large_frame));

    // Maximum valid frame size
    let max_frame = [
        0x00, 0x40, 0x00, // Length: 16384 (exactly max)
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
    ];
    assert!(looks_like_http2_response(&max_frame));
}

#[test]
fn test_processor_trait_methods() {
    let processor = Http2Processor::new();

    // Test trait methods
    assert_eq!(processor.supported_version(), http::Version::V20);
    assert_eq!(processor.name(), "HTTP/2");
}

#[test]
fn test_parse_http2_request_error_handling() {
    let processor = Http2Processor::new();

    // Invalid preface should return error
    let invalid_data = b"GET / HTTP/1.1\r\n\r\n";
    let result = processor.process_request(invalid_data);
    assert!(result.is_err());

    // Valid preface but no frames should return Ok(None)
    let result = processor.process_request(HTTP2_CONNECTION_PREFACE);
    match result {
        Ok(None) => {} // Expected
        Ok(Some(_)) => panic!("Should return None for preface without frames"),
        Err(e) => panic!("Should not error for valid preface: {e:?}"),
    }
}

#[test]
fn test_parse_http2_response_error_handling() {
    let processor = Http2Processor::new();

    // Empty data should return Ok(None)
    let result = processor.process_response(&[]);
    match result {
        Ok(None) => {} // Expected
        Ok(Some(_)) => panic!("Should return None for empty data"),
        Err(e) => panic!("Should not error for empty data: {e:?}"),
    }

    // Invalid frame should return error or None
    let invalid_frame = [0x00, 0x00, 0x01, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00];
    let result = processor.process_response(&invalid_frame);
    // Should handle gracefully (either error or None)
    match result {
        Ok(None) => {} // Acceptable
        Err(_) => {}   // Also acceptable
        Ok(Some(_)) => panic!("Should not return valid response for invalid frame"),
    }
}

#[test]
fn test_extract_traffic_classification() {
    // Test with Some value
    assert_eq!(extract_traffic_classification(Some("test")), "test");

    // Test with None
    assert_eq!(extract_traffic_classification(None), "???");
}

#[test]
fn test_has_complete_data_edge_cases() {
    let processor = Http2Processor::new();

    // Empty data
    assert!(!processor.has_complete_data(&[]));

    // Only preface
    assert!(!processor.has_complete_data(HTTP2_CONNECTION_PREFACE));

    // Preface + incomplete frame header
    let mut data = HTTP2_CONNECTION_PREFACE.to_vec();
    data.extend_from_slice(&[0x00, 0x00, 0x04, 0x01]); // Only 4 bytes of 9-byte header
    assert!(!processor.has_complete_data(&data));

    // Response data (no preface) with valid frame
    let response_frame = [
        0x00, 0x00, 0x00, // Length: 0
        0x01, // Type: HEADERS
        0x00, // Flags: 0
        0x00, 0x00, 0x00, 0x01, // Stream ID: 1
    ];
    assert!(processor.has_complete_data(&response_frame));
}

#[test]
fn test_conversion_functions_with_complex_data() {
    // Test request conversion with all fields
    let req = http2_parser::Http2Request {
        method: "POST".to_string(),
        path: "/api/test".to_string(),
        authority: Some("api.example.com:443".to_string()),
        scheme: Some("https".to_string()),
        version: http::Version::V20,
        headers: vec![http_common::HttpHeader {
            name: "content-type".to_string(),
            value: Some("application/json".to_string()),
            position: 0,
            source: http_common::HeaderSource::Http2Header,
        }],
        cookies: vec![http_common::HttpCookie {
            name: "session".to_string(),
            value: Some("abc123".to_string()),
            position: 1,
        }],
        referer: Some("https://example.com".to_string()),
        stream_id: 3,
        parsing_metadata: http_common::ParsingMetadata {
            header_count: 1,
            duplicate_headers: vec![],
            case_variations: std::collections::HashMap::new(),
            parsing_time_ns: 12345,
            has_malformed_headers: false,
            request_line_length: 0,
            total_headers_length: 25,
        },
        frame_sequence: vec![
            http2_parser::Http2FrameType::Settings,
            http2_parser::Http2FrameType::Headers,
            http2_parser::Http2FrameType::Data,
        ],
        settings: http2_parser::Http2Settings {
            header_table_size: Some(4096),
            enable_push: Some(false),
            max_concurrent_streams: Some(100),
            initial_window_size: Some(65535),
            max_frame_size: Some(16384),
            max_header_list_size: Some(8192),
        },
    };

    let observable = convert_http2_request_to_observable(req);
    assert_eq!(observable.method, Some("POST".to_string()));
    assert_eq!(observable.uri, Some("/api/test".to_string()));
    assert_eq!(observable.matching.version, http::Version::V20);
    assert!(!observable.headers.is_empty());

    // Test response conversion with all fields
    let res = http2_parser::Http2Response {
        status: 201,
        version: http::Version::V20,
        headers: vec![
            http_common::HttpHeader {
                name: "server".to_string(),
                value: Some("nginx/1.20".to_string()),
                position: 0,
                source: http_common::HeaderSource::Http2Header,
            },
            http_common::HttpHeader {
                name: "content-type".to_string(),
                value: Some("text/html".to_string()),
                position: 1,
                source: http_common::HeaderSource::Http2Header,
            },
        ],
        stream_id: 5,
        parsing_metadata: http_common::ParsingMetadata {
            header_count: 2,
            duplicate_headers: vec![],
            case_variations: std::collections::HashMap::new(),
            parsing_time_ns: 54321,
            has_malformed_headers: false,
            request_line_length: 0,
            total_headers_length: 30,
        },
        frame_sequence: vec![
            http2_parser::Http2FrameType::Headers,
            http2_parser::Http2FrameType::Data,
        ],
        server: Some("nginx/1.20".to_string()),
        content_type: Some("text/html".to_string()),
    };

    let observable = convert_http2_response_to_observable(res);
    assert_eq!(observable.status_code, Some(201));
    assert_eq!(observable.matching.version, http::Version::V20);
    assert!(!observable.headers.is_empty());
}
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http1_parser.rs
huginn-net-http/tests/http1_parser.rs
use huginn_net_db::http; use huginn_net_http::http1_parser::{Http1Config, Http1ParseError, Http1Parser}; fn unwrap_parser_result<T>(result: Result<Option<T>, Http1ParseError>) -> T { match result { Ok(Some(value)) => value, Ok(None) => { panic!("Parser returned None when Some was expected") } Err(e) => { panic!("Parser failed with error: {e}") } } } fn assert_parser_none<T>(result: Result<Option<T>, Http1ParseError>) { match result { Ok(None) => {} Ok(Some(_)) => panic!("Expected None but got Some"), Err(e) => panic!("Expected None but got error: {e}"), } } #[test] fn test_parse_simple_request() { let parser = Http1Parser::new(); let data = b"GET /path HTTP/1.1\r\nHost: example.com\r\nUser-Agent: test\r\n\r\n"; let request = unwrap_parser_result(parser.parse_request(data)); assert_eq!(request.method, "GET"); assert_eq!(request.uri, "/path"); assert_eq!(request.version, http::Version::V11); assert_eq!(request.headers.len(), 2); assert_eq!(request.host, Some("example.com".to_string())); assert_eq!(request.user_agent, Some("test".to_string())); } #[test] fn test_parse_request_with_cookies() { let parser = Http1Parser::new(); let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nCookie: name1=value1; name2=value2\r\n\r\n"; let request = unwrap_parser_result(parser.parse_request(data)); assert_eq!(request.cookies.len(), 2); assert_eq!(request.cookies[0].name, "name1"); assert_eq!(request.cookies[0].value, Some("value1".to_string())); assert_eq!(request.cookies[1].name, "name2"); assert_eq!(request.cookies[1].value, Some("value2".to_string())); } #[test] fn test_parse_request_with_referer() { let parser = Http1Parser::new(); let data = b"GET /page HTTP/1.1\r\nHost: example.com\r\nReferer: https://google.com/search\r\nUser-Agent: test-browser\r\n\r\n"; let request = unwrap_parser_result(parser.parse_request(data)); assert_eq!(request.method, "GET"); assert_eq!(request.uri, "/page"); assert_eq!(request.host, Some("example.com".to_string())); assert_eq!(request.referer, 
Some("https://google.com/search".to_string())); assert_eq!(request.user_agent, Some("test-browser".to_string())); } #[test] fn test_parse_request_without_referer() { let parser = Http1Parser::new(); let data = b"GET /page HTTP/1.1\r\nHost: example.com\r\nUser-Agent: test-browser\r\n\r\n"; let request = unwrap_parser_result(parser.parse_request(data)); assert_eq!(request.method, "GET"); assert_eq!(request.uri, "/page"); assert_eq!(request.host, Some("example.com".to_string())); assert_eq!(request.referer, None); assert_eq!(request.user_agent, Some("test-browser".to_string())); } #[test] fn test_cookie_and_referer_excluded_from_headers_list() { let parser = Http1Parser::new(); let data = b"GET /page HTTP/1.1\r\nHost: example.com\r\nCookie: session=abc123\r\nReferer: https://google.com\r\nUser-Agent: test-browser\r\nAccept: text/html\r\n\r\n"; let request = unwrap_parser_result(parser.parse_request(data)); assert_eq!(request.cookies.len(), 1); assert_eq!(request.cookies[0].name, "session"); assert_eq!(request.cookies[0].value, Some("abc123".to_string())); assert_eq!(request.referer, Some("https://google.com".to_string())); let header_names: Vec<String> = request .headers .iter() .map(|h| h.name.to_lowercase()) .collect(); assert!( !header_names.contains(&"cookie".to_string()), "Cookie header should not be in headers list" ); assert!( !header_names.contains(&"referer".to_string()), "Referer header should not be in headers list" ); assert!(header_names.contains(&"host".to_string())); assert!(header_names.contains(&"user-agent".to_string())); assert!(header_names.contains(&"accept".to_string())); assert_eq!(request.headers.len(), 3); } #[test] fn test_parse_response() { let parser = Http1Parser::new(); let data = b"HTTP/1.1 200 OK\r\nServer: nginx\r\nContent-Type: text/html\r\n\r\n"; let response = unwrap_parser_result(parser.parse_response(data)); assert_eq!(response.version, http::Version::V11); assert_eq!(response.status_code, 200); assert_eq!(response.reason_phrase, 
"OK"); assert_eq!(response.server, Some("nginx".to_string())); assert_eq!(response.content_type, Some("text/html".to_string())); } #[test] fn test_incomplete_request() { let parser = Http1Parser::new(); let data = b"GET /path HTTP/1.1\r\nHost: example.com"; assert_parser_none(parser.parse_request(data)); } #[test] fn test_malformed_request_line() { let parser = Http1Parser::new(); let data = b"INVALID REQUEST LINE\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); } #[test] fn test_header_order_preservation() { let parser = Http1Parser::new(); let data = b"GET / HTTP/1.1\r\nZ-Header: first\r\nA-Header: second\r\nM-Header: third\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert_eq!(result.headers[0].name, "Z-Header"); assert_eq!(result.headers[0].position, 0); assert_eq!(result.headers[1].name, "A-Header"); assert_eq!(result.headers[1].position, 1); assert_eq!(result.headers[2].name, "M-Header"); assert_eq!(result.headers[2].position, 2); } #[test] fn test_case_variations_detection() { let parser = Http1Parser::new(); let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nHOST: example2.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert!(result.parsing_metadata.case_variations.contains_key("host")); assert!(result .parsing_metadata .duplicate_headers .contains(&"host".to_string())); } // ========== SECURITY TESTS ========== #[test] fn test_extremely_long_request_line() { let parser = Http1Parser::new(); // Create request line longer than max_request_line_length (8192) let long_path = "a".repeat(10000); let request_line = format!("GET /{long_path} HTTP/1.1"); let data = format!("{request_line}\r\nHost: example.com\r\n\r\n"); let result = parser.parse_request(data.as_bytes()); assert!(result.is_err()); if let Err(Http1ParseError::InvalidRequestLine(msg)) = result { assert!(msg.contains("too long")); } else { panic!("Expected InvalidRequestLine error"); } } #[test] fn 
test_extremely_long_header() { let parser = Http1Parser::new(); // Create header longer than max_header_length (8192) let long_value = "x".repeat(10000); let data = format!("GET / HTTP/1.1\r\nLong-Header: {long_value}\r\n\r\n"); let result = parser.parse_request(data.as_bytes()); assert!(result.is_err()); if let Err(Http1ParseError::HeaderTooLong(len)) = result { assert!(len > 8192); } else { panic!("Expected HeaderTooLong error"); } } #[test] fn test_too_many_headers() { let parser = Http1Parser::new(); // Create more than max_headers (100) let mut data = String::from("GET / HTTP/1.1\r\n"); for i in 0..150 { data.push_str(&format!("Header-{i}: value{i}\r\n")); } data.push_str("\r\n"); let result = parser.parse_request(data.as_bytes()); assert!(result.is_err()); if let Err(Http1ParseError::TooManyHeaders(count)) = result { assert_eq!(count, 150); } else { panic!("Expected TooManyHeaders error"); } } #[test] fn test_invalid_utf8_handling() { let parser = Http1Parser::new(); // Create data with invalid UTF-8 sequences let mut data = Vec::from("GET / HTTP/1.1\r\nHost: "); data.extend_from_slice(&[0xFF, 0xFE, 0xFD]); // Invalid UTF-8 data.extend_from_slice(b"\r\n\r\n"); let result = parser.parse_request(&data); assert!(result.is_err()); if let Err(Http1ParseError::InvalidUtf8) = result { // Expected } else { panic!("Expected InvalidUtf8 error"); } } // ========== EDGE CASES ========== #[test] fn test_empty_data() { let parser = Http1Parser::new(); assert_parser_none(parser.parse_request(b"")); assert_parser_none(parser.parse_response(b"")); } #[test] fn test_only_request_line() { let parser = Http1Parser::new(); // No headers, no empty line let data = b"GET / HTTP/1.1"; assert_parser_none(parser.parse_request(data)); // With CRLF but no empty line let data = b"GET / HTTP/1.1\r\n"; assert_parser_none(parser.parse_request(data)); } #[test] fn test_different_line_endings() { let parser = Http1Parser::new(); // Test with LF only (Unix style) let data_lf = b"GET / 
HTTP/1.1\nHost: example.com\n\n"; let result_lf = unwrap_parser_result(parser.parse_request(data_lf)); assert_eq!(result_lf.method, "GET"); // Test with CRLF (Windows/HTTP standard) let data_crlf = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"; let result_crlf = unwrap_parser_result(parser.parse_request(data_crlf)); assert_eq!(result_crlf.method, "GET"); } #[test] fn test_malformed_headers() { let parser = Http1Parser::new(); // Header without colon (non-strict mode) let data = b"GET / HTTP/1.1\r\nMalformed Header Without Colon\r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert!(result.parsing_metadata.has_malformed_headers); // Header with empty name let data = b"GET / HTTP/1.1\r\n: empty-name\r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert!(result.parsing_metadata.has_malformed_headers); } #[test] fn test_strict_parsing_mode() { let config = Http1Config { strict_parsing: true, ..Default::default() }; let parser = Http1Parser::with_config(config); // Malformed header should fail in strict mode let data = b"GET / HTTP/1.1\r\nMalformed Header Without Colon\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); if let Err(Http1ParseError::MalformedHeader(header)) = result { assert_eq!(header, "Malformed Header Without Colon"); } else { panic!("Expected MalformedHeader error"); } } #[test] fn test_invalid_methods() { let parser = Http1Parser::new(); let invalid_methods = ["INVALID", "123", "", "G E T", "get"]; for method in invalid_methods { let data = format!("{method} / HTTP/1.1\r\nHost: example.com\r\n\r\n"); let result = parser.parse_request(data.as_bytes()); assert!(result.is_err(), "Method '{method}' should be invalid"); } } #[test] fn test_valid_extended_methods() { let parser = Http1Parser::new(); let valid_methods = ["PROPFIND", "PROPPATCH", "MKCOL", "COPY", "MOVE", "LOCK", "UNLOCK"]; for method in valid_methods { let data = 
format!("{method} / HTTP/1.1\r\nHost: example.com\r\n\r\n"); let result = unwrap_parser_result(parser.parse_request(data.as_bytes())); assert_eq!(result.method, method); } } #[test] fn test_invalid_http_versions() { let parser = Http1Parser::new(); let invalid_versions = ["HTTP/2.0", "HTTP/0.9", "HTTP/1.2", "HTTP/1", "HTTP", "1.1"]; for version in invalid_versions { let data = format!("GET / {version}\r\nHost: example.com\r\n\r\n"); let result = parser.parse_request(data.as_bytes()); assert!(result.is_err(), "Version '{version}' should be invalid"); } } #[test] fn test_invalid_status_codes() { let parser = Http1Parser::new(); let invalid_codes = ["abc", "999999", "", "-1", "1.5"]; for code in invalid_codes { let data = format!("HTTP/1.1 {code} OK\r\nServer: test\r\n\r\n"); let result = parser.parse_response(data.as_bytes()); assert!(result.is_err(), "Status code '{code}' should be invalid"); } } #[test] fn test_edge_case_status_lines() { let parser = Http1Parser::new(); // Status line without reason phrase let data = b"HTTP/1.1 404\r\nServer: test\r\n\r\n"; let result = unwrap_parser_result(parser.parse_response(data)); assert_eq!(result.status_code, 404); assert_eq!(result.reason_phrase, ""); // Status line with spaces in reason phrase let data = b"HTTP/1.1 404 Not Found Here\r\nServer: test\r\n\r\n"; let result = unwrap_parser_result(parser.parse_response(data)); assert_eq!(result.status_code, 404); assert_eq!(result.reason_phrase, "Not Found Here"); } #[test] fn test_cookie_parsing_edge_cases() { let parser = Http1Parser::new(); let cookie_test_cases = vec![ ("", 0), // Empty cookie header ("name=value", 1), // Simple cookie ("name=", 1), // Empty value ("name", 1), // No value ("name=value; other=test", 2), // Multiple cookies (" name = value ; other=test", 2), // Whitespace handling ("name=value;", 1), // Trailing semicolon (";name=value", 1), // Leading semicolon ("name=value;;other=test", 2), // Double semicolon ("name=value; ; other=test", 2), // Empty 
cookie between ]; for (cookie_str, expected_count) in cookie_test_cases { let data = format!("GET / HTTP/1.1\r\nHost: example.com\r\nCookie: {cookie_str}\r\n\r\n"); let result = unwrap_parser_result(parser.parse_request(data.as_bytes())); assert_eq!(result.cookies.len(), expected_count, "Failed for cookie: '{cookie_str}'"); } } #[test] fn test_parse_cookies_direct() { let parser = Http1Parser::new(); let test_cases = vec![ ("", 0), ("name=value", 1), ("name=", 1), ("name", 1), ("name=value; other=test", 2), (" name = value ", 1), ("name=value;", 1), (";name=value", 1), ("name=value;;other=test", 2), ]; for (cookie_str, expected_count) in test_cases { let cookies = parser.parse_cookies(cookie_str); assert_eq!(cookies.len(), expected_count, "Failed for case: '{cookie_str}'"); match cookie_str { "" => { assert!(cookies.is_empty()); } "name=value" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 0); } "name=" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("".to_string())); assert_eq!(cookies[0].position, 0); } "name" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, None); assert_eq!(cookies[0].position, 0); } "name=value; other=test" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 0); assert_eq!(cookies[1].name, "other"); assert_eq!(cookies[1].value, Some("test".to_string())); assert_eq!(cookies[1].position, 1); } " name = value " => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 0); } "name=value;" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 0); } ";name=value" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 
0); } "name=value;;other=test" => { assert_eq!(cookies[0].name, "name"); assert_eq!(cookies[0].value, Some("value".to_string())); assert_eq!(cookies[0].position, 0); assert_eq!(cookies[1].name, "other"); assert_eq!(cookies[1].value, Some("test".to_string())); assert_eq!(cookies[1].position, 1); } _ => {} } } } #[test] fn test_parse_cookies_rfc6265_compliance() { let parser = Http1Parser::new(); // RFC 6265 examples - HTTP/1.x single cookie header format let rfc_cases = vec![ ( "session_id=abc123; user_id=456; theme=dark; lang=en", vec![ ("session_id", Some("abc123")), ("user_id", Some("456")), ("theme", Some("dark")), ("lang", Some("en")), ], ), ( "token=xyz; secure; httponly", vec![("token", Some("xyz")), ("secure", None), ("httponly", None)], ), ]; for (cookie_str, expected_cookies) in rfc_cases { let cookies = parser.parse_cookies(cookie_str); assert_eq!(cookies.len(), expected_cookies.len(), "Failed for RFC case: '{cookie_str}'"); for (i, (expected_name, expected_value)) in expected_cookies.iter().enumerate() { assert_eq!(cookies[i].name, *expected_name); assert_eq!(cookies[i].value, expected_value.map(|v| v.to_string())); assert_eq!(cookies[i].position, i); } } } #[test] fn test_header_value_edge_cases() { let parser = Http1Parser::new(); // Header with no value let data = b"GET / HTTP/1.1\r\nEmpty-Header:\r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); let empty_header = result.headers.iter().find(|h| h.name == "Empty-Header"); assert!(empty_header.is_some(), "Empty-Header should be present"); assert_eq!(empty_header.as_ref().and_then(|h| h.value.as_deref()), Some("")); // Header with only spaces as value let data = b"GET / HTTP/1.1\r\nSpaces-Header: \r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); let spaces_header = result.headers.iter().find(|h| h.name == "Spaces-Header"); assert!(spaces_header.is_some(), "Spaces-Header should be present"); 
assert_eq!(spaces_header.as_ref().and_then(|h| h.value.as_deref()), Some("")); // Header with leading/trailing spaces let data = b"GET / HTTP/1.1\r\nTrim-Header: value with spaces \r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); let trim_header = result.headers.iter().find(|h| h.name == "Trim-Header"); assert!(trim_header.is_some(), "Trim-Header should be present"); assert_eq!(trim_header.as_ref().and_then(|h| h.value.as_deref()), Some("value with spaces")); } #[test] fn test_request_line_edge_cases() { let parser = Http1Parser::new(); // Too few parts let data = b"GET HTTP/1.1\r\nHost: example.com\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); // Too many parts (extra spaces) let data = b"GET / HTTP/1.1 extra\r\nHost: example.com\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); // Empty method let data = b" / HTTP/1.1\r\nHost: example.com\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); } #[test] fn test_content_length_parsing() { let parser = Http1Parser::new(); // Valid content length let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 42\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert_eq!(result.content_length, Some(42)); // Invalid content length (non-numeric) let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nContent-Length: abc\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert_eq!(result.content_length, None); // Multiple content length headers (should use first valid one) let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nContent-Length: 42\r\nContent-Length: 24\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert_eq!(result.content_length, Some(42)); } #[test] fn test_can_parse_detection() { use huginn_net_http::http_process::HttpProcessors; let processors = HttpProcessors::new(); // Valid HTTP/1.x requests - should be 
parseable assert!(processors .parse_request(b"GET / HTTP/1.1\r\n\r\n") .is_some()); assert!(processors .parse_request(b"POST /api HTTP/1.0\r\n\r\n") .is_some()); assert!(processors .parse_request(b"PUT /data HTTP/1.1\r\n\r\n") .is_some()); // Valid HTTP/1.x responses - should be parseable assert!(processors .parse_response(b"HTTP/1.1 200 OK\r\n\r\n") .is_some()); assert!(processors .parse_response(b"HTTP/1.0 404 Not Found\r\n\r\n") .is_some()); // Invalid data - should not be parseable assert!(processors.parse_request(b"").is_none()); assert!(processors.parse_request(b"short").is_none()); assert!(processors.parse_request(b"INVALID DATA HERE").is_none()); assert!(processors.parse_request(b"PRI * HTTP/2.0\r\n").is_none()); // HTTP/2 preface } #[test] fn test_error_display_formatting() { // Test that all error types format correctly let errors = vec![ Http1ParseError::InvalidRequestLine("test".to_string()), Http1ParseError::InvalidStatusLine("test".to_string()), Http1ParseError::InvalidVersion("test".to_string()), Http1ParseError::InvalidMethod("test".to_string()), Http1ParseError::InvalidStatusCode("test".to_string()), Http1ParseError::HeaderTooLong(12345), Http1ParseError::TooManyHeaders(999), Http1ParseError::MalformedHeader("test".to_string()), Http1ParseError::IncompleteData, Http1ParseError::InvalidUtf8, ]; for error in errors { let formatted = format!("{error}"); assert!(!formatted.is_empty()); assert!(!formatted.contains("Debug")); // Should be Display, not Debug } } #[test] fn test_config_limits() { // Test with very restrictive config let config = Http1Config { max_headers: 2, max_request_line_length: 50, max_header_length: 30, preserve_header_order: true, parse_cookies: false, strict_parsing: true, }; let parser = Http1Parser::with_config(config); // Should work within limits let data = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); assert_eq!(result.method, "GET"); 
assert!(result.cookies.is_empty()); // Cookie parsing disabled // Should fail when exceeding header count limit let data = b"GET / HTTP/1.1\r\nHost: example.com\r\nUser-Agent: test\r\nAccept: */*\r\n\r\n"; let result = parser.parse_request(data); assert!(result.is_err()); } #[test] fn test_performance_metadata() { let parser = Http1Parser::new(); let data = b"GET /path HTTP/1.1\r\nHost: example.com\r\nUser-Agent: test\r\n\r\n"; let result = unwrap_parser_result(parser.parse_request(data)); // Verify metadata is populated assert!(result.parsing_metadata.parsing_time_ns > 0); assert_eq!(result.parsing_metadata.header_count, 2); assert_eq!(result.parsing_metadata.request_line_length, "GET /path HTTP/1.1".len()); assert!(result.parsing_metadata.total_headers_length > 0); assert!(!result.parsing_metadata.has_malformed_headers); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http_process.rs
huginn-net-http/tests/http_process.rs
use huginn_net_db::http; use huginn_net_http::http1_process; use huginn_net_http::http_common; use huginn_net_http::http_common::HttpProcessor; #[test] fn test_parse_http1_request() { let valid_request = b"GET / HTTP/1.1\r\n\ Host: example.com\r\n\ Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\r\n\ Accept-Language: en-US,en;q=0.9,es;q=0.8\r\n\ Cache-Control: max-age=0\r\n\ Connection: keep-alive\r\n\ If-Modified-Since: Thu, 17 Oct 2019 07:18:26 GMT\r\n\ If-None-Match: \"3147526947\"\r\n\ Upgrade-Insecure-Requests: 1\r\n\ User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36\r\n\ \r\n"; match http1_process::Http1Processor::new().process_request(valid_request) { Ok(Some(request)) => { assert_eq!(request.lang, Some("English".to_string())); assert_eq!(request.user_agent, Some("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36".to_string())); assert_eq!(request.matching.version, http::Version::V11); let expected_horder = vec![ http::Header::new("Host"), http::Header::new("Accept").with_value("text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7"), http::Header::new("Accept-Language").with_value("en-US,en;q=0.9,es;q=0.8"), http::Header::new("Cache-Control").optional(), http::Header::new("Connection").with_value("keep-alive"), http::Header::new("If-Modified-Since").optional(), http::Header::new("If-None-Match").optional(), http::Header::new("Upgrade-Insecure-Requests").with_value("1"), http::Header::new("User-Agent"), ]; assert_eq!(request.matching.horder, expected_horder); let expected_habsent = vec![ http::Header::new("Accept-Encoding"), http::Header::new("Accept-Charset"), http::Header::new("Keep-Alive"), ]; assert_eq!(request.matching.habsent, expected_habsent); 
assert_eq!(request.matching.expsw, "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"); } Ok(None) => panic!("Incomplete HTTP request"), Err(e) => panic!("Failed to parse HTTP request: {e}"), } } #[test] fn test_parse_http1_response() { let valid_response = b"HTTP/1.1 200 OK\r\n\ Server: Apache\r\n\ Content-Type: text/html; charset=UTF-8\r\n\ Content-Length: 112\r\n\ Connection: keep-alive\r\n\ \r\n\ <html><body><h1>It works!</h1></body></html>"; match http1_process::Http1Processor::new().process_response(valid_response) { Ok(Some(response)) => { assert_eq!(response.matching.expsw, "Apache"); assert_eq!(response.matching.version, http::Version::V11); let expected_horder = vec![ http::Header::new("Server"), http::Header::new("Content-Type"), http::Header::new("Content-Length").optional(), http::Header::new("Connection").with_value("keep-alive"), ]; assert_eq!(response.matching.horder, expected_horder); let expected_absent = vec![ http::Header::new("Keep-Alive"), http::Header::new("Accept-Ranges"), http::Header::new("Date"), ]; assert_eq!(response.matching.habsent, expected_absent); } Ok(None) => panic!("Incomplete HTTP response"), Err(e) => panic!("Failed to parse HTTP response: {e}"), } } #[test] fn test_get_diagnostic_for_empty_sw() { let diagnosis: http::HttpDiagnosis = http_common::get_diagnostic(None, None, None); assert_eq!(diagnosis, http::HttpDiagnosis::Anonymous); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/http_languages.rs
huginn-net-http/tests/http_languages.rs
use huginn_net_http::http_languages::get_highest_quality_language; #[test] fn test_get_highest_quality_language_from_regular_case_with_several_languages() { let accept_language = "en;q=0.8,es;q=0.9,fr;q=0.7".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("Spanish".to_string())); } #[test] fn test_get_highest_quality_language_is_first_one() { let accept_language = "en;q=1.0,es;q=0.8".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("English".to_string())); } #[test] fn test_get_highest_quality_language_is_last_one() { let accept_language = "de;q=0.9,fr;q=1.0".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("French".to_string())); } #[test] fn test_get_highest_quality_language_with_no_quality_specified() { let accept_language = "de,fr".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("German".to_string())); } #[test] fn test_get_highest_quality_language_when_variant_used() { let accept_language = "en-US;q=0.9,es;q=0.8".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("English".to_string())); } #[test] fn test_get_highest_quality_language_with_only_one_language() { let accept_language = "es".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, Some("Spanish".to_string())); } #[test] fn test_get_highest_quality_without_language() { let accept_language = "".to_string(); let result = get_highest_quality_language(accept_language); assert_eq!(result, None); } #[test] fn test_get_highest_quality_language_with_malformed_parts() { // Test with malformed Accept-Language header containing empty parts and semicolons let accept_language = "en;q=0.8,,;q=0.5,es;q=0.9,;,fr;q=0.7".to_string(); let result = get_highest_quality_language(accept_language); // Should still work and return Spanish (highest 
quality = 0.9) assert_eq!(result, Some("Spanish".to_string())); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/akamai_golden_tests.rs
huginn-net-http/tests/akamai_golden_tests.rs
//! Golden tests for Akamai HTTP/2 fingerprinting use huginn_net_http::{extract_akamai_fingerprint, Http2Frame}; use serde::{Deserialize, Serialize}; use std::fs; #[derive(Serialize, Deserialize, Debug, Clone)] struct AkamaiTestCase { name: String, description: String, frames: Vec<FrameSnapshot>, expected_fingerprint: Option<ExpectedFingerprint>, } #[derive(Serialize, Deserialize, Debug, Clone)] struct FrameSnapshot { frame_type: u8, flags: u8, stream_id: u32, payload: Vec<u8>, } #[derive(Serialize, Deserialize, Debug, Clone)] struct ExpectedFingerprint { signature: String, hash: String, settings_count: usize, window_update: u32, priority_frames_count: usize, pseudo_headers_count: usize, } impl From<FrameSnapshot> for Http2Frame { fn from(snapshot: FrameSnapshot) -> Self { Http2Frame::new(snapshot.frame_type, snapshot.flags, snapshot.stream_id, snapshot.payload) } } fn load_test_cases() -> Vec<AkamaiTestCase> { let test_data = match fs::read_to_string("tests/snapshots/akamai_test_cases.json") { Ok(data) => data, Err(e) => panic!("Failed to read akamai_test_cases.json: {e}"), }; match serde_json::from_str(&test_data) { Ok(cases) => cases, Err(e) => panic!("Failed to parse test cases JSON: {e}"), } } struct ActualFingerprint<'a> { signature: &'a str, hash: &'a str, settings_count: usize, window_update: u32, priority_count: usize, pseudo_headers_count: usize, } fn assert_fingerprint_matches( actual: &ActualFingerprint, expected: &ExpectedFingerprint, test_name: &str, ) { assert_eq!(actual.signature, expected.signature, "[{test_name}] Signature mismatch"); assert_eq!(actual.hash, expected.hash, "[{test_name}] Hash mismatch"); assert_eq!( actual.settings_count, expected.settings_count, "[{test_name}] Settings count mismatch" ); assert_eq!( actual.window_update, expected.window_update, "[{test_name}] Window update mismatch" ); assert_eq!( actual.priority_count, expected.priority_frames_count, "[{test_name}] Priority frames count mismatch" ); assert_eq!( 
actual.pseudo_headers_count, expected.pseudo_headers_count, "[{test_name}] Pseudo-headers count mismatch" ); } #[test] fn test_akamai_golden_snapshots() { let test_cases = load_test_cases(); for test_case in test_cases { println!("Running Akamai golden test: {}", test_case.name); println!(" Description: {}", test_case.description); let frames: Vec<Http2Frame> = test_case.frames.into_iter().map(Http2Frame::from).collect(); let fingerprint = extract_akamai_fingerprint(&frames); match (&fingerprint, &test_case.expected_fingerprint) { (Some(actual_fp), Some(expected)) => { let actual = ActualFingerprint { signature: &actual_fp.fingerprint, hash: &actual_fp.hash, settings_count: actual_fp.settings.len(), window_update: actual_fp.window_update, priority_count: actual_fp.priority_frames.len(), pseudo_headers_count: actual_fp.pseudo_header_order.len(), }; assert_fingerprint_matches(&actual, expected, &test_case.name); } (None, None) => { /* expected */ } (Some(actual), None) => { panic!( "[{}] Expected no fingerprint, but got: {}", test_case.name, actual.fingerprint ); } (None, Some(_)) => { panic!("[{}] Expected fingerprint, but none was generated", test_case.name); } } } } #[test] fn test_chrome_fingerprint() { let chrome_frames = vec![ Http2Frame::new( 0x4, // SETTINGS 0x0, 0, vec![ 0x00, 0x03, 0x00, 0x00, 0x00, 0x64, // HEADER_TABLE_SIZE = 100 0x00, 0x04, 0x00, 0x60, 0x00, 0x00, // INITIAL_WINDOW_SIZE = 6291456 ], ), Http2Frame::new( 0x8, // WINDOW_UPDATE 0x0, 0, vec![0x00, 0xEE, 0xFF, 0x01], // increment = 15663105 ), Http2Frame::new( 0x2, // PRIORITY 0x0, 3, vec![ 0x00, 0x00, 0x00, 0x00, // stream dependency = 0 0xC8, // weight = 200 ], ), ]; let fingerprint = if let Some(fp) = extract_akamai_fingerprint(&chrome_frames) { fp } else { panic!("Failed to extract Chrome fingerprint"); }; assert_eq!(fingerprint.settings.len(), 2); assert_eq!(fingerprint.window_update, 15662849); assert_eq!(fingerprint.priority_frames.len(), 1); // Verify signature format 
assert!(fingerprint.fingerprint.contains('|')); assert!(!fingerprint.hash.is_empty()); assert_eq!(fingerprint.hash.len(), 32); // SHA-256 truncated to 32 hex chars (like JA3) } #[test] fn test_firefox_fingerprint() { let firefox_frames = vec![ Http2Frame::new( 0x4, // SETTINGS 0x0, 0, vec![ 0x00, 0x03, 0x00, 0x00, 0x10, 0x00, // HEADER_TABLE_SIZE = 4096 0x00, 0x04, 0x00, 0x00, 0x00, 0x64, // INITIAL_WINDOW_SIZE = 100 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, // ENABLE_PUSH = 0 ], ), Http2Frame::new( 0x8, // WINDOW_UPDATE 0x0, 0, vec![0x00, 0xBE, 0xFF, 0x01], // increment = 12517377 ), ]; let fingerprint = if let Some(fp) = extract_akamai_fingerprint(&firefox_frames) { fp } else { panic!("Failed to extract Firefox fingerprint"); }; assert_eq!(fingerprint.settings.len(), 3); assert_eq!(fingerprint.window_update, 12517121); assert_eq!(fingerprint.priority_frames.len(), 0); // Verify signature format assert!(fingerprint.fingerprint.contains('|')); assert!(!fingerprint.hash.is_empty()); assert_eq!(fingerprint.hash.len(), 32); // SHA-256 truncated to 32 hex chars } #[test] fn test_fingerprint_deterministic() { let frames = vec![ Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x00, 0x64]), Http2Frame::new(0x8, 0x0, 0, vec![0x00, 0xEE, 0xFF, 0x01]), ]; let fp1 = if let Some(fp) = extract_akamai_fingerprint(&frames) { fp } else { panic!("First fingerprint extraction failed"); }; let fp2 = if let Some(fp) = extract_akamai_fingerprint(&frames) { fp } else { panic!("Second fingerprint extraction failed"); }; assert_eq!(fp1.fingerprint, fp2.fingerprint, "Signatures must be deterministic"); assert_eq!(fp1.hash, fp2.hash, "Hashes must be deterministic"); } #[test] fn test_different_browsers_different_fingerprints() { let chrome_frames = vec![ Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x00, 0x64]), Http2Frame::new(0x8, 0x0, 0, vec![0x00, 0xEE, 0xFF, 0x01]), ]; let firefox_frames = vec![ Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x10, 0x00]), 
Http2Frame::new(0x8, 0x0, 0, vec![0x00, 0xBE, 0xFF, 0x01]), ]; let chrome_fp = if let Some(fp) = extract_akamai_fingerprint(&chrome_frames) { fp } else { panic!("Chrome fingerprint failed"); }; let firefox_fp = if let Some(fp) = extract_akamai_fingerprint(&firefox_frames) { fp } else { panic!("Firefox fingerprint failed"); }; assert_ne!( chrome_fp.fingerprint, firefox_fp.fingerprint, "Different browsers must produce different signatures" ); assert_ne!( chrome_fp.hash, firefox_fp.hash, "Different browsers must produce different hashes" ); } #[test] fn test_minimal_frames() { // Only SETTINGS frame (minimum required) let minimal_frames = vec![Http2Frame::new(0x4, 0x0, 0, vec![0x00, 0x03, 0x00, 0x00, 0x00, 0x64])]; let fingerprint = if let Some(fp) = extract_akamai_fingerprint(&minimal_frames) { fp } else { panic!("Should generate fingerprint with minimal frames"); }; assert_eq!(fingerprint.settings.len(), 1); assert_eq!(fingerprint.window_update, 0); assert_eq!(fingerprint.priority_frames.len(), 0); } #[test] fn test_no_settings_frame_returns_none() { let no_settings_frames = vec![ Http2Frame::new(0x8, 0x0, 0, vec![0x00, 0xEE, 0xFF, 0x01]), Http2Frame::new(0x2, 0x0, 3, vec![0x00, 0x00, 0x00, 0x00, 0xC8]), ]; let fingerprint = extract_akamai_fingerprint(&no_settings_frames); assert!(fingerprint.is_none(), "Should return None without SETTINGS frame"); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false
biandratti/huginn-net
https://github.com/biandratti/huginn-net/blob/ef479d3357bc040b4dcdacffbb4ab8db07051f1b/huginn-net-http/tests/akamai.rs
huginn-net-http/tests/akamai.rs
use huginn_net_http::{ AkamaiFingerprint, Http2Priority, PseudoHeader, SettingId, SettingParameter, }; #[test] fn test_akamai_fingerprint_chrome() { let settings = vec![ SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }, SettingParameter { id: SettingId::EnablePush, value: 0 }, SettingParameter { id: SettingId::MaxConcurrentStreams, value: 1000 }, SettingParameter { id: SettingId::InitialWindowSize, value: 6291456 }, SettingParameter { id: SettingId::MaxFrameSize, value: 16384 }, SettingParameter { id: SettingId::MaxHeaderListSize, value: 262144 }, ]; let pseudo_headers = vec![ PseudoHeader::Method, PseudoHeader::Path, PseudoHeader::Authority, PseudoHeader::Scheme, ]; let fp = AkamaiFingerprint::new(settings, 15663105, vec![], pseudo_headers); assert_eq!( fp.fingerprint, "1:65536;2:0;3:1000;4:6291456;5:16384;6:262144|15663105|0|m,p,a,s" ); assert!(!fp.hash.is_empty()); } #[test] fn test_akamai_fingerprint_firefox() { let settings = vec![ SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }, SettingParameter { id: SettingId::InitialWindowSize, value: 131072 }, SettingParameter { id: SettingId::MaxFrameSize, value: 16384 }, ]; let pseudo_headers = vec![ PseudoHeader::Method, PseudoHeader::Path, PseudoHeader::Authority, PseudoHeader::Scheme, ]; let fp = AkamaiFingerprint::new(settings, 12517377, vec![], pseudo_headers); assert_eq!(fp.fingerprint, "1:65536;4:131072;5:16384|12517377|0|m,p,a,s"); } #[test] fn test_akamai_fingerprint_chrome_with_priorities() { let settings = vec![ SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }, SettingParameter { id: SettingId::EnablePush, value: 1 }, ]; let priorities = vec![ Http2Priority { stream_id: 1, exclusive: false, depends_on: 0, weight: 220 }, Http2Priority { stream_id: 3, exclusive: false, depends_on: 0, weight: 200 }, ]; let pseudo_headers = vec![ PseudoHeader::Method, PseudoHeader::Path, PseudoHeader::Authority, PseudoHeader::Scheme, ]; let fp = AkamaiFingerprint::new(settings, 
15663105, priorities, pseudo_headers); // Priority weight should be adjusted (220 + 1 = 221, 200 + 1 = 201) assert_eq!(fp.fingerprint, "1:65536;2:1|15663105|1:0:0:221,3:0:0:201|m,p,a,s"); } #[test] fn test_priority_weight_adjustment() { let priority = Http2Priority { stream_id: 1, exclusive: false, depends_on: 0, weight: 220, // 0-255 in frame }; // Display should show 221 (1-256) let display = format!("{priority}"); assert!(display.contains("weight=221")); } #[test] fn test_empty_fingerprint() { // Edge case: no settings, no priorities let fp = AkamaiFingerprint::new(vec![], 0, vec![], vec![]); assert_eq!(fp.fingerprint, "|00|0|"); } #[test] fn test_setting_id_conversion() { assert_eq!(SettingId::from(1), SettingId::HeaderTableSize); assert_eq!(SettingId::from(2), SettingId::EnablePush); assert_eq!(SettingId::from(9), SettingId::NoRfc7540Priorities); assert_eq!(SettingId::from(255), SettingId::Unknown(255)); } #[test] fn test_pseudo_header_display() { assert_eq!(PseudoHeader::Method.to_string(), "m"); assert_eq!(PseudoHeader::Path.to_string(), "p"); assert_eq!(PseudoHeader::Authority.to_string(), "a"); assert_eq!(PseudoHeader::Scheme.to_string(), "s"); assert_eq!(PseudoHeader::Status.to_string(), "st"); } #[test] fn test_fingerprint_hash_consistency() { let settings = vec![SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }]; let pseudo_headers = vec![PseudoHeader::Method]; let fp1 = AkamaiFingerprint::new(settings.clone(), 1000, vec![], pseudo_headers.clone()); let fp2 = AkamaiFingerprint::new(settings, 1000, vec![], pseudo_headers); assert_eq!(fp1.hash, fp2.hash); } #[test] fn test_fingerprint_hash_different_for_different_fingerprints() { let settings1 = vec![SettingParameter { id: SettingId::HeaderTableSize, value: 65536 }]; let settings2 = vec![SettingParameter { id: SettingId::HeaderTableSize, value: 4096, // Different value }]; let pseudo_headers = vec![PseudoHeader::Method]; let fp1 = AkamaiFingerprint::new(settings1, 1000, vec![], 
pseudo_headers.clone()); let fp2 = AkamaiFingerprint::new(settings2, 1000, vec![], pseudo_headers); assert_ne!(fp1.hash, fp2.hash); }
rust
Apache-2.0
ef479d3357bc040b4dcdacffbb4ab8db07051f1b
2026-01-04T20:21:12.648216Z
false