repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/tests/protocol/request_response.rs | tests/protocol/request_response.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use litep2p::{
config::ConfigBuilder as Litep2pConfigBuilder,
crypto::ed25519::Keypair,
protocol::request_response::{
Config as RequestResponseConfig, ConfigBuilder, DialOptions, RejectReason,
RequestResponseError, RequestResponseEvent,
},
transport::tcp::config::Config as TcpConfig,
types::{protocol::ProtocolName, RequestId},
Litep2p, Litep2pEvent, PeerId,
};
#[cfg(feature = "websocket")]
use litep2p::transport::websocket::config::Config as WebSocketConfig;
use futures::{channel, StreamExt};
use multiaddr::{Multiaddr, Protocol};
use multihash::Multihash;
use tokio::time::sleep;
#[cfg(feature = "quic")]
use std::net::Ipv4Addr;
use std::{
collections::{HashMap, HashSet},
net::Ipv6Addr,
task::Poll,
time::Duration,
};
use crate::common::{add_transport, Transport};
/// Dials `litep2p2`'s first listen address from `litep2p1` and drives both
/// event loops until each side has reported `ConnectionEstablished`.
///
/// A short sleep at the end gives both connections time to settle before the
/// caller starts exchanging protocol messages.
async fn connect_peers(litep2p1: &mut Litep2p, litep2p2: &mut Litep2p) {
    let address = litep2p2.listen_addresses().next().unwrap().clone();
    tracing::info!("address: {address}");
    litep2p1.dial_address(address).await.unwrap();
    let mut litep2p1_connected = false;
    let mut litep2p2_connected = false;
    loop {
        // Poll both peers concurrently; only the connection-established
        // events matter here, everything else is discarded.
        tokio::select! {
            event = litep2p1.next_event() => if let Litep2pEvent::ConnectionEstablished { .. } = event.unwrap() {
                litep2p1_connected = true;
            },
            event = litep2p2.next_event() => if let Litep2pEvent::ConnectionEstablished { .. } = event.unwrap() {
                litep2p2_connected = true;
            }
        }
        if litep2p1_connected && litep2p2_connected {
            break;
        }
    }
    sleep(Duration::from_millis(100)).await;
}
#[tokio::test]
async fn send_request_receive_response_tcp() {
    // Run the request-response exchange over two TCP transports bound to
    // ephemeral ports on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    send_request_receive_response(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn send_request_receive_response_quic() {
    // Same exchange as the TCP variant, but over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    send_request_receive_response(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn send_request_receive_response_websocket() {
    // Same exchange, but over WebSocket on the IPv4 loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    send_request_receive_response(transport(), transport()).await;
}
/// Happy-path exchange: both peers register `/protocol/1`, connect, peer1
/// sends a request and peer2 answers it; both handles' events are asserted
/// against the exact payloads sent.
async fn send_request_receive_response(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    // Both peers speak the same protocol: 1024-byte payload limit, 5 second
    // request timeout, no fallback names.
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, mut handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background for the rest of the
    // test; the events themselves are not inspected.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send request to remote peer
    let request_id = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    assert_eq!(
        handle2.next().await.unwrap(),
        RequestResponseEvent::RequestReceived {
            peer: peer1,
            fallback: None,
            request_id,
            request: vec![1, 3, 3, 7],
        }
    );
    // send response to the received request
    handle2.send_response(request_id, vec![1, 3, 3, 8]);
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::ResponseReceived {
            peer: peer2,
            request_id,
            response: vec![1, 3, 3, 8],
            fallback: None,
        }
    );
}
#[tokio::test]
async fn reject_request_tcp() {
    // Exercise request rejection over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    reject_request(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn reject_request_quic() {
    // Exercise request rejection over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    reject_request(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn reject_request_websocket() {
    // Exercise request rejection over WebSocket on the IPv4 loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    reject_request(transport(), transport()).await;
}
/// Peer1 sends a request, peer2 explicitly rejects it; the sender must
/// observe a `RequestFailed` event. The rejection surfaces on the sender's
/// side as `Rejected(SubstreamClosed)` since the receiver closes the
/// substream without responding.
async fn reject_request(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, mut handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send request to remote peer
    let request_id = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    // The inner `request_id` shadows the outer binding; both refer to the
    // same request since only one is in flight.
    if let RequestResponseEvent::RequestReceived {
        peer,
        fallback: None,
        request_id,
        request,
    } = handle2.next().await.unwrap()
    {
        assert_eq!(peer, peer1);
        assert_eq!(request, vec![1, 3, 3, 7]);
        handle2.reject_request(request_id);
    } else {
        panic!("invalid event received");
    };
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::Rejected(RejectReason::SubstreamClosed)
        }
    );
}
#[tokio::test]
async fn multiple_simultaneous_requests_tcp() {
    // Exercise several in-flight requests over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    multiple_simultaneous_requests(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn multiple_simultaneous_requests_quic() {
    // Exercise several in-flight requests over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    multiple_simultaneous_requests(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn multiple_simultaneous_requests_websocket() {
    // Exercise several in-flight requests over WebSocket on the IPv4 loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    multiple_simultaneous_requests(transport(), transport()).await;
}
/// Peer1 fires four requests before reading any responses; peer2 echoes each
/// request back with the first byte bumped from 1 to 2. Responses may arrive
/// in any order, so they are matched against `expected` by request id.
async fn multiple_simultaneous_requests(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, mut handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send multiple requests to remote peer
    let request_id1 = handle1
        .send_request(peer2, vec![1, 3, 3, 6], DialOptions::Reject)
        .await
        .unwrap();
    let request_id2 = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    let request_id3 = handle1
        .send_request(peer2, vec![1, 3, 3, 8], DialOptions::Reject)
        .await
        .unwrap();
    let request_id4 = handle1
        .send_request(peer2, vec![1, 3, 3, 9], DialOptions::Reject)
        .await
        .unwrap();
    // Expected response for each request id: the request payload with its
    // first byte rewritten to 2 by the responder below.
    let expected: HashMap<RequestId, Vec<u8>> = HashMap::from_iter([
        (request_id1, vec![2, 3, 3, 6]),
        (request_id2, vec![2, 3, 3, 7]),
        (request_id3, vec![2, 3, 3, 8]),
        (request_id4, vec![2, 3, 3, 9]),
    ]);
    let expected_requests: Vec<Vec<u8>> = vec![
        vec![1, 3, 3, 6],
        vec![1, 3, 3, 7],
        vec![1, 3, 3, 8],
        vec![1, 3, 3, 9],
    ];
    // Answer all four requests, in whatever order they arrive.
    for _ in 0..4 {
        if let RequestResponseEvent::RequestReceived {
            peer,
            fallback: None,
            request_id,
            mut request,
        } = handle2.next().await.unwrap()
        {
            assert_eq!(peer, peer1);
            if expected_requests.iter().any(|req| req == &request) {
                request[0] = 2;
                handle2.send_response(request_id, request);
            } else {
                panic!("invalid request received");
            }
        } else {
            panic!("invalid event received");
        };
    }
    // Collect all four responses and match each against its request id.
    for _ in 0..4 {
        if let RequestResponseEvent::ResponseReceived {
            peer,
            request_id,
            response,
            ..
        } = handle1.next().await.unwrap()
        {
            assert_eq!(peer, peer2);
            assert_eq!(response, expected.get(&request_id).unwrap().to_vec());
        } else {
            panic!("invalid event received");
        };
    }
}
#[tokio::test]
async fn request_timeout_tcp() {
    // Exercise request timeout handling over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    request_timeout(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn request_timeout_quic() {
    // Exercise request timeout handling over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    request_timeout(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn request_timeout_websocket() {
    // Exercise request timeout handling over WebSocket on the IPv4 loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    request_timeout(transport(), transport()).await;
}
/// Peer2 supports the protocol but never reads its handle (`_handle2` is kept
/// alive and ignored), so peer1's request goes unanswered and must fail with
/// `Timeout` once the 5 second request timeout elapses.
async fn request_timeout(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, _handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let _peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send request to remote peer and wait until the request timeout occurs
    let request_id = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    // Sleep for longer than the configured 5 second timeout so the failure
    // event is guaranteed to be pending by the time we poll.
    sleep(Duration::from_secs(7)).await;
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::Timeout,
        }
    );
}
#[tokio::test]
async fn protocol_not_supported_tcp() {
    // Exercise the unsupported-protocol failure over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    protocol_not_supported(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn protocol_not_supported_quic() {
    // Exercise the unsupported-protocol failure over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    protocol_not_supported(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn protocol_not_supported_websocket() {
    // Exercise the unsupported-protocol failure over WebSocket on the IPv4
    // loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    protocol_not_supported(transport(), transport()).await;
}
/// Peer1 registers `/protocol/1` while peer2 only registers `/protocol/2`;
/// sending a request from peer1 must fail with `UnsupportedProtocol`.
async fn protocol_not_supported(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    // Note: peer2 deliberately registers a *different* protocol name.
    let (req_resp_config2, _handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/2"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let _peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send request to remote peer; since peer2 does not support /protocol/1
    // the request must fail with `UnsupportedProtocol`
    let request_id = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::UnsupportedProtocol,
        }
    );
}
#[tokio::test]
async fn connection_close_while_request_is_pending_tcp() {
    // Exercise connection teardown with a pending request over TCP on the
    // IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    connection_close_while_request_is_pending(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn connection_close_while_request_is_pending_quic() {
    // Exercise connection teardown with a pending request over QUIC.
    let transport = || Transport::Quic(Default::default());
    connection_close_while_request_is_pending(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn connection_close_while_request_is_pending_websocket() {
    // Exercise connection teardown with a pending request over WebSocket on
    // the IPv4 loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    connection_close_while_request_is_pending(transport(), transport()).await;
}
/// Peer1 sends a request and peer2 is then torn down (handle and instance
/// dropped) before it can answer; the sender must observe the failure as
/// `Rejected(ConnectionClosed)`.
async fn connection_close_while_request_is_pending(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let _peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Only peer1's event loop is driven here: peer2 is dropped below, so its
    // instance must stay on this task to be dropped at a controlled point.
    tokio::spawn(async move {
        loop {
            let _ = litep2p1.next_event().await;
        }
    });
    // send request to remote peer, then tear down the remote side while the
    // request is still pending
    let request_id = handle1
        .send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
        .await
        .unwrap();
    drop(handle2);
    drop(litep2p2);
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::Rejected(RejectReason::ConnectionClosed),
        }
    );
}
#[tokio::test]
async fn request_too_big_tcp() {
    // Exercise the oversized-request failure over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    request_too_big(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn request_too_big_quic() {
    // Exercise the oversized-request failure over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    request_too_big(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn request_too_big_websocket() {
    // Exercise the oversized-request failure over WebSocket on the IPv4
    // loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    request_too_big(transport(), transport()).await;
}
/// Peer1's protocol is configured with a 256-byte payload limit; sending a
/// 257-byte request must fail on the sender's side with `TooLargePayload`.
async fn request_too_big(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    // Sender side: max payload of 256 bytes.
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        256,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    // Receiver side allows a larger payload; the failure is expected to be
    // raised locally by the sender's own limit.
    let (req_resp_config2, _handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        1024,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // try to send too large request to remote peer (one byte over the limit)
    let request_id =
        handle1.send_request(peer2, vec![0u8; 257], DialOptions::Reject).await.unwrap();
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::TooLargePayload,
        }
    );
}
#[tokio::test]
async fn response_too_big_tcp() {
    // Exercise the oversized-response failure over TCP on the IPv6 loopback.
    let transport = || {
        Transport::Tcp(TcpConfig {
            listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
            ..Default::default()
        })
    };
    response_too_big(transport(), transport()).await;
}
#[cfg(feature = "quic")]
#[tokio::test]
async fn response_too_big_quic() {
    // Exercise the oversized-response failure over QUIC with default settings.
    let transport = || Transport::Quic(Default::default());
    response_too_big(transport(), transport()).await;
}
#[cfg(feature = "websocket")]
#[tokio::test]
async fn response_too_big_websocket() {
    // Exercise the oversized-response failure over WebSocket on the IPv4
    // loopback.
    let transport = || {
        Transport::WebSocket(WebSocketConfig {
            listen_addresses: vec!["/ip4/127.0.0.1/tcp/0/ws".parse().unwrap()],
            ..Default::default()
        })
    };
    response_too_big(transport(), transport()).await;
}
/// Both peers cap payloads at 256 bytes. Peer1 sends a maximum-size request,
/// peer2 tries to answer with 257 bytes; the oversized response is dropped
/// and the sender observes `Rejected(SubstreamClosed)`.
async fn response_too_big(transport1: Transport, transport2: Transport) {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();
    let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        256,
        Duration::from_secs(5),
        None,
    );
    let config1 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config1);
    let config1 = add_transport(config1, transport1).build();
    let (req_resp_config2, mut handle2) = RequestResponseConfig::new(
        ProtocolName::from("/protocol/1"),
        Vec::new(),
        256,
        Duration::from_secs(5),
        None,
    );
    let config2 = Litep2pConfigBuilder::new()
        .with_keypair(Keypair::generate())
        .with_request_response_protocol(req_resp_config2);
    let config2 = add_transport(config2, transport2).build();
    let mut litep2p1 = Litep2p::new(config1).unwrap();
    let mut litep2p2 = Litep2p::new(config2).unwrap();
    let peer1 = *litep2p1.local_peer_id();
    let peer2 = *litep2p2.local_peer_id();
    // wait until peers have connected
    connect_peers(&mut litep2p1, &mut litep2p2).await;
    // Keep both event loops polled in the background.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {},
                _ = litep2p2.next_event() => {},
            }
        }
    });
    // send request to remote peer (exactly at the 256-byte limit, so it goes
    // through)
    let request_id =
        handle1.send_request(peer2, vec![0u8; 256], DialOptions::Reject).await.unwrap();
    assert_eq!(
        handle2.next().await.unwrap(),
        RequestResponseEvent::RequestReceived {
            peer: peer1,
            fallback: None,
            request_id,
            request: vec![0u8; 256],
        }
    );
    // try to send too large response to the received request
    handle2.send_response(request_id, vec![0u8; 257]);
    assert_eq!(
        handle1.next().await.unwrap(),
        RequestResponseEvent::RequestFailed {
            peer: peer2,
            request_id,
            error: RequestResponseError::Rejected(RejectReason::SubstreamClosed),
        }
    );
}
#[tokio::test]
async fn too_many_pending_requests() {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.try_init();
let (req_resp_config1, mut handle1) = RequestResponseConfig::new(
ProtocolName::from("/protocol/1"),
Vec::new(),
1024,
Duration::from_secs(5),
None,
);
let mut yamux_config = litep2p::yamux::Config::default();
yamux_config.set_max_num_streams(4);
let config1 = Litep2pConfigBuilder::new()
.with_keypair(Keypair::generate())
.with_tcp(TcpConfig {
listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
..Default::default()
})
.with_request_response_protocol(req_resp_config1)
.build();
let (req_resp_config2, _handle2) = RequestResponseConfig::new(
ProtocolName::from("/protocol/1"),
Vec::new(),
1024,
Duration::from_secs(5),
None,
);
let mut yamux_config = litep2p::yamux::Config::default();
yamux_config.set_max_num_streams(4);
let config2 = Litep2pConfigBuilder::new()
.with_keypair(Keypair::generate())
.with_tcp(TcpConfig {
listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
..Default::default()
})
.with_request_response_protocol(req_resp_config2)
.build();
let mut litep2p1 = Litep2p::new(config1).unwrap();
let mut litep2p2 = Litep2p::new(config2).unwrap();
let peer2 = *litep2p2.local_peer_id();
// wait until peers have connected
connect_peers(&mut litep2p1, &mut litep2p2).await;
// send one over the max requests to remote peer
let mut request_ids = HashSet::new();
request_ids.insert(
handle1
.send_request(peer2, vec![1, 3, 3, 6], DialOptions::Reject)
.await
.unwrap(),
);
request_ids.insert(
handle1
.send_request(peer2, vec![1, 3, 3, 7], DialOptions::Reject)
.await
.unwrap(),
);
request_ids.insert(
handle1
.send_request(peer2, vec![1, 3, 3, 8], DialOptions::Reject)
.await
.unwrap(),
);
request_ids.insert(
handle1
.send_request(peer2, vec![1, 3, 3, 9], DialOptions::Reject)
.await
.unwrap(),
);
request_ids.insert(
handle1
.send_request(peer2, vec![1, 3, 3, 9], DialOptions::Reject)
.await
.unwrap(),
);
let mut litep2p1_closed = false;
let mut litep2p2_closed = false;
while !litep2p1_closed || !litep2p2_closed || !request_ids.is_empty() {
tokio::select! {
event = litep2p1.next_event() => if let Some(Litep2pEvent::ConnectionClosed { .. }) = event {
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | true |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/fuzz/structure-aware/src/main.rs | fuzz/structure-aware/src/main.rs | // Copyright 2025 Security Research Labs GmbH
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::StreamExt;
use litep2p::{
config::ConfigBuilder,
crypto::ed25519::SecretKey,
protocol::{
libp2p::{
bitswap::{BitswapCommand, BitswapHandle, Config as BitswapConfig},
kademlia::{ConfigBuilder as KadConfigBuilder, KademliaCommand, KademliaHandle},
},
notification::{
ConfigBuilder as NotificationConfigBuilder, NotificationCommand, NotificationHandle,
},
request_response::{
ConfigBuilder as RequestResponseConfigBuilder, RequestResponseCommand,
RequestResponseHandle,
},
},
transport::tcp::config::Config as TcpConfig,
Litep2p, ProtocolName,
};
const NUM_WORKER_THREADS: usize = 32;
/// A single fuzzer-generated command, routed to the protocol handle of the
/// matching type on one of the two litep2p instances (see `main`).
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub enum FuzzMessage {
    RequestResponse(RequestResponseCommand),
    Kademlia(KademliaCommand),
    Notification(NotificationCommand),
    Bitswap(BitswapCommand),
}
/// Deserialized fuzz input: a sequence of (instance selector, command) pairs.
/// The `u8` picks the target instance (even => instance 1, odd => instance 2);
/// commands are consumed from the back via `pop()` in `main`.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct FuzzData {
    pub data: Vec<(u8, FuzzMessage)>,
}
/// Fuzz entry point: decodes the raw input into `FuzzData`, spins up two
/// connected litep2p instances, then alternates between injecting one fuzzed
/// command and polling every event stream once, until the alive-task guard
/// trips.
fn main() {
    ziggy::fuzz!(|data: &[u8]| {
        // NOTE(review): this rebinding is a no-op and could be removed.
        let data = data;
        // Inputs that don't decode into `FuzzData` are silently skipped.
        let Ok(mut data) = bincode::deserialize::<FuzzData>(data) else {
            return;
        };
        // NOTE(review): `worker_threads` on a current-thread runtime builder
        // looks ineffective — confirm whether `new_multi_thread()` was
        // intended here.
        tokio::runtime::Builder::new_current_thread()
            .worker_threads(NUM_WORKER_THREADS)
            .enable_all()
            .build()
            .unwrap()
            .block_on(async {
                // Two instances with fixed, distinct keys so runs are
                // deterministic for a given input.
                let (
                    mut litep2p1,
                    mut kad_handle1,
                    mut bitswap_handle1,
                    mut rr_handle1,
                    mut notif_handle1,
                ) = create_instance(&mut [0u8; 32]);
                let (
                    mut litep2p2,
                    mut kad_handle2,
                    mut bitswap_handle2,
                    mut rr_handle2,
                    mut notif_handle2,
                ) = create_instance(&mut [1u8; 32]);
                let address = litep2p2.listen_addresses().next().unwrap().clone();
                litep2p1.dial_address(address).await.unwrap();
                loop {
                    // Inject at most one fuzzed command per loop iteration,
                    // taken from the back of the input sequence.
                    if let Some((peer, message)) = data.data.pop() {
                        // Tuple layout: (kademlia, bitswap, request-response,
                        // notification, litep2p) handles of the chosen side.
                        let handles = if peer % 2 == 0 {
                            (
                                &mut kad_handle1,
                                &mut bitswap_handle1,
                                &mut rr_handle1,
                                &mut notif_handle1,
                                &mut litep2p1,
                            )
                        } else {
                            (
                                &mut kad_handle2,
                                &mut bitswap_handle2,
                                &mut rr_handle2,
                                &mut notif_handle2,
                                &mut litep2p2,
                            )
                        };
                        match message {
                            FuzzMessage::Kademlia(message) => {
                                // Re-add the local peer's own address before each
                                // Kademlia command so routing-table operations
                                // have at least one known peer to work with.
                                let _ = handles.0.add_known_peer(*handles.4.local_peer_id(), vec![handles.4.listen_addresses().next().unwrap().clone()]).await;
                                let _ = handles.0.fuzz_send_message(message).await;
                            }
                            FuzzMessage::Bitswap(message) => {
                                let _ = handles.1.fuzz_send_message(message).await;
                            }
                            FuzzMessage::RequestResponse(message) => {
                                let _ = handles.2.fuzz_send_message(message).await;
                            }
                            FuzzMessage::Notification(message) => {
                                let _ = handles.3.fuzz_send_message(message).await;
                            }
                        };
                    };
                    // Poll whichever stream becomes ready first; events are
                    // discarded — the fuzzer only cares about panics/hangs.
                    tokio::select! {
                        _event = litep2p1.next_event() => {},
                        _event = litep2p2.next_event() => {},
                        _event = rr_handle1.next() => {},
                        _event = rr_handle2.next() => {},
                        _event = kad_handle1.next() => {},
                        _event = kad_handle2.next() => {},
                        _event = bitswap_handle1.next() => {},
                        _event = bitswap_handle2.next() => {},
                        _event = notif_handle1.next() => {},
                        _event = notif_handle2.next() => {},
                    }
                    // Bail out once background task count grows beyond the
                    // baseline, bounding each fuzz iteration's runtime.
                    if tokio::runtime::Handle::current().metrics().num_alive_tasks() > 6 {
                        return;
                    }
                }
            });
    });
}
/// Builds one litep2p instance with Kademlia, bitswap and request-response
/// protocols over a default TCP transport, keyed by the given secret-key
/// bytes, and returns the instance together with all protocol handles.
fn create_instance(
    key: &mut [u8; 32],
) -> (
    Litep2p,
    KademliaHandle,
    BitswapHandle,
    RequestResponseHandle,
    NotificationHandle,
) {
    let (kad_config, kad_handle) = KadConfigBuilder::new()
        .with_protocol_names(vec![ProtocolName::Allocated("/ksmcc3/kad".into())])
        .build();
    let (bitswap_config, bitswap_handle) = BitswapConfig::new();
    let (rr_config, rr_handle) =
        RequestResponseConfigBuilder::new(ProtocolName::Allocated("/ksmcc3/rr".into()))
            .with_max_size(1024 * 1024)
            .build();
    // NOTE(review): `_notif_config` is never registered with the builder
    // below, so the notification protocol is not installed and the returned
    // `NotificationHandle` is detached — confirm this is intentional.
    let (_notif_config, notif_handle) =
        NotificationConfigBuilder::new(ProtocolName::Allocated("/ksmcc3/notif".into()))
            .with_max_size(1024 * 1024)
            .with_handshake("fuzz".as_bytes().to_vec())
            .build();
    let config = ConfigBuilder::new()
        .with_libp2p_kademlia(kad_config)
        .with_request_response_protocol(rr_config)
        .with_libp2p_bitswap(bitswap_config)
        .with_tcp(TcpConfig::default())
        .with_keypair(SecretKey::try_from_bytes(key).unwrap().into())
        .build();
    (
        Litep2p::new(config).unwrap(),
        kad_handle,
        bitswap_handle,
        rr_handle,
        notif_handle,
    )
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/fuzz/simple/src/protocol.rs | fuzz/simple/src/protocol.rs | // Copyright 2025 Security Research Labs GmbH
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use litep2p::{
PeerId,
codec::ProtocolCodec,
protocol::{Direction, TransportEvent, TransportService, UserProtocol},
types::protocol::ProtocolName,
};
use bytes::{Buf, BytesMut};
use futures::{SinkExt, StreamExt, future::BoxFuture, stream::FuturesUnordered};
use tokio::sync::mpsc::{Receiver, Sender, channel};
use tokio_util::codec::{Decoder, Encoder, Framed};
use std::collections::{HashMap, hash_map::Entry};
/// Codec used by [`FuzzProtocol`]: passes outbound bytes through unframed
/// and never decodes inbound bytes (see the `Decoder`/`Encoder` impls below).
#[derive(Debug)]
struct FuzzCodec;
impl Decoder for FuzzCodec {
type Item = BytesMut;
type Error = litep2p::Error;
/// We do not need to decode.
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
Ok(None)
}
}
impl Encoder<BytesMut> for FuzzCodec {
    type Error = std::io::Error;

    /// Append the raw payload to the output buffer verbatim — no length
    /// prefix or other framing is added, which is exactly what a fuzzer wants.
    fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> Result<(), Self::Error> {
        dst.extend_from_slice(item.as_ref());
        Ok(())
    }
}
/// Commands sent to the protocol.
#[derive(Debug)]
enum FuzzProtocolCommand {
    /// Send `message` to the peer identified by `peer_id`, dialing the peer
    /// first if no connection exists yet.
    SendMessage { peer_id: PeerId, message: Vec<u8> },
}
/// Custom user protocol that writes arbitrary (fuzz) payloads to remote peers
/// over raw substreams.
#[derive(Debug)]
pub struct FuzzProtocol {
    /// Protocol name this instance registers under.
    name: &'static str,
    /// Channel for receiving commands from user.
    cmd_rx: Receiver<FuzzProtocolCommand>,
    /// Connected peers.
    ///
    /// The value holds a pending outbound message, if any, waiting for a
    /// substream to open.
    peers: HashMap<PeerId, Option<Vec<u8>>>,
    /// Active inbound substreams.
    inbound: FuturesUnordered<BoxFuture<'static, (PeerId, Option<litep2p::Result<BytesMut>>)>>,
    /// Active outbound substreams.
    outbound: FuturesUnordered<BoxFuture<'static, litep2p::Result<()>>>,
}
#[async_trait::async_trait]
impl UserProtocol for FuzzProtocol {
    fn protocol(&self) -> ProtocolName {
        ProtocolName::from(self.name)
    }

    // Protocol codec is set to `Unspecified` which means that `litep2p` won't provide
    // `Sink + Stream` for the protocol and instead only `AsyncWrite + AsyncRead` are
    // provided; framing is done manually with `FuzzCodec`.
    fn codec(&self) -> ProtocolCodec {
        ProtocolCodec::Unspecified
    }

    /// Start running event loop for [`FuzzProtocol`].
    async fn run(mut self: Box<Self>, mut service: TransportService) -> litep2p::Result<()> {
        loop {
            tokio::select! {
                cmd = self.cmd_rx.recv() => match cmd {
                    Some(FuzzProtocolCommand::SendMessage { message, peer_id }) => {
                        let peer = peer_id;
                        match self.peers.entry(peer) {
                            // peer doesn't exist so dial them and save the message
                            Entry::Vacant(entry) => match service.dial(&peer) {
                                Ok(()) => {
                                    entry.insert(Some(message));
                                }
                                Err(error) => {
                                    eprintln!("failed to dial {peer:?}: {error:?}");
                                }
                            }
                            // peer exists so open a new substream
                            Entry::Occupied(mut entry) => match service.open_substream(peer) {
                                Ok(_) => {
                                    entry.insert(Some(message));
                                }
                                Err(error) => {
                                    eprintln!("failed to open substream to {peer:?}: {error:?}");
                                }
                            }
                        }
                    }
                    None => return Err(litep2p::Error::EssentialTaskClosed),
                },
                event = service.next() => match event {
                    // connection established to peer
                    //
                    // check if the peer already exists in the protocol with a pending message
                    // and if yes, open substream to the peer.
                    Some(TransportEvent::ConnectionEstablished { peer, .. }) => {
                        match self.peers.get(&peer) {
                            Some(Some(_)) => {
                                if let Err(error) = service.open_substream(peer) {
                                    println!("failed to open substream to {peer:?}: {error:?}");
                                }
                            }
                            Some(None) => {}
                            None => {
                                self.peers.insert(peer, None);
                            }
                        }
                    }
                    // substream opened
                    //
                    // for inbound substreams, move the substream to `self.inbound` and poll
                    // them for messages
                    //
                    // for outbound substreams, move the substream to `self.outbound` and send
                    // the saved message to the remote peer
                    Some(TransportEvent::SubstreamOpened { peer, substream, direction, .. }) => {
                        match direction {
                            Direction::Inbound => {
                                self.inbound.push(Box::pin(async move {
                                    (peer, Framed::new(substream, FuzzCodec).next().await)
                                }));
                            }
                            Direction::Outbound(_) => {
                                let message = self.peers.get_mut(&peer).expect("peer to exist").take().unwrap();
                                self.outbound.push(Box::pin(async move {
                                    let mut framed = Framed::new(substream, FuzzCodec);
                                    framed.send(BytesMut::from(&message[..])).await.map_err(From::from)
                                }));
                            }
                        }
                    }
                    // connection closed, remove all peer context
                    Some(TransportEvent::ConnectionClosed { peer }) => {
                        self.peers.remove(&peer);
                    }
                    None => return Err(litep2p::Error::EssentialTaskClosed),
                    _ => {},
                },
                // BUGFIX: the futures stored in `self.inbound`/`self.outbound` were pushed
                // but never polled, so outbound substreams never actually wrote the saved
                // fuzz payload (futures are lazy) and inbound substreams were never read.
                // Poll both sets so the substreams make progress; only the side effects
                // matter, the results are discarded. The `is_empty` guards prevent
                // busy-looping on an empty `FuturesUnordered`, which yields `None`
                // immediately.
                _ = self.inbound.next(), if !self.inbound.is_empty() => {}
                _ = self.outbound.next(), if !self.outbound.is_empty() => {}
            }
        }
    }
}
impl FuzzProtocol {
    /// Create a new [`FuzzProtocol`] registered under `name`, together with a
    /// handle for submitting messages to it.
    pub fn new(name: &'static str) -> (Self, FuzzProtocolHandle) {
        let (cmd_tx, cmd_rx) = channel(64);

        let protocol = Self {
            name,
            cmd_rx,
            peers: HashMap::new(),
            inbound: FuturesUnordered::new(),
            outbound: FuturesUnordered::new(),
        };

        (protocol, FuzzProtocolHandle { cmd_tx })
    }
}
/// Handle for communicating with the protocol.
#[derive(Debug)]
pub struct FuzzProtocolHandle {
    // Channel for submitting `FuzzProtocolCommand`s to the protocol task.
    cmd_tx: Sender<FuzzProtocolCommand>,
}
impl FuzzProtocolHandle {
    /// Ask the protocol to deliver `message` to `peer_id`.
    ///
    /// Fire-and-forget: send errors (e.g. the protocol task having shut down)
    /// are deliberately ignored, which is acceptable for fuzzing.
    pub async fn send_message(&mut self, peer_id: PeerId, message: Vec<u8>) {
        let _ = self.cmd_tx.send(FuzzProtocolCommand::SendMessage { peer_id, message }).await;
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/fuzz/simple/src/main.rs | fuzz/simple/src/main.rs | // Copyright 2025 Security Research Labs GmbH
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
mod protocol;
use futures::StreamExt;
use litep2p::{
Litep2p, ProtocolName,
config::ConfigBuilder,
crypto::ed25519::SecretKey,
protocol::{
libp2p::{
bitswap::{BitswapHandle, Config as BitswapConfig},
kademlia::{ConfigBuilder as KadConfigBuilder, KademliaHandle},
},
notification::{ConfigBuilder as NotificationConfigBuilder, NotificationHandle},
request_response::{ConfigBuilder as RequestResponseConfigBuilder, RequestResponseHandle},
},
transport::tcp::config::Config as TcpConfig,
};
use protocol::{FuzzProtocol, FuzzProtocolHandle};
// Number of worker threads requested from the tokio runtime builder.
// NOTE(review): the runtime below is built with `new_current_thread()`, for
// which `worker_threads()` has no effect — confirm whether a multi-threaded
// runtime was intended.
const NUM_WORKER_THREADS: usize = 32;
// Number of fuzzable protocols (kad, bitswap, request-response, notification);
// used to map the first input byte to a protocol.
const NUM_PROTOCOLS: u8 = 4;
fn main() {
    ziggy::fuzz!(|data: &[u8]| {
        // Need at least one protocol-selector byte plus one payload byte.
        if data.len() < 2 {
            return;
        };
        // First byte selects which protocol receives the fuzz payload.
        let protocol = data[0] % NUM_PROTOCOLS;
        // NOTE(review): `worker_threads()` is ignored by a current-thread
        // runtime — confirm whether `new_multi_thread()` was intended.
        tokio::runtime::Builder::new_current_thread()
            .worker_threads(NUM_WORKER_THREADS)
            .enable_all()
            .build()
            .unwrap()
            .block_on(async {
                // Target instance running the real protocol implementations.
                let (mut litep2p1, _kad_handle1, _bitswap_handle1, _rr_handle1, _notif_handle1) =
                    create_instance(&mut [0u8; 32]);
                // Sender instance whose "protocols" are raw fuzz protocols.
                let (
                    mut litep2p2,
                    mut kad_handle2,
                    mut bitswap_handle2,
                    mut rr_handle2,
                    mut notif_handle2,
                ) = create_instance_fuzz(&mut [1u8; 32]);
                let peer = *litep2p1.local_peer_id();
                let address = litep2p2.listen_addresses().next().unwrap().clone();
                litep2p1.dial_address(address).await.unwrap();
                // Route the payload (everything after the selector byte) to the
                // chosen protocol's fuzz handle.
                match protocol {
                    0 => kad_handle2.send_message(peer, data[1..].to_vec()).await,
                    1 => bitswap_handle2.send_message(peer, data[1..].to_vec()).await,
                    2 => rr_handle2.send_message(peer, data[1..].to_vec()).await,
                    3 => notif_handle2.send_message(peer, data[1..].to_vec()).await,
                    _ => unreachable!(),
                }
                // Drive both instances so connections/substreams make progress.
                loop {
                    tokio::select! {
                        _event = litep2p1.next_event() => {},
                        _event = litep2p2.next_event() => {},
                    }
                    // NOTE(review): terminates the iteration once more than 6
                    // runtime tasks are alive — confirm this is the intended
                    // end-of-exchange heuristic.
                    if tokio::runtime::Handle::current().metrics().num_alive_tasks() > 6 {
                        return;
                    }
                }
            });
    });
}
/// Create a litep2p instance running the real Kademlia, bitswap,
/// request-response and notification protocol implementations.
///
/// Returns the instance together with one handle per protocol.
fn create_instance(
    key: &mut [u8; 32],
) -> (
    Litep2p,
    KademliaHandle,
    BitswapHandle,
    RequestResponseHandle,
    NotificationHandle,
) {
    let (kad_config, kad_handle) = KadConfigBuilder::new()
        .with_protocol_names(vec![ProtocolName::Allocated("/ksmcc3/kad".into())])
        .build();
    let (bitswap_config, bitswap_handle) = BitswapConfig::new();
    let (rr_config, rr_handle) =
        RequestResponseConfigBuilder::new(ProtocolName::Allocated("/ksmcc3/rr".into()))
            .with_max_size(1024 * 1024)
            .build();
    let (notif_config, notif_handle) =
        NotificationConfigBuilder::new(ProtocolName::Allocated("/ksmcc3/notif".into()))
            .with_max_size(1024 * 1024)
            .with_handshake("fuzz".as_bytes().to_vec())
            .build();
    let config = ConfigBuilder::new()
        .with_libp2p_kademlia(kad_config)
        .with_request_response_protocol(rr_config)
        .with_libp2p_bitswap(bitswap_config)
        // BUGFIX: the notification config was built but never registered
        // (it was bound as `_notif_config` and dropped), so the returned
        // `NotificationHandle` belonged to a protocol the instance did not run
        // and fuzz payloads targeting the notification protocol went nowhere.
        .with_notification_protocol(notif_config)
        .with_tcp(TcpConfig::default())
        .with_keypair(SecretKey::try_from_bytes(key).unwrap().into())
        .build();
    (
        Litep2p::new(config).unwrap(),
        kad_handle,
        bitswap_handle,
        rr_handle,
        notif_handle,
    )
}
/// Create a litep2p instance whose protocols are all [`FuzzProtocol`]s that
/// register under the same protocol names as the real protocols created by
/// [`create_instance`], so raw fuzz payloads are negotiated onto and parsed by
/// the real protocol implementations of the remote peer.
fn create_instance_fuzz(
    key: &mut [u8; 32],
) -> (
    Litep2p,
    FuzzProtocolHandle,
    FuzzProtocolHandle,
    FuzzProtocolHandle,
    FuzzProtocolHandle,
) {
    // BUGFIX: these names must match the ones registered by `create_instance`.
    // They previously read "/kscmcc3/..." which never matches "/ksmcc3/..."
    // during protocol negotiation, so fuzz input never reached the target
    // protocols. The variable names were also swapped (`custom_notif` held the
    // bitswap protocol and vice versa).
    let (custom_kad, kad_handle) = FuzzProtocol::new("/ksmcc3/kad");
    let (custom_bitswap, bitswap_handle) = FuzzProtocol::new("/ipfs/bitswap/1.2.0");
    let (custom_rr, rr_handle) = FuzzProtocol::new("/ksmcc3/rr");
    let (custom_notif, notif_handle) = FuzzProtocol::new("/ksmcc3/notif");
    let config = ConfigBuilder::new()
        .with_user_protocol(Box::new(custom_kad))
        .with_user_protocol(Box::new(custom_bitswap))
        .with_user_protocol(Box::new(custom_rr))
        .with_user_protocol(Box::new(custom_notif))
        .with_tcp(TcpConfig::default())
        .with_keypair(SecretKey::try_from_bytes(key).unwrap().into())
        .build();
    (
        Litep2p::new(config).unwrap(),
        kad_handle,
        bitswap_handle,
        rr_handle,
        notif_handle,
    )
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/syncing.rs | examples/syncing.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This example demonstrates how application using `litep2p` might structure itself
//! to implement, e.g, a syncing protocol using notification and request-response protocols
use litep2p::{
config::ConfigBuilder,
protocol::{
notification::{
Config as NotificationConfig, ConfigBuilder as NotificationConfigBuilder,
NotificationHandle,
},
request_response::{
Config as RequestResponseConfig, ConfigBuilder as RequestResponseConfigBuilder,
RequestResponseHandle,
},
},
transport::quic::config::Config as QuicConfig,
types::protocol::ProtocolName,
Litep2p,
};
use futures::StreamExt;
/// Object responsible for syncing the blockchain.
///
/// Owns one handle per networking protocol it uses; the matching protocol
/// configurations are returned by [`SyncingEngine::new`] and must be
/// registered with `Litep2p` by the caller.
struct SyncingEngine {
    /// Notification handle used to send and receive notifications.
    block_announce_handle: NotificationHandle,
    /// Request-response handle used to send and receive block requests/responses.
    block_sync_handle: RequestResponseHandle,
    /// Request-response handle used to send and receive state requests/responses.
    state_sync_handle: RequestResponseHandle,
}
impl SyncingEngine {
    /// Construct a new [`SyncingEngine`] together with the three protocol
    /// configurations that the caller must register with `Litep2p`.
    fn new() -> (
        Self,
        NotificationConfig,
        RequestResponseConfig,
        RequestResponseConfig,
    ) {
        let (block_announce_config, block_announce_handle) = Self::init_block_announce();
        let (block_sync_config, block_sync_handle) = Self::init_block_sync();
        let (state_sync_config, state_sync_handle) = Self::init_state_sync();

        let engine = Self {
            block_announce_handle,
            block_sync_handle,
            state_sync_handle,
        };

        (
            engine,
            block_announce_config,
            block_sync_config,
            state_sync_config,
        )
    }

    /// Build the notification protocol used for block announcements.
    fn init_block_announce() -> (NotificationConfig, NotificationHandle) {
        let protocol = ProtocolName::from("/notif/block-announce/1");
        NotificationConfigBuilder::new(protocol)
            .with_max_size(1024usize)
            .with_handshake(vec![1, 2, 3, 4])
            .build()
    }

    /// Build the request-response protocol used for block syncing.
    fn init_block_sync() -> (RequestResponseConfig, RequestResponseHandle) {
        let protocol = ProtocolName::from("/sync/block/1");
        RequestResponseConfigBuilder::new(protocol)
            .with_max_size(1024 * 1024)
            .build()
    }

    /// Build the request-response protocol used for state syncing.
    fn init_state_sync() -> (RequestResponseConfig, RequestResponseHandle) {
        let protocol = ProtocolName::from("/sync/state/1");
        RequestResponseConfigBuilder::new(protocol)
            .with_max_size(1024 * 1024)
            .build()
    }

    /// Drive the engine: poll all protocol handles, discarding the events
    /// (a real implementation would react to them here).
    async fn run(mut self) {
        loop {
            tokio::select! {
                _ = self.block_announce_handle.next() => {}
                _ = self.block_sync_handle.next() => {}
                _ = self.state_sync_handle.next() => {}
            }
        }
    }
}
#[tokio::main]
async fn main() {
    // Create the `SyncingEngine` and collect the protocol configurations it
    // wants registered.
    let (engine, block_announce_config, block_sync_config, state_sync_config) =
        SyncingEngine::new();

    // Assemble the litep2p configuration: QUIC transport + syncing protocols.
    let quic_config = QuicConfig {
        listen_addresses: vec!["/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap()],
        ..Default::default()
    };
    let config = ConfigBuilder::new()
        .with_quic(quic_config)
        .with_notification_protocol(block_announce_config)
        .with_request_response_protocol(block_sync_config)
        .with_request_response_protocol(state_sync_config)
        .build();

    // Create the `Litep2p` object, starting the protocol handlers and the
    // QUIC transport.
    let mut litep2p = Litep2p::new(config).unwrap();

    // Run `SyncingEngine` in the background.
    tokio::spawn(engine.run());

    // Keep polling `litep2p` so connection-related activity makes progress;
    // the events themselves are not interesting to this example.
    loop {
        let _ = litep2p.next_event().await.unwrap();
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/custom_protocol.rs | examples/custom_protocol.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This example demonstrates how to implement a custom protocol for litep2p.
use litep2p::{
codec::ProtocolCodec,
config::ConfigBuilder,
protocol::{Direction, TransportEvent, TransportService, UserProtocol},
types::protocol::ProtocolName,
Litep2p, PeerId,
};
use bytes::{Buf, BufMut, BytesMut};
use futures::{future::BoxFuture, stream::FuturesUnordered, SinkExt, StreamExt};
use tokio::sync::mpsc::{channel, Receiver, Sender};
use tokio_util::codec::{Decoder, Encoder, Framed};
use std::collections::{hash_map::Entry, HashMap};
/// Codec implementing minimal length-prefixed framing: each frame is one
/// length byte followed by that many payload bytes (max frame size 255).
#[derive(Debug)]
struct CustomCodec;
impl Decoder for CustomCodec {
    type Item = BytesMut;
    type Error = litep2p::Error;

    /// Decode a length-prefixed frame: one length byte followed by `len`
    /// payload bytes. Returns `Ok(None)` until a complete frame is buffered.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        if src.is_empty() {
            return Ok(None);
        }

        // BUGFIX: peek at the length byte without consuming it. The previous
        // implementation read it with `get_u8()`, which advances the buffer,
        // so on a partial frame the length byte was lost and the next call
        // misinterpreted the first payload byte as a new length.
        let len = src[0] as usize;

        // `src` still contains the length byte itself, hence `len + 1`.
        if src.len() >= len + 1 {
            src.advance(1);
            return Ok(Some(src.split_to(len)));
        }

        Ok(None)
    }
}
impl Encoder<BytesMut> for CustomCodec {
    type Error = std::io::Error;

    /// Encode `item` as a single length byte followed by the payload.
    fn encode(&mut self, item: BytesMut, dst: &mut BytesMut) -> Result<(), Self::Error> {
        let len = item.len();

        // A one-byte length prefix caps frames at `u8::MAX` bytes.
        if len > usize::from(u8::MAX) {
            return Err(std::io::ErrorKind::PermissionDenied.into());
        }

        dst.put_u8(len as u8);
        dst.extend_from_slice(item.as_ref());
        Ok(())
    }
}
/// Events received from the protocol.
#[derive(Debug)]
enum CustomProtocolEvent {
    /// Received `message` from `peer` over an inbound substream.
    MessageReceived {
        /// Peer ID of the sender.
        peer: PeerId,
        /// Decoded message payload.
        message: Vec<u8>,
    },
}
/// Commands sent to the protocol.
#[derive(Debug)]
enum CustomProtocolCommand {
    /// Send `message` to `peer`, dialing the peer first if not connected.
    SendMessage {
        /// Peer ID of the recipient.
        peer: PeerId,
        /// Message payload to deliver.
        message: Vec<u8>,
    },
}
/// Handle for communicating with the protocol.
#[derive(Debug)]
struct CustomProtocolHandle {
    // Channel for sending commands to the protocol task.
    cmd_tx: Sender<CustomProtocolCommand>,
    // Channel for receiving events emitted by the protocol task.
    event_rx: Receiver<CustomProtocolEvent>,
}
/// Example user protocol: delivers one message per outbound substream and
/// reports one message per inbound substream.
#[derive(Debug)]
struct CustomProtocol {
    /// Channel for receiving commands from user.
    cmd_rx: Receiver<CustomProtocolCommand>,
    /// Channel for sending events to user.
    event_tx: Sender<CustomProtocolEvent>,
    /// Connected peers.
    ///
    /// The value holds a pending outbound message, if any, waiting for a
    /// substream to open.
    peers: HashMap<PeerId, Option<Vec<u8>>>,
    /// Active inbound substreams.
    inbound: FuturesUnordered<BoxFuture<'static, (PeerId, Option<litep2p::Result<BytesMut>>)>>,
    /// Active outbound substreams.
    outbound: FuturesUnordered<BoxFuture<'static, litep2p::Result<()>>>,
}
impl CustomProtocol {
    /// Create a new [`CustomProtocol`] and a handle for interacting with it.
    pub fn new() -> (Self, CustomProtocolHandle) {
        let (event_tx, event_rx) = channel(64);
        let (cmd_tx, cmd_rx) = channel(64);

        let protocol = Self {
            cmd_rx,
            event_tx,
            peers: HashMap::new(),
            inbound: FuturesUnordered::new(),
            outbound: FuturesUnordered::new(),
        };
        let handle = CustomProtocolHandle { cmd_tx, event_rx };

        (protocol, handle)
    }
}
#[async_trait::async_trait]
impl UserProtocol for CustomProtocol {
    fn protocol(&self) -> ProtocolName {
        ProtocolName::from("/custom-protocol/1")
    }

    // Protocol code is set to `Unspecified` which means that `litep2p` won't provide
    // `Sink + Stream` for the protocol and instead only `AsyncWrite + AsyncRead` are provided.
    // User must implement their custom codec on top of `Substream` using, e.g.,
    // `tokio_codec::Framed` if they want to have message framing.
    fn codec(&self) -> ProtocolCodec {
        ProtocolCodec::Unspecified
    }

    /// Start running event loop for [`CustomProtocol`].
    ///
    /// Polls, in one `select!`: user commands, transport events, and the
    /// inbound/outbound substream futures.
    async fn run(mut self: Box<Self>, mut service: TransportService) -> litep2p::Result<()> {
        loop {
            tokio::select! {
                cmd = self.cmd_rx.recv() => match cmd {
                    Some(CustomProtocolCommand::SendMessage { peer, message }) => {
                        match self.peers.entry(peer) {
                            // peer doesn't exist so dial them and save the message
                            Entry::Vacant(entry) => match service.dial(&peer) {
                                Ok(()) => {
                                    entry.insert(Some(message));
                                }
                                Err(error) => {
                                    eprintln!("failed to dial {peer:?}: {error:?}");
                                }
                            }
                            // peer exists so open a new substream
                            // (any previously pending message is replaced)
                            Entry::Occupied(mut entry) => match service.open_substream(peer) {
                                Ok(_) => {
                                    entry.insert(Some(message));
                                }
                                Err(error) => {
                                    eprintln!("failed to open substream to {peer:?}: {error:?}");
                                }
                            }
                        }
                    }
                    // command channel closed: the handle was dropped, shut down
                    None => return Err(litep2p::Error::EssentialTaskClosed),
                },
                event = service.next() => match event {
                    // connection established to peer
                    //
                    // check if the peer already exist in the protocol with a pending message
                    // and if yes, open substream to the peer.
                    Some(TransportEvent::ConnectionEstablished { peer, .. }) => {
                        match self.peers.get(&peer) {
                            Some(Some(_)) => {
                                if let Err(error) = service.open_substream(peer) {
                                    println!("failed to open substream to {peer:?}: {error:?}");
                                }
                            }
                            Some(None) => {}
                            None => {
                                self.peers.insert(peer, None);
                            }
                        }
                    }
                    // substream opened
                    //
                    // for inbound substreams, move the substream to `self.inbound` and poll them for messages
                    //
                    // for outbound substreams, move the substream to `self.outbound` and send the saved message to remote peer
                    Some(TransportEvent::SubstreamOpened { peer, substream, direction, .. }) => {
                        match direction {
                            Direction::Inbound => {
                                self.inbound.push(Box::pin(async move {
                                    (peer, Framed::new(substream, CustomCodec).next().await)
                                }));
                            }
                            Direction::Outbound(_) => {
                                // NOTE(review): `.take().unwrap()` panics if an outbound
                                // substream opens while no message is pending (e.g. a second
                                // concurrent substream to the same peer) — confirm this
                                // invariant holds outside example usage.
                                let message = self.peers.get_mut(&peer).expect("peer to exist").take().unwrap();
                                self.outbound.push(Box::pin(async move {
                                    let mut framed = Framed::new(substream, CustomCodec);
                                    framed.send(BytesMut::from(&message[..])).await.map_err(From::from)
                                }));
                            }
                        }
                    }
                    // connection closed, remove all peer context
                    Some(TransportEvent::ConnectionClosed { peer }) => {
                        self.peers.remove(&peer);
                    }
                    None => return Err(litep2p::Error::EssentialTaskClosed),
                    _ => {},
                },
                // poll inbound substreams for messages
                event = self.inbound.next(), if !self.inbound.is_empty() => match event {
                    Some((peer, Some(Ok(message)))) => {
                        // NOTE(review): `unwrap()` panics if the event receiver was
                        // dropped — acceptable for an example.
                        self.event_tx.send(CustomProtocolEvent::MessageReceived {
                            peer,
                            message: message.into(),
                        }).await.unwrap();
                    }
                    event => eprintln!("failed to read message from an inbound substream: {event:?}"),
                },
                // poll outbound substreams so that they can make progress
                _ = self.outbound.next(), if !self.outbound.is_empty() => {}
            }
        }
    }
}
/// Build a [`Litep2p`] instance with the custom protocol installed over TCP.
fn make_litep2p() -> (Litep2p, CustomProtocolHandle) {
    let (custom_protocol, handle) = CustomProtocol::new();

    let config = ConfigBuilder::new()
        .with_tcp(Default::default())
        .with_user_protocol(Box::new(custom_protocol))
        .build();

    (Litep2p::new(config).unwrap(), handle)
}
#[tokio::main]
async fn main() {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();

    let (mut litep2p1, handle1) = make_litep2p();
    let (mut litep2p2, mut handle2) = make_litep2p();

    let peer2 = *litep2p2.local_peer_id();
    let listen_address = litep2p2.listen_addresses().next().unwrap().clone();

    // Teach the first node where the second node can be reached.
    litep2p1.add_known_address(peer2, std::iter::once(listen_address));

    // Drive both litep2p instances in the background so connections and
    // substreams make progress.
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = litep2p1.next_event() => {}
                _ = litep2p2.next_event() => {}
            }
        }
    });

    let messages = [
        b"hello, world".to_vec(),
        b"testing 123".to_vec(),
        b"goodbye, world".to_vec(),
    ];

    for message in messages {
        // Ask the first node's protocol to deliver `message` to the second node.
        let cmd = CustomProtocolCommand::SendMessage {
            peer: peer2,
            message,
        };
        handle1.cmd_tx.send(cmd).await.unwrap();

        // Wait until the second node reports the received message.
        let CustomProtocolEvent::MessageReceived { peer, message } =
            handle2.event_rx.recv().await.unwrap();
        println!(
            "received message from {peer:?}: {:?}",
            std::str::from_utf8(&message)
        );
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/custom_executor.rs | examples/custom_executor.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This examples demonstrates how a custom task executor can be used with litep2p.
//!
//! In general, a custom task executor is not needed and litep2p defaults to calling
//! `tokio::spawn()` for futures that should be run in the background but if user wishes
//! to add some extra features, such as couting how many times each future has been polled
//! and for how long, they can be implemented on top of the custom task executor.
//!
//! Run: `RUST_LOG=info cargo run --example custom_executor`
use litep2p::{
config::ConfigBuilder,
executor::Executor,
protocol::libp2p::ping::{Config as PingConfig, PingEvent},
transport::tcp::config::Config as TcpConfig,
Litep2p,
};
use futures::{future::BoxFuture, stream::FuturesUnordered, Stream, StreamExt};
use tokio::sync::mpsc::{channel, Receiver, Sender};
use std::{future::Future, pin::Pin, sync::Arc};
/// Task executor.
///
/// Just a wrapper around `FuturesUnordered` which receives the futures over `mpsc::Receiver`.
struct TaskExecutor {
    // Receiving end for futures submitted through `TaskExecutorHandle`.
    rx: Receiver<Pin<Box<dyn Future<Output = ()> + Send>>>,
    // Set of futures currently being driven by this executor.
    futures: FuturesUnordered<BoxFuture<'static, ()>>,
}
impl TaskExecutor {
    /// Create new [`TaskExecutor`] and the sender half used to submit futures to it.
    fn new() -> (Self, Sender<Pin<Box<dyn Future<Output = ()> + Send>>>) {
        let (tx, rx) = channel(64);
        (
            Self {
                rx,
                futures: FuturesUnordered::new(),
            },
            tx,
        )
    }

    /// Drive the futures forward and poll the receiver for any new futures.
    ///
    /// NOTE: despite its name, this function loops forever and never resolves;
    /// it only makes progress while being polled (e.g. as a `tokio::select!`
    /// arm) and panics if every sender is dropped (`recv()` returning `None`).
    async fn next(&mut self) {
        loop {
            tokio::select! {
                future = self.rx.recv() => self.futures.push(future.unwrap()),
                // guard prevents busy-looping: an empty `FuturesUnordered`
                // yields `None` immediately
                _ = self.futures.next(), if !self.futures.is_empty() => {}
            }
        }
    }
}
// Handle through which litep2p submits background futures to `TaskExecutor`.
struct TaskExecutorHandle {
    // Sending end of the future channel owned by `TaskExecutor`.
    tx: Sender<Pin<Box<dyn Future<Output = ()> + Send>>>,
}
impl Executor for TaskExecutorHandle {
    /// Submit `future` to the executor.
    ///
    /// NOTE(review): `try_send` silently drops the future when the channel
    /// (capacity 64) is full — fine for an example, but confirm before
    /// reusing this pattern in production code.
    fn run(&self, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
        let _ = self.tx.try_send(future);
    }

    /// Submit a named future; the name is ignored by this executor.
    fn run_with_name(&self, _: &'static str, future: Pin<Box<dyn Future<Output = ()> + Send>>) {
        let _ = self.tx.try_send(future);
    }
}
/// Build a [`Litep2p`] instance that runs its background futures on a custom
/// [`TaskExecutor`], returning the instance, the executor (which the caller
/// must keep polling), and the ping event stream.
fn make_litep2p() -> (
    Litep2p,
    TaskExecutor,
    Box<dyn Stream<Item = PingEvent> + Send + Unpin>,
) {
    let (executor, sender) = TaskExecutor::new();
    let (ping_config, ping_event_stream) = PingConfig::default();

    let litep2p = Litep2p::new(
        ConfigBuilder::new()
            // `sender` is moved into the handle directly; the previous
            // `sender.clone()` created a redundant clone whose original was
            // simply dropped at the end of this function.
            .with_executor(Arc::new(TaskExecutorHandle { tx: sender }))
            .with_tcp(TcpConfig {
                listen_addresses: vec!["/ip6/::1/tcp/0".parse().unwrap()],
                ..Default::default()
            })
            .with_libp2p_ping(ping_config)
            .build(),
    )
    .unwrap();

    (litep2p, executor, ping_event_stream)
}
#[tokio::main]
async fn main() {
    let _ = tracing_subscriber::fmt()
        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
        .try_init();

    // create two identical litep2ps, each with its own custom executor
    let (mut litep2p1, mut executor1, mut ping_event_stream1) = make_litep2p();
    let (mut litep2p2, mut executor2, mut ping_event_stream2) = make_litep2p();

    // dial `litep2p1`
    litep2p2
        .dial_address(litep2p1.listen_addresses().next().unwrap().clone())
        .await
        .unwrap();

    // drive the first instance (and its executor) in a background task
    tokio::spawn(async move {
        loop {
            tokio::select! {
                _ = executor1.next() => {}
                _ = litep2p1.next_event() => {},
                _ = ping_event_stream1.next() => {},
            }
        }
    });

    // poll litep2p, task executor and ping event stream all together
    //
    // since a custom task executor was provided, it's now the user's responsibility
    // to actually make sure to poll those futures so that litep2p can make progress
    loop {
        tokio::select! {
            _ = executor2.next() => {}
            _ = litep2p2.next_event() => {},
            event = ping_event_stream2.next() =>
                if let Some(PingEvent::Ping { peer, ping }) = event {
                    tracing::info!("ping time with {peer:?}: {ping:?}")
                }
        }
    }
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/echo_notification.rs | examples/echo_notification.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This example demonstrates a simple echo server using the notification protocol
//! in which client connects to server and sends a message to server every 3 seconds
//!
//! Run: `cargo run --example echo_notification`
use litep2p::{
config::ConfigBuilder,
protocol::notification::{
ConfigBuilder as NotificationConfigBuilder, NotificationEvent, NotificationHandle,
ValidationResult,
},
transport::quic::config::Config as QuicConfig,
types::protocol::ProtocolName,
Litep2p, PeerId,
};
use futures::StreamExt;
use std::time::Duration;
/// Event loop for the client: opens a substream to `peer`, then sends a
/// notification every 3 seconds and prints each echoed response.
async fn client_event_loop(mut litep2p: Litep2p, mut handle: NotificationHandle, peer: PeerId) {
    // open substream to `peer`
    //
    // if `litep2p` is not connected to `peer` but it has at least one known address,
    // `NotificationHandle::open_substream()` will automatically dial `peer`
    handle.open_substream(peer).await.unwrap();
    // wait until the substream is opened, polling litep2p in parallel so the
    // connection can make progress
    loop {
        tokio::select! {
            _ = litep2p.next_event() => {}
            event = handle.next() =>
                if let NotificationEvent::NotificationStreamOpened { .. } = event.unwrap() {
                    break
                }
        }
    }
    // after the substream is open, send notification to server and print the response to stdout
    loop {
        tokio::select! {
            _ = litep2p.next_event() => {}
            event = handle.next() =>
                if let NotificationEvent::NotificationReceived { peer, notification } = event.unwrap() {
                    println!("received response from server ({peer:?}): {notification:?}");
                },
            // send the same payload every 3 seconds
            _ = tokio::time::sleep(Duration::from_secs(3)) => {
                handle.send_sync_notification(peer, vec![1, 3, 3, 7]).unwrap();
            }
        }
    }
}
/// event loop for the server
async fn server_event_loop(mut litep2p: Litep2p, mut handle: NotificationHandle) {
loop {
tokio::select! {
_ = litep2p.next_event() => {}
event = handle.next() => match event.unwrap() {
NotificationEvent::ValidateSubstream { peer, .. } => {
handle.send_validation_result(peer, ValidationResult::Accept);
}
NotificationEvent::NotificationReceived { peer, notification } => {
handle.send_async_notification(peer, notification.freeze().into()).await.unwrap();
}
_ => {},
},
}
}
}
/// helper function for creating `Litep2p` object
fn make_litep2p() -> (Litep2p, NotificationHandle) {
// build notification config for the notification protocol
let (echo_config, echo_handle) = NotificationConfigBuilder::new(ProtocolName::from("/echo/1"))
.with_max_size(256)
.with_auto_accept_inbound(true)
.with_handshake(vec![1, 3, 3, 7])
.build();
// build `Litep2p` object and return it + notification handle
(
Litep2p::new(
ConfigBuilder::new()
.with_quic(QuicConfig {
listen_addresses: vec!["/ip4/127.0.0.1/udp/0/quic-v1".parse().unwrap()],
..Default::default()
})
.with_notification_protocol(echo_config)
.build(),
)
.unwrap(),
echo_handle,
)
}
#[tokio::main]
async fn main() {
// build `Litep2p` objects for both peers
let (mut litep2p1, echo_handle1) = make_litep2p();
let (litep2p2, echo_handle2) = make_litep2p();
// get the first (and only) listen address for the second peer
// and add it as a known address for `litep2p1`
let listen_address = litep2p2.listen_addresses().next().unwrap().clone();
let peer = *litep2p2.local_peer_id();
litep2p1.add_known_address(peer, vec![listen_address].into_iter());
// start event loops for client and server
tokio::spawn(client_event_loop(litep2p1, echo_handle1, peer));
tokio::spawn(server_event_loop(litep2p2, echo_handle2));
loop {
tokio::time::sleep(Duration::from_secs(10)).await;
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/mdns_and_ping.rs | examples/mdns_and_ping.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This examples demonstrates using mDNS to discover peers in the local network and
//! calculating their PING time.
use litep2p::{
config::ConfigBuilder,
protocol::{
libp2p::ping::{Config as PingConfig, PingEvent},
mdns::{Config as MdnsConfig, MdnsEvent},
},
Litep2p,
};
use futures::{Stream, StreamExt};
use std::time::Duration;
/// simple event loop which discovers peers over mDNS,
/// establishes a connection to them and calculates the PING time
async fn peer_event_loop(
mut litep2p: Litep2p,
mut ping_event_stream: Box<dyn Stream<Item = PingEvent> + Send + Unpin>,
mut mdns_event_stream: Box<dyn Stream<Item = MdnsEvent> + Send + Unpin>,
) {
loop {
tokio::select! {
_ = litep2p.next_event() => {}
event = ping_event_stream.next() => match event.unwrap() {
PingEvent::Ping { peer, ping } => {
println!("ping received from {peer:?}: {ping:?}");
}
},
event = mdns_event_stream.next() => match event.unwrap() {
MdnsEvent::Discovered(addresses) => {
litep2p.dial_address(addresses[0].clone()).await.unwrap();
}
}
}
}
}
/// helper function for creating `Litep2p` object
fn make_litep2p() -> (
Litep2p,
Box<dyn Stream<Item = PingEvent> + Send + Unpin>,
Box<dyn Stream<Item = MdnsEvent> + Send + Unpin>,
) {
// initialize IPFS ping and mDNS
let (ping_config, ping_event_stream) = PingConfig::default();
let (mdns_config, mdns_event_stream) = MdnsConfig::new(Duration::from_secs(30));
// build `Litep2p`, passing in configurations for IPFS and mDNS
let litep2p_config = ConfigBuilder::new()
// `litep2p` will bind to `/ip6/::1/tcp/0` by default
.with_tcp(Default::default())
.with_libp2p_ping(ping_config)
.with_mdns(mdns_config)
.build();
// build `Litep2p` and return it + event streams
(
Litep2p::new(litep2p_config).unwrap(),
ping_event_stream,
mdns_event_stream,
)
}
#[tokio::main]
async fn main() {
// initialize `Litep2p` objects for the peers
let (litep2p1, ping_event_stream1, mdns_event_stream1) = make_litep2p();
let (litep2p2, ping_event_stream2, mdns_event_stream2) = make_litep2p();
// starts separate tasks for the first and second peer
tokio::spawn(peer_event_loop(
litep2p1,
ping_event_stream1,
mdns_event_stream1,
));
tokio::spawn(peer_event_loop(
litep2p2,
ping_event_stream2,
mdns_event_stream2,
));
loop {
tokio::time::sleep(Duration::from_secs(10)).await;
}
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
paritytech/litep2p | https://github.com/paritytech/litep2p/blob/991aa12f60db41543735394bf71fba09332752f8/examples/gossiping.rs | examples/gossiping.rs | // Copyright 2023 litep2p developers
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This example demonstrates how application can implement transaction gossiping.
//!
//! Run: `RUST_LOG=gossiping=info cargo run --example gossiping`
use litep2p::{
config::ConfigBuilder,
protocol::notification::{
Config as NotificationConfig, ConfigBuilder as NotificationConfigBuilder,
NotificationEvent, NotificationHandle, ValidationResult,
},
types::protocol::ProtocolName,
Litep2p, PeerId,
};
use futures::StreamExt;
use tokio::sync::mpsc::{channel, Receiver, Sender};
use std::{
collections::{HashMap, HashSet},
time::Duration,
};
/// Dummy transaction.
#[derive(Debug, Hash, PartialEq, Eq, Clone)]
struct Transaction {
tx: Vec<u8>,
}
/// Handle which allows communicating with [`TransactionProtocol`].
struct TransactionProtocolHandle {
tx: Sender<Transaction>,
}
impl TransactionProtocolHandle {
/// Create new [`TransactionProtocolHandle`].
fn new() -> (Self, Receiver<Transaction>) {
let (tx, rx) = channel(64);
(Self { tx }, rx)
}
/// Announce transaction by sending it to the [`TransactionProtocol`] which will send
/// it to all peers who don't have it yet.
async fn announce_transaction(&self, tx: Transaction) {
self.tx.send(tx).await.unwrap();
}
}
/// Transaction protocol.
struct TransactionProtocol {
/// Notification handle used to send and receive notifications.
tx_handle: NotificationHandle,
/// Handle for receiving transactions from user that should be sent to connected peers.
rx: Receiver<Transaction>,
/// Connected peers.
peers: HashMap<PeerId, HashSet<Transaction>>,
/// Seen transactions.
seen: HashSet<Transaction>,
}
impl TransactionProtocol {
fn new() -> (Self, NotificationConfig, TransactionProtocolHandle) {
let (tx_config, tx_handle) = Self::init_tx_announce();
let (handle, rx) = TransactionProtocolHandle::new();
(
Self {
tx_handle,
rx,
peers: HashMap::new(),
seen: HashSet::new(),
},
tx_config,
handle,
)
}
/// Initialize notification protocol for transactions.
fn init_tx_announce() -> (NotificationConfig, NotificationHandle) {
NotificationConfigBuilder::new(ProtocolName::from("/notif/tx/1"))
.with_max_size(1024usize)
.with_handshake(vec![1, 2, 3, 4])
.build()
}
/// Poll next transaction from the protocol.
async fn next(&mut self) -> Option<(PeerId, Transaction)> {
loop {
tokio::select! {
event = self.tx_handle.next() => match event? {
NotificationEvent::ValidateSubstream { peer, .. } => {
tracing::info!("inbound substream received from {peer}");
self.tx_handle.send_validation_result(peer, ValidationResult::Accept);
}
NotificationEvent::NotificationStreamOpened { peer, .. } => {
tracing::info!("substream opened for {peer}");
self.peers.insert(peer, HashSet::new());
}
NotificationEvent::NotificationStreamClosed { peer } => {
tracing::info!("substream closed for {peer}");
self.peers.remove(&peer);
}
NotificationEvent::NotificationReceived { peer, notification } => {
tracing::info!("transaction received from {peer}: {notification:?}");
// send transaction to all peers who don't have it yet
let notification = notification.freeze();
for (connected, txs) in &mut self.peers {
let not_seen = txs.insert(Transaction { tx: notification.clone().into() });
if connected != &peer && not_seen {
self.tx_handle.send_sync_notification(
*connected,
notification.clone().into(),
).unwrap();
}
}
if self.seen.insert(Transaction { tx: notification.clone().into() }) {
return Some((peer, Transaction { tx: notification.clone().into() }))
}
}
_ => {}
},
tx = self.rx.recv() => match tx {
None => return None,
Some(transaction) => {
// send transaction to all peers who don't have it yet
self.seen.insert(transaction.clone());
for (peer, txs) in &mut self.peers {
if txs.insert(transaction.clone()) {
self.tx_handle.send_sync_notification(
*peer,
transaction.tx.clone(),
).unwrap();
}
}
}
}
}
}
}
/// Start event loop for [`TransactionProtocol`].
async fn run(mut self) {
loop {
match self.next().await {
Some((peer, tx)) => {
tracing::info!("received transaction from {peer}: {tx:?}");
}
None => return,
}
}
}
}
async fn await_substreams(
tx1: &mut TransactionProtocol,
tx2: &mut TransactionProtocol,
tx3: &mut TransactionProtocol,
tx4: &mut TransactionProtocol,
) {
loop {
tokio::select! {
_ = tx1.next() => {}
_ = tx2.next() => {}
_ = tx3.next() => {}
_ = tx4.next() => {}
_ = tokio::time::sleep(Duration::from_secs(2)) => {
if tx1.peers.len() == 1 && tx2.peers.len() == 3 && tx3.peers.len() == 1 && tx4.peers.len() == 1 {
return
}
}
}
}
}
/// Initialize peer with transaction protocol enabled.
fn tx_peer() -> (Litep2p, TransactionProtocol, TransactionProtocolHandle) {
// initialize `TransctionProtocol`
let (tx, tx_announce_config, tx_handle) = TransactionProtocol::new();
// build `Litep2pConfig`
let config = ConfigBuilder::new()
.with_tcp(Default::default())
.with_notification_protocol(tx_announce_config)
.build();
// create `Litep2p` object and start internal protocol handlers and the QUIC transport
let litep2p = Litep2p::new(config).unwrap();
(litep2p, tx, tx_handle)
}
#[tokio::main]
async fn main() {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.try_init();
let (mut litep2p1, mut tx1, tx_handle1) = tx_peer();
let (mut litep2p2, mut tx2, _tx_handle2) = tx_peer();
let (mut litep2p3, mut tx3, tx_handle3) = tx_peer();
let (mut litep2p4, mut tx4, tx_handle4) = tx_peer();
tracing::info!("litep2p1: {}", litep2p1.local_peer_id());
tracing::info!("litep2p2: {}", litep2p2.local_peer_id());
tracing::info!("litep2p3: {}", litep2p3.local_peer_id());
tracing::info!("litep2p4: {}", litep2p4.local_peer_id());
// establish connection to litep2p for all other litep2ps
let peer2 = *litep2p2.local_peer_id();
let listen_address = litep2p2.listen_addresses().next().unwrap().clone();
litep2p1.add_known_address(peer2, vec![listen_address.clone()].into_iter());
litep2p3.add_known_address(peer2, vec![listen_address.clone()].into_iter());
litep2p4.add_known_address(peer2, vec![listen_address].into_iter());
tokio::spawn(async move { while let Some(_) = litep2p1.next_event().await {} });
tokio::spawn(async move { while let Some(_) = litep2p2.next_event().await {} });
tokio::spawn(async move { while let Some(_) = litep2p3.next_event().await {} });
tokio::spawn(async move { while let Some(_) = litep2p4.next_event().await {} });
// open substreams
tx1.tx_handle.open_substream(peer2).await.unwrap();
tx3.tx_handle.open_substream(peer2).await.unwrap();
tx4.tx_handle.open_substream(peer2).await.unwrap();
// wait a moment for substream to open and start `TransactionProtocol` event loops
await_substreams(&mut tx1, &mut tx2, &mut tx3, &mut tx4).await;
tokio::spawn(tx1.run());
tokio::spawn(tx2.run());
tokio::spawn(tx3.run());
tokio::spawn(tx4.run());
// annouce three transactions over three different handles
tx_handle1
.announce_transaction(Transaction {
tx: vec![1, 2, 3, 4],
})
.await;
tx_handle3
.announce_transaction(Transaction {
tx: vec![1, 3, 3, 7],
})
.await;
tx_handle4
.announce_transaction(Transaction {
tx: vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
})
.await;
// allow protocols to process announced transactions before exiting
tokio::time::sleep(Duration::from_secs(3)).await;
}
| rust | MIT | 991aa12f60db41543735394bf71fba09332752f8 | 2026-01-04T20:20:42.179941Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/html.rs | src/html.rs | use parser::Block;
use parser::Block::{
Blockquote, CodeBlock, Header, Hr, LinkReference, OrderedList, Paragraph, Raw, UnorderedList,
};
use parser::Span::{Break, Code, Emphasis, Image, Link, Literal, RefLink, Strong, Text};
use parser::{ListItem, OrderedListType, Span};
use regex::Regex;
use std::collections::HashMap;
type LinkReferenceMap<'a> = HashMap<&'a str, (&'a str, &'a Option<String>)>;
// takes a number of elements and returns their collective text as a slug
fn slugify(elements: &[Span], no_spaces: bool) -> String {
let mut ret = String::new();
for el in elements {
let next = match *el {
Break => "".to_owned(),
Literal(character) => character.to_string(),
Text(ref text) | Image(ref text, _, _) | Code(ref text) => text.trim().to_lowercase(),
RefLink(ref content, _, _)
| Link(ref content, _, _)
| Strong(ref content)
| Emphasis(ref content) => slugify(content, no_spaces),
};
if !ret.is_empty() {
ret.push('_');
}
ret.push_str(&next);
}
if no_spaces {
ret = ret.replace(" ", "_");
}
ret
}
pub fn to_html(blocks: &[Block]) -> String {
let mut ret = String::new();
let mut link_references: LinkReferenceMap = HashMap::new();
for block in blocks.iter() {
match block {
LinkReference(ref id, ref text, ref title) => {
link_references.insert(id, (text, title));
}
_ => {}
};
}
for block in blocks.iter() {
let next = match block {
Header(ref elements, level) => format_header(elements, *level, &link_references),
Paragraph(ref elements) => format_paragraph(elements, &link_references),
Blockquote(ref elements) => format_blockquote(elements),
CodeBlock(ref lang, ref elements) => format_codeblock(lang, elements),
UnorderedList(ref elements) => format_unordered_list(elements, &link_references),
OrderedList(ref elements, ref num_type) => {
format_ordered_list(elements, num_type, &link_references)
}
LinkReference(_, _, _) => "".to_owned(),
Raw(ref elements) => elements.to_owned(),
Hr => format!("<hr />\n\n"),
};
ret.push_str(&next)
}
ret = ret.trim().to_owned();
ret.push('\n');
ret
}
fn format_spans(elements: &[Span], link_references: &LinkReferenceMap) -> String {
let mut ret = String::new();
for element in elements.iter() {
let next = match *element {
Break => format!("<br />"),
Literal(character) => character.to_string(),
Text(ref text) => format!("{}", &escape(text, true)),
Code(ref text) => format!("<code>{}</code>", &escape(text, false)),
Link(ref content, ref url, None) => format!(
"<a href=\"{}\">{}</a>",
&escape(url, false),
format_spans(content, link_references)
),
Link(ref content, ref url, Some(ref title)) => format!(
"<a href=\"{}\" title=\"{}\">{}</a>",
&escape(url, false),
&escape(title, true),
format_spans(content, link_references)
),
RefLink(ref content, ref reference, ref raw) => {
if let Some((ref url, None)) = link_references.get::<str>(reference) {
format!(
"<a href=\"{}\">{}</a>",
&escape(url, false),
format_spans(content, link_references)
)
} else if let Some((ref url, Some(ref title))) =
link_references.get::<str>(reference)
{
format!(
"<a href=\"{}\" title=\"{}\">{}</a>",
&escape(url, false),
&escape(title, true),
format_spans(content, link_references)
)
} else if let Some((ref url, None)) =
link_references.get::<str>(&slugify(content, false))
{
format!(
"<a href=\"{}\">{}</a>",
&escape(url, false),
format_spans(content, link_references)
)
} else if let Some((ref url, Some(ref title))) =
link_references.get::<str>(&slugify(content, false))
{
format!(
"<a href=\"{}\" title=\"{}\">{}</a>",
&escape(url, false),
&escape(title, true),
format_spans(content, link_references)
)
} else {
raw.to_owned()
}
}
Image(ref text, ref url, None) => format!(
"<img src=\"{}\" alt=\"{}\" />",
&escape(url, false),
&escape(text, true)
),
Image(ref text, ref url, Some(ref title)) => format!(
"<img src=\"{}\" title=\"{}\" alt=\"{}\" />",
&escape(url, false),
&escape(title, true),
&escape(text, true)
),
Emphasis(ref content) => format!("<em>{}</em>", format_spans(content, link_references)),
Strong(ref content) => format!(
"<strong>{}</strong>",
format_spans(content, link_references)
),
};
ret.push_str(&next)
}
ret
}
fn escape(text: &str, replace_entities: bool) -> String {
lazy_static! {
static ref AMPERSAND: Regex = Regex::new(r"&(?P<x>\S+;)").unwrap();
}
let replaced = text
.replace("&", "&")
.replace("<", "<")
.replace("\"", """)
.replace("'", "’")
.replace(">", ">");
// We can't do lookarounds in the regex crate to match only ampersands with
// no entity; afterwards, so we do this ugly hack where we revert the replacement
// everywhere it wasn't desired.
if replace_entities {
return AMPERSAND.replace_all(&replaced, "&$x").into_owned();
}
return replaced;
}
fn format_list(
elements: &[ListItem],
start_tag: &str,
end_tag: &str,
link_references: &LinkReferenceMap,
) -> String {
let mut ret = String::new();
for list_item in elements {
let mut content = String::new();
match *list_item {
ListItem::Simple(ref els) => content.push_str(&format_spans(els, link_references)),
ListItem::Paragraph(ref paragraphs) => {
content.push_str(&format!("\n{}", to_html(paragraphs)))
}
}
ret.push_str(&format!("\n<li>{}</li>\n", content))
}
format!("<{}>{}</{}>\n\n", start_tag, ret, end_tag)
}
fn format_unordered_list(elements: &[ListItem], link_references: &LinkReferenceMap) -> String {
format_list(elements, "ul", "ul", link_references)
}
fn format_ordered_list(
elements: &[ListItem],
num_type: &OrderedListType,
link_references: &LinkReferenceMap,
) -> String {
if num_type != &OrderedListType::Numeric {
format_list(
elements,
&format!("ol type=\"{}\"", num_type.to_str()),
"ol",
link_references,
)
} else {
format_list(elements, "ol", "ol", link_references)
}
}
fn format_codeblock(lang: &Option<String>, elements: &str) -> String {
if lang.is_none() || (lang.is_some() && lang.as_ref().unwrap().is_empty()) {
format!("<pre><code>{}</code></pre>\n\n", &escape(elements, false))
} else {
format!(
"<pre><code class=\"language-{}\">{}</code></pre>\n\n",
&escape(lang.as_ref().unwrap(), false),
&escape(elements, false)
)
}
}
fn format_blockquote(elements: &[Block]) -> String {
format!("<blockquote>\n{}</blockquote>\n\n", to_html(elements))
}
fn format_paragraph(elements: &[Span], link_references: &LinkReferenceMap) -> String {
format!("<p>{}</p>\n\n", format_spans(elements, link_references))
}
fn format_header(elements: &[Span], level: usize, link_references: &LinkReferenceMap) -> String {
format!(
"<h{} id='{}'>{}</h{}>\n\n",
level,
slugify(elements, true),
format_spans(elements, link_references),
level
)
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/lib.rs | src/lib.rs | //! A crate for parsing Markdown in Rust
#![crate_name = "markdown"]
#![deny(missing_docs)]
#![deny(warnings)]
extern crate regex;
#[macro_use]
extern crate pipeline;
#[macro_use]
extern crate lazy_static;
use std::fs::File;
use std::io::{self, Read};
use std::path::Path;
mod html;
mod markdown_generator;
mod parser;
pub use parser::{Block, ListItem, Span};
/// Converts a Markdown string to HTML
pub fn to_html(text: &str) -> String {
let result = parser::parse(text);
html::to_html(&result)
}
/// Converts a Markdown string to a tokenset of Markdown items
pub fn tokenize(text: &str) -> Vec<Block> {
parser::parse(text)
}
/// Convert tokenset of Markdown items back to String
pub fn generate_markdown(x: Vec<Block>) -> String {
markdown_generator::generate(x)
}
/// Opens a file and converts its contents to HTML
pub fn file_to_html(path: &Path) -> io::Result<String> {
let mut file = File::open(path)?;
let mut text = String::new();
file.read_to_string(&mut text)?;
let result = parser::parse(&text);
Ok(html::to_html(&result))
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/main.rs | src/main.rs | extern crate markdown;
use std::env;
use std::path::Path;
fn main() {
let args: Vec<String> = env::args().collect();
println!("{:?}", args);
let path = Path::new(&args[1]);
// let display = path.display();
println!("{}", markdown::file_to_html(&path).unwrap());
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/markdown_generator/mod.rs | src/markdown_generator/mod.rs | use super::{Block, ListItem, Span};
trait JoinHelper<I>
where
I: Iterator,
{
fn j(self, sep: &'static str) -> String;
}
impl<I> JoinHelper<I> for I
where
I: Iterator<Item = String>,
{
fn j(self, sep: &'static str) -> String {
self.collect::<Vec<String>>().join(sep)
}
}
fn gen_block(b: Block) -> String {
use Block::*;
match b {
Header(s, level) => format!(
"{} {}",
::std::iter::repeat("#".to_string()).take(level).j(""),
generate_from_spans(s)
),
Paragraph(s) => generate_from_spans(s),
Blockquote(bb) => generate(bb).lines().map(|x| format!("> {}", x)).j("\n"),
CodeBlock(lang, x) => {
if lang.is_none() {
x.lines().map(|x| format!(" {}", x)).j("\n")
} else {
format!("```{}\n{}```", lang.unwrap(), x)
}
}
// [TODO]: Ordered list generation - 2017-12-10 10:12pm
OrderedList(_x, _num_type) => unimplemented!("Generate ordered list"),
UnorderedList(x) => generate_from_li(x),
LinkReference(id, url, None) => format!("[{}]: {}", id, url),
LinkReference(id, url, Some(title)) => format!("[{}]: {} \"{}\"", id, url, title),
Raw(x) => x,
Hr => "===".to_owned(),
}
}
fn gen_span(s: Span) -> String {
use Span::*;
match s {
Break => " \n".to_string(),
Text(x) => x,
Literal(x) => format!("\\{}", x),
Code(x) => format!("`{}`", x),
Link(a, b, None) => format!("[{}]({})", generate_from_spans(a), b),
Link(a, b, Some(c)) => format!("[{}]({} \"{}\")", generate_from_spans(a), b, c),
RefLink(_, _, raw) => raw,
Image(a, b, None) => format!("", a, b),
Image(a, b, Some(c)) => format!("", a, b, c),
Emphasis(x) => format!("*{}*", generate_from_spans(x)),
Strong(x) => format!("**{}**", generate_from_spans(x)),
}
}
fn generate_from_li(data: Vec<ListItem>) -> String {
use ListItem::*;
data.into_iter()
.map(|x| {
format!(
"* {}",
match x {
Simple(x) => generate_from_spans(x),
Paragraph(x) => format!(
"{}\n",
generate(x)
.lines()
.enumerate()
.map(|(i, x)| if i == 0 {
x.to_string()
} else {
format!(" {}", x)
})
.j("\n")
),
}
)
})
.j("\n")
}
fn generate_from_spans(data: Vec<Span>) -> String {
data.into_iter().map(gen_span).j("")
}
pub fn generate(data: Vec<Block>) -> String {
data.into_iter().map(gen_block).j("\n\n")
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/mod.rs | src/parser/mod.rs | mod block;
mod span;
#[allow(missing_docs)]
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum OrderedListType {
Numeric,
Lowercase,
Uppercase,
LowercaseRoman,
UppercaseRoman,
}
impl OrderedListType {
pub fn from_str(type_str: &str) -> OrderedListType {
match type_str {
"a" => OrderedListType::Lowercase,
"A" => OrderedListType::Uppercase,
"i" => OrderedListType::LowercaseRoman,
"I" => OrderedListType::UppercaseRoman,
_ => OrderedListType::Numeric,
}
}
pub fn to_str(&self) -> &'static str {
match self {
OrderedListType::Lowercase => "a",
OrderedListType::Uppercase => "A",
OrderedListType::LowercaseRoman => "i",
OrderedListType::UppercaseRoman => "I",
OrderedListType::Numeric => "1",
}
}
}
#[allow(missing_docs)]
#[derive(Debug, PartialEq, Clone)]
pub enum Block {
Header(Vec<Span>, usize),
Paragraph(Vec<Span>),
Blockquote(Vec<Block>),
CodeBlock(Option<String>, String),
/** A link reference with the fields: (id, url, [title]) **/
LinkReference(String, String, Option<String>),
OrderedList(Vec<ListItem>, OrderedListType),
UnorderedList(Vec<ListItem>),
Raw(String),
Hr,
}
#[allow(missing_docs)]
#[derive(Debug, PartialEq, Clone)]
pub enum ListItem {
Simple(Vec<Span>),
Paragraph(Vec<Block>),
}
#[allow(missing_docs)]
#[derive(Debug, PartialEq, Clone)]
pub enum Span {
Break,
Text(String),
Code(String),
Literal(char),
Link(Vec<Span>, String, Option<String>),
/**
* A reference-style link with the fields: (content, url, raw)
* The "raw" field is used internally for falling back to the original
* markdown link if the corresponding reference is not found at render time.
**/
RefLink(Vec<Span>, String, String),
Image(String, String, Option<String>),
Emphasis(Vec<Span>),
Strong(Vec<Span>),
}
pub fn parse(md: &str) -> Vec<Block> {
block::parse_blocks(md)
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/blockquote.rs | src/parser/block/blockquote.rs | use parser::block::parse_blocks;
use parser::Block;
use parser::Block::Blockquote;
pub fn parse_blockquote(lines: &[&str]) -> Option<(Block, usize)> {
// if the first char isnt a blockquote don't even bother
if lines[0].is_empty() || !lines[0].starts_with(">") {
return None;
}
// the content of the blockquote
let mut content = String::new();
// counts the number of parsed lines to return
let mut i = 0;
// captures if the previous item was a newline
// meaning the blockquote ends next if it's not
// explicitly continued with a >
let mut prev_newline = false;
for line in lines {
// stop parsing on two newlines or if the paragraph after
// a newline isn't started with a >
// we continue to parse if it's just another empty line
if prev_newline && line.len() > 0 && !line.starts_with(">") {
break;
}
if line.is_empty() {
prev_newline = true;
} else {
prev_newline = false;
}
let mut chars = line.chars();
let begin = match chars.next() {
Some('>') => match chars.next() {
Some(' ') => 2,
_ => 1,
},
_ => 0,
};
if i > 0 {
content.push('\n');
}
content.push_str(&line[begin..line.len()]);
i += 1;
}
if i > 0 {
return Some((Blockquote(parse_blocks(&content)), i));
}
None
}
#[cfg(test)]
mod test {
use super::parse_blockquote;
use parser::Block::Blockquote;
#[test]
fn finds_blockquote() {
match parse_blockquote(&vec!["> A citation", "> is good"]) {
Some((Blockquote(_), 2)) => (),
_ => panic!(),
}
match parse_blockquote(&vec!["> A citation", "> is good,", "very good"]) {
Some((Blockquote(_), 3)) => (),
_ => panic!(),
}
}
#[test]
fn knows_when_to_stop() {
match parse_blockquote(&vec!["> A citation", "> is good", "", "whatever"]) {
Some((Blockquote(_), 3)) => (),
_ => panic!(),
}
}
#[test]
fn no_false_positives() {
assert_eq!(parse_blockquote(&vec!["wat > this"]), None);
}
#[test]
fn no_early_matching() {
assert_eq!(
parse_blockquote(&vec!["Hello", "> A citation", "> is good", "", "whatever"]),
None
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/unordered_list.rs | src/parser/block/unordered_list.rs | use parser::block::parse_blocks;
use parser::Block;
use parser::Block::{Paragraph, UnorderedList};
use parser::ListItem;
use regex::Regex;
pub fn parse_unordered_list(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref LIST_BEGIN: Regex =
Regex::new(r"^(?P<indent> *)(-|\+|\*) (?P<content>.*)").unwrap();
static ref NEW_PARAGRAPH: Regex = Regex::new(r"^ +").unwrap();
static ref INDENTED: Regex = Regex::new(r"^ {0,4}(?P<content>.*)").unwrap();
}
// if the beginning doesn't match a list don't even bother
if !LIST_BEGIN.is_match(lines[0]) {
return None;
}
// a vec holding the contents and indentation
// of each list item
let mut contents = vec![];
let mut prev_newline = false;
let mut is_paragraph = false;
// counts the number of parsed lines to return
let mut i = 0;
let mut line_iter = lines.iter();
let mut line = line_iter.next();
// loop for list items
loop {
if line.is_none() || !LIST_BEGIN.is_match(line.unwrap()) {
break;
}
if prev_newline {
is_paragraph = true;
prev_newline = false;
}
let caps = LIST_BEGIN.captures(line.unwrap()).unwrap();
let mut content = caps.name("content").unwrap().as_str().to_owned();
let last_indent = caps.name("indent").unwrap().as_str().len();
i += 1;
// parse additional lines of the listitem
loop {
line = line_iter.next();
if line.is_none() || (prev_newline && !NEW_PARAGRAPH.is_match(line.unwrap())) {
break;
}
if LIST_BEGIN.is_match(line.unwrap()) {
let caps = LIST_BEGIN.captures(line.unwrap()).unwrap();
let indent = caps.name("indent").unwrap().as_str().len();
if indent < 2 || indent <= last_indent {
break;
}
}
// newline means we start a new paragraph
if line.unwrap().is_empty() {
prev_newline = true;
} else {
prev_newline = false;
}
content.push('\n');
let caps = INDENTED.captures(line.unwrap()).unwrap();
content.push_str(&caps.name("content").unwrap().as_str());
i += 1;
}
contents.push(parse_blocks(&content));
}
let mut list_contents = vec![];
for c in contents {
if is_paragraph || c.len() > 1 {
list_contents.push(ListItem::Paragraph(c));
} else if let Paragraph(content) = c[0].clone() {
list_contents.push(ListItem::Simple(content));
}
}
if i > 0 {
return Some((UnorderedList(list_contents), i));
}
None
}
#[cfg(test)]
mod test {
use super::parse_unordered_list;
use parser::Block::UnorderedList;
#[test]
fn finds_list() {
match parse_unordered_list(&vec!["* A list", "* is good"]) {
Some((UnorderedList(_), 2)) => (),
x => panic!("Found {:?}", x),
}
match parse_unordered_list(&vec!["* A list", "* is good", "laksjdnflakdsjnf"]) {
Some((UnorderedList(_), 3)) => (),
x => panic!("Found {:?}", x),
}
}
#[test]
fn knows_when_to_stop() {
match parse_unordered_list(&vec!["* A list", "* is good", "", "laksjdnflakdsjnf"]) {
Some((UnorderedList(_), 3)) => (),
x => panic!("Found {:?}", x),
}
match parse_unordered_list(&vec!["* A list", "", "laksjdnflakdsjnf"]) {
Some((UnorderedList(_), 2)) => (),
x => panic!("Found {:?}", x),
}
}
#[test]
fn no_false_positives() {
assert_eq!(parse_unordered_list(&vec!["test * test"]), None);
}
#[test]
fn no_early_matching() {
assert_eq!(
parse_unordered_list(&vec!["test", "* whot", "* a list"]),
None
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/hr.rs | src/parser/block/hr.rs | use parser::Block;
use parser::Block::Hr;
use regex::Regex;
pub fn parse_hr(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref HORIZONTAL_RULE: Regex = Regex::new(r"^(===+)$|^(---+)$").unwrap();
}
if HORIZONTAL_RULE.is_match(lines[0]) {
return Some((Hr, 1));
}
None
}
#[cfg(test)]
mod test {
use super::parse_hr;
use parser::Block::Hr;
#[test]
fn finds_hr() {
assert_eq!(parse_hr(&vec!["-------"]).unwrap(), (Hr, 1));
assert_eq!(parse_hr(&vec!["---"]).unwrap(), (Hr, 1));
assert_eq!(
parse_hr(&vec!["----------------------------"]).unwrap(),
(Hr, 1)
);
assert_eq!(parse_hr(&vec!["-------", "abc"]).unwrap(), (Hr, 1));
assert_eq!(parse_hr(&vec!["======="]).unwrap(), (Hr, 1));
assert_eq!(parse_hr(&vec!["==="]).unwrap(), (Hr, 1));
assert_eq!(
parse_hr(&vec!["============================"]).unwrap(),
(Hr, 1)
);
assert_eq!(parse_hr(&vec!["=======", "abc"]).unwrap(), (Hr, 1));
}
#[test]
fn no_false_positives() {
assert_eq!(parse_hr(&vec!["a-------"]), None);
assert_eq!(parse_hr(&vec!["--- a"]), None);
assert_eq!(parse_hr(&vec!["--a-"]), None);
assert_eq!(parse_hr(&vec!["-------====--------------"]), None);
assert_eq!(parse_hr(&vec!["a======"]), None);
assert_eq!(parse_hr(&vec!["=== a"]), None);
assert_eq!(parse_hr(&vec!["==a="]), None);
assert_eq!(parse_hr(&vec!["=======---================="]), None);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/mod.rs | src/parser/block/mod.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Paragraph;
use parser::Span::{Break, Text};
mod atx_header;
mod blockquote;
mod code_block;
mod hr;
mod link_reference;
mod ordered_list;
mod setext_header;
mod unordered_list;
use self::atx_header::parse_atx_header;
use self::blockquote::parse_blockquote;
use self::code_block::parse_code_block;
use self::hr::parse_hr;
use self::link_reference::parse_link_reference;
use self::ordered_list::parse_ordered_list;
use self::setext_header::parse_setext_header;
use self::unordered_list::parse_unordered_list;
pub fn parse_blocks(md: &str) -> Vec<Block> {
let mut blocks = vec![];
let mut t = vec![];
let lines: Vec<&str> = md.lines().collect();
let mut i = 0;
while i < lines.len() {
match parse_block(&lines[i..lines.len()]) {
// if a block is found
Some((block, consumed_lines)) => {
// the current paragraph has ended,
// push it to our blocks
if !t.is_empty() {
blocks.push(Paragraph(t));
t = Vec::new();
}
blocks.push(block);
i += consumed_lines;
}
// no known element, let's make this a paragraph
None => {
// empty linebreak => new paragraph
if lines[i].is_empty() && !t.is_empty() {
blocks.push(Paragraph(t));
t = Vec::new();
}
let spans = parse_spans(lines[i]);
// add a newline between linebreaks
// except when we have a break element or nothing
match (t.last(), spans.first()) {
(Some(&Break), _) => {}
(_, None) => {}
(None, _) => {}
_ => t.push(Text("\n".to_owned())),
}
t.extend_from_slice(&spans);
i += 1;
}
}
}
if !t.is_empty() {
blocks.push(Paragraph(t));
}
blocks
}
fn parse_block(lines: &[&str]) -> Option<(Block, usize)> {
pipe_opt!(
lines
=> parse_hr
=> parse_atx_header
=> parse_code_block
=> parse_blockquote
=> parse_unordered_list
=> parse_ordered_list
=> parse_link_reference
// Must not match before anything else. See: https://spec.commonmark.org/0.29/#setext-headings
=> parse_setext_header
)
}
#[cfg(test)]
mod test {
use super::parse_blocks;
use parser::Block::{Blockquote, CodeBlock, Header, Hr, Paragraph};
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_blocks("### Test"),
vec![Header(vec![Text("Test".to_owned())], 3)]
);
}
#[test]
fn finds_setext_header() {
assert_eq!(
parse_blocks("Test\n-------"),
vec![Header(vec![Text("Test".to_owned())], 2)]
);
assert_eq!(
parse_blocks("Test\n======="),
vec![Header(vec![Text("Test".to_owned())], 1)]
);
}
#[test]
fn finds_hr() {
assert_eq!(parse_blocks("-------"), vec![Hr]);
assert_eq!(parse_blocks("======="), vec![Hr]);
}
#[test]
fn finds_code_block() {
assert_eq!(
parse_blocks(" this is code\n and this as well"),
vec![CodeBlock(None, "this is code\nand this as well".to_owned())]
);
assert_eq!(
parse_blocks("```\nthis is code\nand this as well\n```"),
vec![CodeBlock(
Some(String::new()),
"this is code\nand this as well".to_owned()
)]
);
}
#[test]
fn finds_blockquotes() {
assert_eq!(
parse_blocks("> One Paragraph\n>\n> ## H2 \n>\n"),
vec![Blockquote(vec![
Paragraph(vec![Text("One Paragraph".to_owned())]),
Header(vec![Text("H2".to_owned())], 2)
])]
);
assert_eq!(
parse_blocks("> One Paragraph\n>\n> > Another blockquote\n>\n"),
vec![Blockquote(vec![
Paragraph(vec![Text("One Paragraph".to_owned())]),
Blockquote(vec![Paragraph(vec![Text("Another blockquote".to_owned())])])
])]
);
assert_eq!(
parse_blocks("> > One Paragraph\n> >\n> > Another blockquote\n>\n"),
vec![Blockquote(vec![Blockquote(vec![
Paragraph(vec![Text("One Paragraph".to_owned())]),
Paragraph(vec![Text("Another blockquote".to_owned())])
])])]
);
assert_eq!(
parse_blocks("> One Paragraph, just > text \n>\n"),
vec![Blockquote(vec![Paragraph(vec![Text(
"One Paragraph, just > text".to_owned()
)])])]
);
assert_eq!(
parse_blocks("> One Paragraph\n>\n> just > text \n>\n"),
vec![Blockquote(vec![
Paragraph(vec![Text("One Paragraph".to_owned())]),
Paragraph(vec![Text("just > text".to_owned())])
])]
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/ordered_list.rs | src/parser/block/ordered_list.rs | use parser::block::parse_blocks;
use parser::Block;
use parser::Block::{OrderedList, Paragraph};
use parser::{ListItem, OrderedListType};
use regex::Regex;
pub fn parse_ordered_list(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref LIST_BEGIN: Regex =
Regex::new(r"^(?P<indent> *)(?P<numbering>[0-9.]+|[aAiI]+\.) (?P<content>.*)").unwrap();
static ref NEW_PARAGRAPH: Regex = Regex::new(r"^ +").unwrap();
static ref INDENTED: Regex = Regex::new(r"^ {0,4}(?P<content>.*)").unwrap();
}
// if the beginning doesn't match a list don't even bother
if !LIST_BEGIN.is_match(lines[0]) {
return None;
}
// a vec holding the contents and indentation
// of each list item
let mut contents = vec![];
let mut prev_newline = false;
let mut is_paragraph = false;
// counts the number of parsed lines to return
let mut i = 0;
let mut line_iter = lines.iter();
let mut line = line_iter.next();
let mut list_num_opt = None;
// loop for list items
loop {
if line.is_none() || !LIST_BEGIN.is_match(line.unwrap()) {
break;
}
if prev_newline {
is_paragraph = true;
prev_newline = false;
}
let caps = LIST_BEGIN.captures(line.unwrap()).unwrap();
let mut content = caps.name("content").unwrap().as_str().to_owned();
let last_indent = caps.name("indent").unwrap().as_str().len();
//We use the first list type found
// TODO: utf-8 safe?
list_num_opt = list_num_opt
.or_else(|| Some(caps.name("numbering").unwrap().as_str()[0..1].to_owned()));
i += 1;
// parse additional lines of the listitem
loop {
line = line_iter.next();
if line.is_none() || (prev_newline && !NEW_PARAGRAPH.is_match(line.unwrap())) {
break;
}
if LIST_BEGIN.is_match(line.unwrap()) {
let caps = LIST_BEGIN.captures(line.unwrap()).unwrap();
let indent = caps.name("indent").unwrap().as_str().len();
if indent < 2 || indent <= last_indent {
break;
}
}
// newline means we start a new paragraph
if line.unwrap().is_empty() {
prev_newline = true;
} else {
prev_newline = false;
}
content.push('\n');
let caps = INDENTED.captures(line.unwrap()).unwrap();
content.push_str(&caps.name("content").unwrap().as_str());
i += 1;
}
contents.push(parse_blocks(&content));
}
let mut list_contents = vec![];
for c in contents {
if is_paragraph || c.len() > 1 {
list_contents.push(ListItem::Paragraph(c));
} else if let Paragraph(content) = c[0].clone() {
list_contents.push(ListItem::Simple(content));
}
}
if i > 0 {
let list_num = list_num_opt.unwrap_or("1".to_string());
return Some((
OrderedList(list_contents, OrderedListType::from_str(&list_num)),
i,
));
}
None
}
#[cfg(test)]
mod test {
use super::parse_ordered_list;
use parser::Block::OrderedList;
use parser::ListItem::Paragraph;
use parser::OrderedListType;
#[test]
fn finds_list() {
match parse_ordered_list(&vec!["1. A list", "2. is good"]) {
Some((OrderedList(_, lt), 2)) if lt == OrderedListType::Numeric => (),
x => panic!("Found {:?}", x),
}
match parse_ordered_list(&vec!["a. A list", "b. is good", "laksjdnflakdsjnf"]) {
Some((OrderedList(_, lt), 3)) if lt == OrderedListType::Lowercase => (),
x => panic!("Found {:?}", x),
}
match parse_ordered_list(&vec!["A. A list", "B. is good", "laksjdnflakdsjnf"]) {
Some((OrderedList(_, lt), 3)) if lt == OrderedListType::Uppercase => (),
x => panic!("Found {:?}", x),
}
}
#[test]
fn knows_when_to_stop() {
match parse_ordered_list(&vec!["i. A list", "ii. is good", "", "laksjdnflakdsjnf"]) {
Some((OrderedList(_, lt), 3)) if lt == OrderedListType::LowercaseRoman => (),
x => panic!("Found {:?}", x),
}
match parse_ordered_list(&vec!["I. A list", "", "laksjdnflakdsjnf"]) {
Some((OrderedList(_, lt), 2)) if lt == OrderedListType::UppercaseRoman => (),
x => panic!("Found {:?}", x),
}
}
#[test]
fn multi_level_list() {
match parse_ordered_list(&vec![
"1. A list",
" 1.1. One point one",
" 1.2. One point two",
]) {
Some((OrderedList(ref items, lt), 3)) if lt == OrderedListType::Numeric => {
match &items[0] {
&Paragraph(ref items) => match &items[1] {
&OrderedList(_, ref lt1) if lt1 == &OrderedListType::Numeric => (),
x => panic!("Found {:?}", x),
},
x => panic!("Found {:?}", x),
}
}
x => panic!("Found {:?}", x),
}
}
#[test]
fn no_false_positives() {
assert_eq!(parse_ordered_list(&vec!["test 1. test"]), None);
}
#[test]
fn no_early_matching() {
assert_eq!(
parse_ordered_list(&vec!["test", "1. not", "2. a list"]),
None
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/link_reference.rs | src/parser/block/link_reference.rs | use parser::Block;
use parser::Block::LinkReference;
use regex::Regex;
pub fn parse_link_reference(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref LINK_REFERENCE_SINGLE_LINE: Regex = Regex::new("^\\s*\\[(?P<id>[^\\[\\]]+)\\]:\\s*(?P<url>\\S+)(?:\\s+(?:'(?P<title1>.*)'|\"(?P<title2>.*)\"|\\((?P<title3>.*?)\\)))?\n?").unwrap();
static ref LINK_REFERENCE_FIRST_LINE: Regex = Regex::new("^\\s*\\[(?P<id>[^\\[\\]]+)\\]:").unwrap();
static ref LINK_REFERENCE_SECOND_LINE: Regex = Regex::new("\\s*(?P<url>\\S+)(?:\\s+(?:'(?P<title1>.*)'|\"(?P<title2>.*)\"|\\((?P<title3>.*?)\\)))?\n?").unwrap();
}
if LINK_REFERENCE_SINGLE_LINE.is_match(lines[0]) {
let caps = LINK_REFERENCE_SINGLE_LINE.captures(lines[0]).unwrap();
return Some((
LinkReference(
caps.name("id").unwrap().as_str().to_lowercase(),
caps.name("url").unwrap().as_str().to_owned(),
caps.name("title1")
.or_else(|| caps.name("title2"))
.or_else(|| caps.name("title3"))
.map(|s| s.as_str().to_owned()),
),
1,
));
}
if LINK_REFERENCE_FIRST_LINE.is_match(lines[0]) && LINK_REFERENCE_SECOND_LINE.is_match(lines[1])
{
let caps1 = LINK_REFERENCE_FIRST_LINE.captures(lines[0]).unwrap();
let caps2 = LINK_REFERENCE_SECOND_LINE.captures(lines[1]).unwrap();
return Some((
LinkReference(
caps1.name("id").unwrap().as_str().to_lowercase(),
caps2.name("url").unwrap().as_str().to_owned(),
caps2
.name("title1")
.or_else(|| caps2.name("title2"))
.or_else(|| caps2.name("title3"))
.map(|s| s.as_str().to_owned()),
),
2,
));
}
None
}
#[cfg(test)]
mod test {
use super::parse_link_reference;
use parser::Block::LinkReference;
#[test]
fn finds_link_reference() {
assert_eq!(
parse_link_reference(&vec!["[Test]: https://example.com"]).unwrap(),
(
LinkReference("test".to_owned(), "https://example.com".to_owned(), None),
1
)
);
assert_eq!(
parse_link_reference(&vec!["[Test]: https://example.com \"example\""]).unwrap(),
(
LinkReference(
"test".to_owned(),
"https://example.com".to_owned(),
Some("example".to_owned())
),
1
)
);
assert_eq!(
parse_link_reference(&vec!["[Test]: https://example.com (example)"]).unwrap(),
(
LinkReference(
"test".to_owned(),
"https://example.com".to_owned(),
Some("example".to_owned())
),
1
)
);
assert_eq!(
parse_link_reference(&vec!["[Test]: https://example.com 'example'"]).unwrap(),
(
LinkReference(
"test".to_owned(),
"https://example.com".to_owned(),
Some("example".to_owned())
),
1
)
);
assert_eq!(
parse_link_reference(&vec!["[Test]: https://example.com 'example'"])
.unwrap(),
(
LinkReference(
"test".to_owned(),
"https://example.com".to_owned(),
Some("example".to_owned())
),
1
)
);
assert_eq!(
parse_link_reference(&vec!["[Test]:", "https://example.com \"example\""]).unwrap(),
(
LinkReference(
"test".to_owned(),
"https://example.com".to_owned(),
Some("example".to_owned())
),
2
)
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/setext_header.rs | src/parser/block/setext_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_setext_header(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref HORIZONTAL_RULE_1: Regex = Regex::new(r"^===+$").unwrap();
static ref HORIZONTAL_RULE_2: Regex = Regex::new(r"^---+$").unwrap();
}
if lines.len() > 1 && !lines[0].is_empty() {
if HORIZONTAL_RULE_1.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 1), 2));
} else if HORIZONTAL_RULE_2.is_match(lines[1]) {
return Some((Header(parse_spans(lines[0]), 2), 2));
}
}
None
}
#[cfg(test)]
mod test {
use super::parse_setext_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_setext_header(&vec!["Test", "=========="]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["Test", "----------"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 2), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "==="]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 1), 2)
);
assert_eq!(
parse_setext_header(&vec!["This is a test", "---"]).unwrap(),
(Header(vec![Text("This is a test".to_owned())], 2), 2)
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/atx_header.rs | src/parser/block/atx_header.rs | use parser::span::parse_spans;
use parser::Block;
use parser::Block::Header;
use regex::Regex;
pub fn parse_atx_header(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref ATX_HEADER_RE: Regex =
Regex::new(r"^(?P<level>#{1,6})\s(?P<text>.*?)(?:\s#*)?$").unwrap();
}
if ATX_HEADER_RE.is_match(lines[0]) {
let caps = ATX_HEADER_RE.captures(lines[0]).unwrap();
return Some((
Header(
parse_spans(caps.name("text").unwrap().as_str()),
caps.name("level").unwrap().as_str().len(),
),
1,
));
}
None
}
#[cfg(test)]
mod test {
use super::parse_atx_header;
use parser::Block::Header;
use parser::Span::Text;
#[test]
fn finds_atx_header() {
assert_eq!(
parse_atx_header(&vec!["### Test", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 3), 1)
);
assert_eq!(
parse_atx_header(&vec!["# Test", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 1)
);
assert_eq!(
parse_atx_header(&vec!["###### Test", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 6), 1)
);
assert_eq!(
parse_atx_header(&vec!["### Test and a pretty long sentence", "testtest"]).unwrap(),
(
Header(vec![Text("Test and a pretty long sentence".to_owned())], 3),
1
)
);
}
#[test]
fn ignores_closing_hashes() {
assert_eq!(
parse_atx_header(&vec!["### Test ###", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 3), 1)
);
assert_eq!(
parse_atx_header(&vec!["# Test #", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 1), 1)
);
assert_eq!(
parse_atx_header(&vec!["###### Test ##", "testtest"]).unwrap(),
(Header(vec![Text("Test".to_owned())], 6), 1)
);
assert_eq!(
parse_atx_header(&vec![
"### Test and a pretty long sentence #########",
"testtest"
])
.unwrap(),
(
Header(vec![Text("Test and a pretty long sentence".to_owned())], 3),
1
)
);
}
#[test]
fn no_false_positives() {
assert_eq!(parse_atx_header(&vec!["####### Test", "testtest"]), None);
assert_eq!(parse_atx_header(&vec!["Test #", "testtest"]), None);
assert_eq!(parse_atx_header(&vec!["T ### est #", "testtest"]), None);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/block/code_block.rs | src/parser/block/code_block.rs | use parser::Block;
use parser::Block::CodeBlock;
use regex::Regex;
pub fn parse_code_block(lines: &[&str]) -> Option<(Block, usize)> {
lazy_static! {
static ref CODE_BLOCK_SPACES: Regex = Regex::new(r"^ {4}").unwrap();
static ref CODE_BLOCK_TABS: Regex = Regex::new(r"^\t").unwrap();
static ref CODE_BLOCK_BACKTICKS: Regex = Regex::new(r"^```").unwrap();
}
let mut content = String::new();
let mut lang: Option<String> = None;
let mut line_number = 0;
let mut backtick_opened = false;
let mut backtick_closed = false;
for line in lines {
if !backtick_opened && CODE_BLOCK_SPACES.is_match(line) {
if line_number > 0 && !content.is_empty() {
content.push('\n');
}
// remove top-level spaces
content.push_str(&line[4..line.len()]);
line_number += 1;
} else if !backtick_opened && CODE_BLOCK_TABS.is_match(line) {
if line_number > 0 && !content.is_empty() {
content.push('\n');
}
if !(line_number == 0 && line.trim().is_empty()) {
// remove top-level spaces
content.push_str(&line[1..line.len()]);
}
line_number += 1;
} else if CODE_BLOCK_BACKTICKS.is_match(line) {
line_number += 1;
if !backtick_opened && !(line_number == 0 && line.get(3..).is_some()) {
lang = Some(String::from(line.get(3..).unwrap()));
backtick_opened = true;
} else if backtick_opened {
backtick_closed = true;
break;
}
} else if backtick_opened {
content.push_str(line);
content.push('\n');
line_number += 1;
} else {
break;
}
}
if line_number > 0 && ((backtick_opened && backtick_closed) || !backtick_opened) {
return Some((
CodeBlock(lang, content.trim_matches('\n').to_owned()),
line_number,
));
}
None
}
#[cfg(test)]
mod test {
use super::parse_code_block;
use parser::Block::CodeBlock;
#[test]
fn finds_code_block() {
assert_eq!(
parse_code_block(&vec![" Test"]).unwrap(),
((CodeBlock(None, "Test".to_owned()), 1))
);
assert_eq!(
parse_code_block(&vec![" Test", " this"]).unwrap(),
((CodeBlock(None, "Test\nthis".to_owned()), 2))
);
assert_eq!(
parse_code_block(&vec!["```testlang", "Test", "this", "```"]).unwrap(),
((
CodeBlock(Some(String::from("testlang")), "Test\nthis".to_owned()),
4
))
);
}
#[test]
fn knows_when_to_stop() {
assert_eq!(
parse_code_block(&vec![" Test", " this", "stuff", " now"]).unwrap(),
((CodeBlock(None, "Test\nthis".to_owned()), 2))
);
}
#[test]
fn no_false_positives() {
assert_eq!(parse_code_block(&vec![" Test"]), None);
}
#[test]
fn no_early_matching() {
assert_eq!(
parse_code_block(&vec!["Test", " this", "stuff", " now"]),
None
);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/br.rs | src/parser/span/br.rs | use parser::Span;
use parser::Span::Break;
use regex::Regex;
pub fn parse_break(text: &str) -> Option<(Span, usize)> {
lazy_static! {
static ref BR: Regex = Regex::new(r"^ {2}$").unwrap();
}
if BR.is_match(text) {
return Some((Break, 2));
}
None
}
#[cfg(test)]
mod test {
use super::parse_break;
use parser::Span::Break;
#[test]
fn finds_breaks() {
assert_eq!(parse_break(" "), Some((Break, 2)));
}
#[test]
fn no_false_positives() {
assert_eq!(parse_break("this is a test "), None);
assert_eq!(parse_break(" "), None);
assert_eq!(parse_break(" a"), None);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/link.rs | src/parser/span/link.rs | use parser::span::parse_spans;
use parser::Span;
use parser::Span::{Link, RefLink};
use regex::Regex;
pub fn parse_link(text: &str) -> Option<(Span, usize)> {
lazy_static! {
// This is the second part of the regex, that matches the reference or url and title.
static ref LINK_ATTR_STR: &'static str = "(?:\\s*\\[(?P<ref>.*)\\]|\\((?P<url>.*?)(?:\\s*\"(?P<title>.*?)\")?\\s*\\))?";
// This regex does not sufficiently cover the edge case where there are brackets (e.g. for
// images) inside a link text. It's sufficient for identifying links anyway, we'll properly
// figure out the braces below.
static ref LINK_STR: String = "^\\[(?P<text>.*?)\\]".to_owned() + &LINK_ATTR_STR;
static ref LINK: Regex =
Regex::new(&LINK_STR).unwrap();
static ref LINK_ATTR: Regex =
Regex::new(&("^".to_owned() + &LINK_ATTR_STR)).unwrap();
}
if LINK.is_match(text) {
let mut chars = text.chars();
let mut content = String::new();
// This tracks open vs. closed braces, it starts at 1 because we have an initial
// open brace, we want to reach 0 to find the closing brace for the link.
assert_eq!('[', chars.next().unwrap());
let mut brace_level = 1;
// Walk through the link content matching braces to ensure that we find the correct closing
// brace for the link, e.g. `[a link with  inside](link)` should not only parse
// `[a link with ![an image]`.
while let Some(next) = chars.next() {
// Skip escaped braces.
if next == '\\' {
content.push(next);
if let Some(x) = chars.next() {
content.push(x);
}
continue;
} else if next == ']' {
brace_level -= 1;
} else if next == '[' {
brace_level += 1;
}
if brace_level == 0 {
break;
}
content.push(next);
}
// Unmatched braces inside a link text are not supported on purpose, e.g. consider the case
// of `The brace character ([) is parsed [like this](https://example.com)`. Here we want
// to prefer the shorter link at the end instead of starting to parse at the first `[`.
if brace_level != 0 {
return None;
}
let caps = LINK_ATTR.captures(chars.as_str()).unwrap();
// Check whether we have an inline link (in which case the "url" field is captured),
// whether there's an explicit reference provided or if we should implicitly use the link
// content as reference.
if let Some(url) = caps.name("url") {
let url = url.as_str().trim().to_owned();
let title = caps.name("title").map(|t| t.as_str().to_owned());
let len = 1 + content.len() + 1 + caps[0].len();
return Some((Link(parse_spans(&content), url, title), len));
} else if let Some(reference) = caps.name("ref") {
let reference = reference.as_str().trim().to_lowercase();
let len = 1 + content.len() + 1 + caps[0].len();
let raw = ["[", &content, "]", &caps[0]].join("");
return Some((RefLink(parse_spans(&content), reference, raw), len));
} else {
// Leave the reference empty, the HTML generating code will try to match both reference
// and slugified content.
let reference = "".to_owned();
let len = 1 + content.len() + 1;
let raw = ["[", &content, "]"].join("");
return Some((RefLink(parse_spans(&content), reference, raw), len));
}
}
None
}
#[cfg(test)]
mod test {
use parser::span::parse_link;
use parser::Span::{Image, Link, Literal, RefLink, Text};
#[test]
fn finds_link() {
assert_eq!(
parse_link("[an example](example.com) test"),
Some((
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
None
),
25
))
);
assert_eq!(
parse_link("[an example][example]"),
Some((
RefLink(
vec![Text("an example".to_owned())],
"example".to_owned(),
"[an example][example]".to_owned()
),
21
))
);
assert_eq!(
parse_link("[](example.com) test"),
Some((Link(vec![], "example.com".to_owned(), None), 15))
);
assert_eq!(
parse_link("[an example]() test"),
Some((
Link(vec![Text("an example".to_owned())], "".to_owned(), None),
14
))
);
assert_eq!(
parse_link("[]() test"),
Some((Link(vec![], "".to_owned(), None), 4))
);
assert_eq!(
parse_link("[()] test"),
Some((
RefLink(
vec![Text("()".to_owned())],
"".to_owned(),
"[()]".to_owned()
),
4
))
);
assert_eq!(
parse_link("[an example](example.com \"Title\") test"),
Some((
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
Some("Title".to_owned())
),
33
))
);
assert_eq!(
parse_link("[an example](example.com) test [a link](example.com)"),
Some((
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
None
),
25
))
);
}
#[test]
fn brackets_in_link() {
assert_eq!(
parse_link("[](example.com) test [a link](example.com)"),
Some((
Link(
vec![Image("test".to_owned(), "abc".to_owned(), None)],
"example.com".to_owned(),
None
),
27
))
);
assert_eq!(
parse_link("[huh[]wow](example.com)"),
Some((
Link(
vec![
Text("huh".to_owned()),
RefLink(vec![], "".to_owned(), "[]".to_owned()),
Text("wow".to_owned())
],
"example.com".to_owned(),
None
),
23
))
);
assert_eq!(
parse_link("[huh\\[wow](example.com)"),
Some((
Link(
vec![Text("huh".to_owned()), Literal('['), Text("wow".to_owned())],
"example.com".to_owned(),
None
),
23
))
);
assert_eq!(parse_link("[huh[wow](example.com)"), None);
assert_eq!(
parse_link("[an example](example.com \"Title (huh!)\") test"),
Some((
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
Some("Title (huh!)".to_owned())
),
40
))
);
}
#[test]
fn space_length() {
assert_eq!(
parse_link("[an example] [example]"),
Some((
RefLink(
vec![Text("an example".to_owned())],
"example".to_owned(),
"[an example] [example]".to_owned()
),
27
))
);
assert_eq!(
parse_link("[an example](example.com \"Title\") test"),
Some((
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
Some("Title".to_owned())
),
43
))
);
}
#[test]
fn no_false_positives() {
assert_eq!(parse_link("()[] testing things test"), None);
assert_eq!(parse_link("[[][[]] testing things test"), None);
}
#[test]
fn no_early_matching() {
assert_eq!(parse_link("were [an example](example.com) test"), None);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/image.rs | src/parser/span/image.rs | use parser::Span;
use parser::Span::Image;
use regex::Regex;
pub fn parse_image(text: &str) -> Option<(Span, usize)> {
lazy_static! {
static ref IMAGE: Regex =
Regex::new("^!\\[(?P<text>.*?)\\]\\((?P<url>.*?)(?:\\s\"(?P<title>.*?)\")?\\)")
.unwrap();
}
if IMAGE.is_match(text) {
let caps = IMAGE.captures(text).unwrap();
let text = if let Some(mat) = caps.name("text") {
mat.as_str().to_owned()
} else {
"".to_owned()
};
let url = if let Some(mat) = caps.name("url") {
mat.as_str().to_owned()
} else {
"".to_owned()
};
let title = if let Some(mat) = caps.name("title") {
Some(mat.as_str().to_owned())
} else {
None
};
// TODO correctly get whitespace length between url and title
let len = text.len() + url.len() + 5 + title.clone().map_or(0, |t| t.len() + 3);
return Some((Image(text, url, title), len));
}
None
}
#[test]
fn finds_image() {
assert_eq!(
parse_image(" test"),
Some((
Image("an example".to_owned(), "example.com".to_owned(), None),
26
))
);
assert_eq!(
parse_image(" test"),
Some((Image("".to_owned(), "example.com".to_owned(), None), 16))
);
assert_eq!(
parse_image("![an example]() test"),
Some((Image("an example".to_owned(), "".to_owned(), None), 15))
);
assert_eq!(
parse_image("![]() test"),
Some((Image("".to_owned(), "".to_owned(), None), 5))
);
assert_eq!(
parse_image(" test"),
Some((
Image(
"an example".to_owned(),
"example.com".to_owned(),
Some("Title".to_owned())
),
34
))
);
assert_eq!(
parse_image(" test [a link](example.com)"),
Some((
Image("an example".to_owned(), "example.com".to_owned(), None),
26
))
);
}
#[test]
fn no_false_positives() {
assert_eq!(parse_image("![()] testing things test"), None);
assert_eq!(parse_image("!()[] testing things test"), None);
}
#[test]
fn no_early_matching() {
assert_eq!(parse_image("were  test"), None);
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/code.rs | src/parser/span/code.rs | use parser::Span;
use parser::Span::Code;
use regex::Regex;
pub fn parse_code(text: &str) -> Option<(Span, usize)> {
lazy_static! {
static ref CODE_SINGLE: Regex = Regex::new(r"^`(?P<text>.+?)`").unwrap();
static ref CODE_DOUBLE: Regex = Regex::new(r"^``(?P<text>.+?)``").unwrap();
}
if CODE_DOUBLE.is_match(text) {
let caps = CODE_DOUBLE.captures(text).unwrap();
let t = caps.name("text").unwrap().as_str();
return Some((Code(t.to_owned()), t.len() + 4));
} else if CODE_SINGLE.is_match(text) {
let caps = CODE_SINGLE.captures(text).unwrap();
let t = caps.name("text").unwrap().as_str();
return Some((Code(t.to_owned()), t.len() + 2));
}
None
}
#[test]
fn finds_code() {
assert_eq!(
parse_code("`testing things` test"),
Some((Code("testing things".to_owned()), 16))
);
assert_eq!(
parse_code("``testing things`` test"),
Some((Code("testing things".to_owned()), 18))
);
assert_eq!(
parse_code("``testing things`` things`` test"),
Some((Code("testing things".to_owned()), 18))
);
assert_eq!(
parse_code("`w` testing things test"),
Some((Code("w".to_owned()), 3))
);
assert_eq!(
parse_code("`w`` testing things test"),
Some((Code("w".to_owned()), 3))
);
assert_eq!(
parse_code("``w`` testing things test"),
Some((Code("w".to_owned()), 5))
);
assert_eq!(
parse_code("``w``` testing things test"),
Some((Code("w".to_owned()), 5))
);
}
#[test]
fn no_false_positives() {
assert_eq!(parse_code("`` testing things test"), None);
assert_eq!(parse_code("` test"), None);
}
#[test]
fn no_early_matching() {
assert_eq!(parse_code("were ``testing things`` test"), None);
assert_eq!(parse_code("were `testing things` test"), None);
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/emphasis.rs | src/parser/span/emphasis.rs | use parser::span::parse_spans;
use parser::Span;
use parser::Span::Emphasis;
use regex::Regex;
/// Parses an emphasis span (`_text_` or `*text*`) at the start of `text`.
///
/// The span's contents are themselves parsed for nested spans. Returns the
/// parsed [Span] and the number of bytes consumed, or `None` when `text`
/// does not begin with emphasis.
pub fn parse_emphasis(text: &str) -> Option<(Span, usize)> {
    lazy_static! {
        static ref EMPHASIS_UNDERSCORE: Regex = Regex::new(r"^_(?P<text>.+?)_").unwrap();
        static ref EMPHASIS_STAR: Regex = Regex::new(r"^\*(?P<text>.+?)\*").unwrap();
    }

    // `captures` replaces the previous `is_match` + `captures` pair, which ran
    // each regex twice over the same input.
    if let Some(caps) = EMPHASIS_UNDERSCORE.captures(text) {
        let t = caps.name("text").unwrap().as_str();
        // +2 accounts for the two delimiter characters.
        return Some((Emphasis(parse_spans(t)), t.len() + 2));
    }
    if let Some(caps) = EMPHASIS_STAR.captures(text) {
        let t = caps.name("text").unwrap().as_str();
        return Some((Emphasis(parse_spans(t)), t.len() + 2));
    }
    None
}
#[cfg(test)]
mod test {
    use super::parse_emphasis;
    use parser::Span::{Emphasis, Text};

    #[test]
    fn finds_emphasis() {
        // (input, expected inner text, expected consumed byte count)
        let cases = [
            ("_testing things_ test", "testing things", 16),
            ("*testing things* test", "testing things", 16),
            ("_testing things_ things_ test", "testing things", 16),
            ("_w_ things_ test", "w", 3),
            ("*w* things* test", "w", 3),
            ("_w__ testing things test", "w", 3),
        ];
        for &(input, inner, len) in cases.iter() {
            assert_eq!(
                parse_emphasis(input),
                Some((Emphasis(vec![Text(inner.to_owned())]), len))
            );
        }
    }

    #[test]
    fn no_false_positives() {
        // Unterminated delimiters must not parse.
        for input in &["__ testing things test", "_ test"] {
            assert_eq!(parse_emphasis(input), None);
        }
    }

    #[test]
    fn no_early_matching() {
        // Emphasis must begin at the very start of the input.
        for input in &["were _testing things_ test", "were *testing things* test"] {
            assert_eq!(parse_emphasis(input), None);
        }
    }
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/strong.rs | src/parser/span/strong.rs | use parser::span::parse_spans;
use parser::Span;
use parser::Span::Strong;
use regex::Regex;
/// Parses a strong-emphasis span (`__text__` or `**text**`) at the start of `text`.
///
/// The span's contents are themselves parsed for nested spans. Returns the
/// parsed [Span] and the number of bytes consumed, or `None` when `text`
/// does not begin with strong emphasis.
pub fn parse_strong(text: &str) -> Option<(Span, usize)> {
    lazy_static! {
        static ref STRONG_UNDERSCORE: Regex = Regex::new(r"^__(?P<text>.+?)__").unwrap();
        static ref STRONG_STAR: Regex = Regex::new(r"^\*\*(?P<text>.+?)\*\*").unwrap();
    }

    // `captures` replaces the previous `is_match` + `captures` pair, which ran
    // each regex twice over the same input.
    if let Some(caps) = STRONG_UNDERSCORE.captures(text) {
        let t = caps.name("text").unwrap().as_str();
        // +4 accounts for the two pairs of delimiter characters.
        return Some((Strong(parse_spans(t)), t.len() + 4));
    }
    if let Some(caps) = STRONG_STAR.captures(text) {
        let t = caps.name("text").unwrap().as_str();
        return Some((Strong(parse_spans(t)), t.len() + 4));
    }
    None
}
#[cfg(test)]
mod test {
    use super::parse_strong;
    use parser::Span::{Strong, Text};

    #[test]
    fn finds_strong() {
        // (input, expected inner text, expected consumed byte count)
        let cases = [
            ("__testing things__ test", "testing things", 18),
            ("**testing things** test", "testing things", 18),
            ("__testing things__ things__ test", "testing things", 18),
            ("__w__ things_ test", "w", 5),
            ("**w** things** test", "w", 5),
            ("__w___ testing things test", "w", 5),
        ];
        for &(input, inner, len) in cases.iter() {
            assert_eq!(
                parse_strong(input),
                Some((Strong(vec![Text(inner.to_owned())]), len))
            );
        }
    }

    #[test]
    fn no_false_positives() {
        // Mismatched or unterminated delimiters must not parse.
        let inputs = [
            "__ testing things test",
            "__testing things** test",
            "____ testing things test",
            "** test",
            "**** test",
        ];
        for input in inputs.iter() {
            assert_eq!(parse_strong(input), None);
        }
    }

    #[test]
    fn no_early_matching() {
        // Strong emphasis must begin at the very start of the input.
        for input in &["were __testing things__ test", "were **testing things** test"] {
            assert_eq!(parse_strong(input), None);
        }
    }
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/src/parser/span/mod.rs | src/parser/span/mod.rs | use parser::Span;
use parser::Span::{Literal, Text};
mod br;
mod code;
mod emphasis;
mod image;
mod link;
mod strong;
use self::br::parse_break;
use self::code::parse_code;
use self::emphasis::parse_emphasis;
use self::image::parse_image;
use self::link::parse_link;
use self::strong::parse_strong;
/// Splits `text` into a sequence of [Span]s (escapes, code, strong, emphasis,
/// breaks, images, links) with plain [Text] spans filling the gaps.
///
/// Leading whitespace is trimmed from a leading text span and trailing
/// whitespace from a trailing one; interior whitespace is preserved.
pub fn parse_spans(text: &str) -> Vec<Span> {
    let mut tokens = vec![];
    let mut t = String::new();
    let mut i = 0;
    while i < text.len() {
        match parse_span(&text[i..]) {
            Some((span, consumed_chars)) => {
                if !t.is_empty() {
                    // if this text is on the very left
                    // trim the left whitespace
                    if tokens.is_empty() {
                        t = t.trim_start().to_owned()
                    }
                    tokens.push(Text(t));
                }
                tokens.push(span);
                t = String::new();
                i += consumed_chars;
            }
            None => {
                // No span starts here: accumulate exactly one (possibly
                // multi-byte) character into the pending text buffer.
                // `unwrap` is safe: i < text.len() and i is always left on a
                // char boundary by the arms above.
                let c = text[i..].chars().next().unwrap();
                t.push(c);
                i += c.len_utf8();
            }
        }
    }
    if !t.is_empty() {
        // if this text is on the very left
        // trim the left whitespace
        if tokens.is_empty() {
            t = t.trim_start().to_owned();
        }
        // we're at the very end of this line,
        // trim trailing whitespace
        t = t.trim_end().to_owned();
        tokens.push(Text(t));
    }
    tokens
}
/// Parses a backslash escape of a Markdown punctuation character (e.g. `\*`),
/// yielding it as a [Literal] span that consumed two bytes.
fn parse_escape(text: &str) -> Option<(Span, usize)> {
    // The set of characters Markdown allows to be backslash-escaped.
    const ESCAPABLE: &str = "\\`*_{}[]()#+-.!";

    let mut chars = text.chars();
    if chars.next() != Some('\\') {
        return None;
    }
    match chars.next() {
        Some(c) if ESCAPABLE.contains(c) => Some((Literal(c), 2)),
        _ => None,
    }
}
fn parse_span(text: &str) -> Option<(Span, usize)> {
pipe_opt!(
text
=> parse_escape
=> parse_code
=> parse_strong
=> parse_emphasis
=> parse_break
=> parse_image
=> parse_link
)
}
#[cfg(test)]
mod test {
use parser::span::parse_spans;
use parser::Span::{Break, Code, Emphasis, Image, Link, Literal, Strong, Text};
use std::str;
#[test]
fn converts_into_text() {
assert_eq!(
parse_spans("this is a test"),
vec![Text("this is a test".to_owned())]
);
}
#[test]
fn finds_escapes() {
assert_eq!(parse_spans(r"\*"), vec![Literal('*')]);
}
#[test]
fn finds_breaks() {
assert_eq!(
parse_spans("this is a test "),
vec![Text("this is a test".to_owned()), Break]
);
}
#[test]
fn finds_code() {
assert_eq!(
parse_spans("this `is a` test"),
vec![
Text("this ".to_owned()),
Code("is a".to_owned()),
Text(" test".to_owned())
]
);
assert_eq!(
parse_spans("this ``is a`` test"),
vec![
Text("this ".to_owned()),
Code("is a".to_owned()),
Text(" test".to_owned())
]
);
}
#[test]
fn finds_emphasis() {
assert_eq!(
parse_spans("this _is a_ test"),
vec![
Text("this ".to_owned()),
Emphasis(vec![Text("is a".to_owned())]),
Text(" test".to_owned())
]
);
assert_eq!(
parse_spans("this *is a* test"),
vec![
Text("this ".to_owned()),
Emphasis(vec![Text("is a".to_owned())]),
Text(" test".to_owned())
]
);
}
#[test]
fn finds_strong() {
assert_eq!(
parse_spans("this __is a__ test"),
vec![
Text("this ".to_owned()),
Strong(vec![Text("is a".to_owned())]),
Text(" test".to_owned())
]
);
assert_eq!(
parse_spans("this **is a** test"),
vec![
Text("this ".to_owned()),
Strong(vec![Text("is a".to_owned())]),
Text(" test".to_owned())
]
);
}
#[test]
fn finds_link() {
assert_eq!(
parse_spans("this is [an example](example.com) test"),
vec![
Text("this is ".to_owned()),
Link(
vec![Text("an example".to_owned())],
"example.com".to_owned(),
None
),
Text(" test".to_owned())
]
);
}
#[test]
fn finds_image() {
assert_eq!(
parse_spans("this is  test"),
vec![
Text("this is ".to_owned()),
Image("an example".to_owned(), "example.com".to_owned(), None),
Text(" test".to_owned())
]
);
}
#[test]
fn finds_everything() {
assert_eq!(
parse_spans("some text  _emphasis_ __strong__ `teh codez` [a link](example.com) "),
vec![
Text("some text ".to_owned()),
Image("an image".to_owned(), "image.com".to_owned(), None),
Text(" ".to_owned()),
Emphasis(vec![Text("emphasis".to_owned())]),
Text(" ".to_owned()),
Strong(vec![Text("strong".to_owned())]),
Text(" ".to_owned()),
Code("teh codez".to_owned()),
Text(" ".to_owned()),
Link(vec![Text("a link".to_owned())], "example.com".to_owned(), None),
Break
]
);
}
#[test]
fn properly_consumes_multibyte_utf8() {
let test_phrase = str::from_utf8(b"This shouldn\xE2\x80\x99t panic").unwrap();
let _ = parse_spans(&test_phrase);
}
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/tests/lib.rs | tests/lib.rs | extern crate difference;
extern crate markdown;
mod fixtures;
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
johannhof/markdown.rs | https://github.com/johannhof/markdown.rs/blob/68ff3f837a24e47219e204b2c420a4f008951387/tests/fixtures/mod.rs | tests/fixtures/mod.rs | use difference;
use markdown;
use std::fs::File;
use std::io::Read;
use std::path::Path;
/// Renders `tests/fixtures/files/<name>.text` to HTML and asserts that the
/// output matches the expected HTML in `tests/fixtures/files/<name>.html`.
fn compare(name: &str) {
    let html_path = format!("tests/fixtures/files/{}.html", name);
    let text_path = format!("tests/fixtures/files/{}.text", name);

    let mut expected = String::new();
    File::open(Path::new(&html_path))
        .unwrap()
        .read_to_string(&mut expected)
        .unwrap();

    let md = Path::new(&text_path);
    let mut source = String::new();
    File::open(md).unwrap().read_to_string(&mut source).unwrap();

    // Dump the token stream to aid debugging when the diff below fails.
    println!("{:?} -> {:?}", source, markdown::tokenize(&source));
    difference::assert_diff(&expected, &markdown::file_to_html(md).unwrap(), " ", 0);
}
/// Tokenizes the fixture, regenerates Markdown from the tokens, renders the
/// regenerated Markdown, and asserts the result still matches the expected
/// HTML — i.e. tokenize/generate is a lossless round trip for this fixture.
fn roundtrip(name: &str) {
    let html_path = format!("tests/fixtures/files/{}.html", name);
    let text_path = format!("tests/fixtures/files/{}.text", name);

    let mut expected = String::new();
    File::open(Path::new(&html_path))
        .unwrap()
        .read_to_string(&mut expected)
        .unwrap();

    let md = Path::new(&text_path);
    let mut source = String::new();
    File::open(md).unwrap().read_to_string(&mut source).unwrap();

    let tokens = markdown::tokenize(&source);
    println!("{:?}", tokens);
    let regenerated = markdown::generate_markdown(tokens);
    println!("BEGIN\n{}\nEND", regenerated);
    difference::assert_diff(&expected, &markdown::to_html(&regenerated), " ", 0);
}
#[test]
pub fn alt() {
compare("alt")
}
#[test]
pub fn rt_alt() {
roundtrip("alt")
}
#[test]
pub fn blank() {
compare("blank")
}
#[test]
pub fn rt_blank() {
roundtrip("blank")
}
#[test]
pub fn blanks_in_code() {
compare("blanks_in_code")
}
#[test]
pub fn rt_blanks_in_code() {
roundtrip("blanks_in_code")
}
#[test]
pub fn brackets_in_links() {
compare("brackets_in_links")
}
#[test]
pub fn rt_brackets_in_links() {
roundtrip("brackets_in_links")
}
#[test]
pub fn code() {
compare("code")
}
#[test]
pub fn rt_code() {
roundtrip("code")
}
#[test]
pub fn code2() {
compare("code2")
}
#[test]
pub fn rt_code2() {
roundtrip("code2")
}
#[test]
pub fn code3() {
compare("code3")
}
#[test]
pub fn rt_code3() {
roundtrip("code3")
}
#[test]
pub fn easy() {
compare("easy")
}
#[test]
pub fn rt_easy() {
roundtrip("easy")
}
#[test]
pub fn entities() {
compare("entities")
}
#[test]
pub fn rt_entities() {
roundtrip("entities")
}
#[test]
pub fn escaping() {
compare("escaping")
}
#[test]
pub fn rt_escaping() {
roundtrip("escaping")
}
#[test]
pub fn headers() {
compare("headers")
}
#[test]
pub fn rt_headers() {
roundtrip("headers")
}
#[test]
pub fn hex_entities() {
compare("hex_entities")
}
#[test]
pub fn rt_hex_entities() {
roundtrip("hex_entities")
}
#[test]
pub fn list1() {
compare("list1")
}
#[test]
pub fn rt_list1() {
roundtrip("list1")
}
#[test]
pub fn list2() {
compare("list2")
}
#[test]
pub fn rt_list2() {
roundtrip("list2")
}
#[test]
pub fn list3() {
compare("list3")
}
#[test]
pub fn rt_list3() {
roundtrip("list3")
}
#[test]
pub fn lists() {
compare("lists")
}
#[test]
pub fn rt_lists() {
roundtrip("lists")
}
#[test]
pub fn lists8() {
compare("lists8")
}
#[test]
pub fn rt_lists8() {
roundtrip("lists8")
}
#[test]
pub fn lists_ol() {
compare("lists_ol")
}
// NOTE(review): roundtrip test intentionally disabled — presumably
// ordered-list output does not yet survive a tokenize/generate cycle;
// confirm before re-enabling.
//#[test]
//pub fn rt_lists_ol() {
//roundtrip("lists_ol")
//}
#[test]
pub fn links() {
compare("links")
}
#[test]
pub fn rt_links() {
roundtrip("links")
}
#[test]
pub fn numbers() {
compare("numbers")
}
#[test]
pub fn rt_numbers() {
roundtrip("numbers")
}
#[test]
pub fn one() {
compare("one")
}
#[test]
pub fn rt_one() {
roundtrip("one")
}
#[test]
pub fn olist() {
compare("olist")
}
// NOTE(review): disabled like `rt_lists_ol` — ordered-list roundtrip
// appears unsupported; confirm before re-enabling.
//#[test]
//pub fn rt_olist() {
//roundtrip("olist")
//}
#[test]
pub fn paragraph() {
compare("paragraph")
}
#[test]
pub fn rt_paragraph() {
roundtrip("paragraph")
}
#[test]
pub fn paragraphs() {
compare("paragraphs")
}
#[test]
pub fn rt_paragraphs() {
roundtrip("paragraphs")
}
#[test]
pub fn test() {
compare("test")
}
#[test]
pub fn rt_test() {
roundtrip("test")
}
#[test]
pub fn utf8() {
compare("utf8")
}
#[test]
pub fn rt_utf8() {
roundtrip("utf8")
}
#[test]
pub fn wrapping() {
compare("wrapping")
}
#[test]
pub fn rt_wrapping() {
roundtrip("wrapping")
}
| rust | Apache-2.0 | 68ff3f837a24e47219e204b2c420a4f008951387 | 2026-01-04T20:20:52.013926Z | false |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/ezpdb/src/symbol_types.rs | crates/ezpdb/src/symbol_types.rs | use crate::type_info::Type;
use log::warn;
use pdb::{FallibleIterator, TypeIndex};
#[cfg(feature = "serde")]
use serde::Serialize;
use std::cell::RefCell;
use std::collections::HashMap;
use std::convert::{From, TryFrom};
use std::path::PathBuf;
use std::rc::Rc;
pub type TypeRef = Rc<RefCell<Type>>;
pub type TypeIndexNumber = u32;
/// Represents a PDB that has been fully parsed
///
/// Top-level parse output: the PDB's identity (path, GUID, age, timestamp,
/// version, machine type) plus the symbols, types, and modules recovered.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct ParsedPdb {
    /// Path this PDB was loaded from.
    pub path: PathBuf,
    /// Build and compiler information recovered from symbol records.
    pub assembly_info: AssemblyInfo,
    /// Symbols from the public symbol table.
    pub public_symbols: Vec<PublicSymbol>,
    /// Parsed types, keyed by their PDB type-index number.
    pub types: HashMap<TypeIndexNumber, TypeRef>,
    /// Procedure (function) symbols.
    pub procedures: Vec<Procedure>,
    /// Global data symbols.
    pub global_data: Vec<Data>,
    /// Per-module debug information (names, source files, checksums).
    pub debug_modules: Vec<DebugModule>,
    // Internal bookkeeping; deliberately not serialized.
    // NOTE(review): presumably types referenced before their definition was
    // seen — confirm against the type_info parsing code.
    #[cfg_attr(feature = "serde", serde(skip_serializing))]
    pub(crate) forward_references: Vec<Rc<Type>>,
    /// PDB header version.
    pub version: Version,
    /// GUID from the PDB header; serialized as its canonical string form.
    #[cfg_attr(feature = "serde", serde(serialize_with = "serialize_uuid"))]
    pub guid: uuid::Uuid,
    pub age: u32,
    /// Signature field from the PDB header.
    pub timestamp: u32,
    /// Target machine, when the debug-information header provides one.
    pub machine_type: Option<MachineType>,
}
impl ParsedPdb {
    /// Constructs a new [ParsedPdb] with the corresponding path
    ///
    /// Every other field starts empty/zeroed (nil GUID, version `Other(0)`,
    /// no machine type) and is populated during parsing.
    pub fn new(path: PathBuf) -> Self {
        ParsedPdb {
            path,
            assembly_info: AssemblyInfo::default(),
            public_symbols: vec![],
            types: Default::default(),
            procedures: vec![],
            global_data: vec![],
            debug_modules: vec![],
            forward_references: vec![],
            version: Version::Other(0),
            guid: uuid::Uuid::nil(),
            age: 0,
            timestamp: 0,
            machine_type: None,
        }
    }
}
/// Serde helper: serializes a [uuid::Uuid] as its canonical string form.
#[cfg(feature = "serde")]
fn serialize_uuid<S: serde::Serializer>(uuid: &uuid::Uuid, s: S) -> Result<S::Ok, S::Error> {
    let text = uuid.to_string();
    s.serialize_str(&text)
}
/// Serializable mirror of [pdb::MachineType] (PE machine-type constants).
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum MachineType {
    /// The contents of this field are assumed to be applicable to any machine type.
    Unknown,
    /// Matsushita AM33
    Am33,
    /// x64
    Amd64,
    /// ARM little endian
    Arm,
    /// ARM64 little endian
    Arm64,
    /// ARM Thumb-2 little endian
    ArmNT,
    /// EFI byte code
    Ebc,
    /// Intel 386 or later processors and compatible processors
    X86,
    /// Intel Itanium processor family
    Ia64,
    /// Mitsubishi M32R little endian
    M32R,
    /// MIPS16
    Mips16,
    /// MIPS with FPU
    MipsFpu,
    /// MIPS16 with FPU
    MipsFpu16,
    /// Power PC little endian
    PowerPC,
    /// Power PC with floating point support
    PowerPCFP,
    /// MIPS little endian
    R4000,
    /// RISC-V 32-bit address space
    RiscV32,
    /// RISC-V 64-bit address space
    RiscV64,
    /// RISC-V 128-bit address space
    RiscV128,
    /// Hitachi SH3
    SH3,
    /// Hitachi SH3 DSP
    SH3DSP,
    /// Hitachi SH4
    SH4,
    /// Hitachi SH5
    SH5,
    /// Thumb
    Thumb,
    /// MIPS little-endian WCE v2
    WceMipsV2,
    /// Invalid value
    Invalid,
}
impl From<&pdb::MachineType> for MachineType {
    /// Maps each known [pdb::MachineType] variant 1:1 onto the local mirror.
    ///
    /// The catch-all arm panics: any variant not listed here is a hard error
    /// rather than being silently mapped to `Unknown`.
    fn from(machine_type: &pdb::MachineType) -> Self {
        match machine_type {
            pdb::MachineType::Unknown => MachineType::Unknown,
            pdb::MachineType::Am33 => MachineType::Am33,
            pdb::MachineType::Amd64 => MachineType::Amd64,
            pdb::MachineType::Arm => MachineType::Arm,
            pdb::MachineType::Arm64 => MachineType::Arm64,
            pdb::MachineType::ArmNT => MachineType::ArmNT,
            pdb::MachineType::Ebc => MachineType::Ebc,
            pdb::MachineType::X86 => MachineType::X86,
            pdb::MachineType::Ia64 => MachineType::Ia64,
            pdb::MachineType::M32R => MachineType::M32R,
            pdb::MachineType::Mips16 => MachineType::Mips16,
            pdb::MachineType::MipsFpu => MachineType::MipsFpu,
            pdb::MachineType::MipsFpu16 => MachineType::MipsFpu16,
            pdb::MachineType::PowerPC => MachineType::PowerPC,
            pdb::MachineType::PowerPCFP => MachineType::PowerPCFP,
            pdb::MachineType::R4000 => MachineType::R4000,
            pdb::MachineType::RiscV32 => MachineType::RiscV32,
            pdb::MachineType::RiscV64 => MachineType::RiscV64,
            pdb::MachineType::RiscV128 => MachineType::RiscV128,
            pdb::MachineType::SH3 => MachineType::SH3,
            pdb::MachineType::SH3DSP => MachineType::SH3DSP,
            pdb::MachineType::SH4 => MachineType::SH4,
            pdb::MachineType::SH5 => MachineType::SH5,
            pdb::MachineType::Thumb => MachineType::Thumb,
            pdb::MachineType::WceMipsV2 => MachineType::WceMipsV2,
            pdb::MachineType::Invalid => MachineType::Invalid,
            other => panic!("unsupported machine type encountered: {:?}", other),
        }
    }
}
/// PDB header version, mirroring [pdb::HeaderVersion].
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum Version {
    V41,
    V50,
    V60,
    V70,
    V110,
    /// Any other raw version value.
    Other(u32),
}
impl From<&pdb::HeaderVersion> for Version {
    /// Maps each known [pdb::HeaderVersion] variant 1:1; unknown raw values
    /// land in `Other`. The catch-all panics on variants this crate has never
    /// seen (new upstream additions).
    fn from(version: &pdb::HeaderVersion) -> Self {
        match version {
            pdb::HeaderVersion::V41 => Version::V41,
            pdb::HeaderVersion::V50 => Version::V50,
            pdb::HeaderVersion::V60 => Version::V60,
            pdb::HeaderVersion::V70 => Version::V70,
            pdb::HeaderVersion::V110 => Version::V110,
            pdb::HeaderVersion::OtherValue(other) => Version::Other(*other),
            other => panic!("unsupported PDB version encountered: {:?}", other),
        }
    }
}
/// Assembly-wide information recovered from symbol records; either part may
/// be absent if the corresponding symbol was missing or failed to parse.
#[derive(Debug, Default)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct AssemblyInfo {
    pub build_info: Option<BuildInfo>,
    pub compiler_info: Option<CompilerInfo>,
}
/// Build information recovered from a `BuildInfoSymbol`.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct BuildInfo {
    // Argument strings resolved from the build-info ID record's argument list.
    arguments: Vec<String>,
}
impl TryFrom<(&pdb::BuildInfoSymbol, Option<&pdb::IdFinder<'_>>)> for BuildInfo {
type Error = crate::error::Error;
fn try_from(
info: (&pdb::BuildInfoSymbol, Option<&pdb::IdFinder<'_>>),
) -> Result<Self, Self::Error> {
let (symbol, finder) = info;
if finder.is_none() {
return Err(crate::error::Error::MissingDependency("IdFinder"));
}
let finder = finder.unwrap();
let build_info = finder
.find(symbol.id)?
.parse()
.expect("failed to parse build info");
match build_info {
pdb::IdData::BuildInfo(build_info_id) => {
let argument_ids: Vec<_> = build_info_id
.arguments
.iter()
.map(|id| finder.find(*id))
.collect::<Result<Vec<_>, _>>()?;
// TODO: Move this out into its own function for ID parsing
let arguments: Vec<String> = argument_ids
.iter()
.map(|id| match id.parse()? {
pdb::IdData::String(s) => {
Ok::<String, Self::Error>(s.name.to_string().into_owned())
}
other => panic!("unexpected ID type : {:?}", other),
})
.collect::<Result<Vec<_>, _>>()?;
return Ok(BuildInfo { arguments });
}
_ => unreachable!(),
};
Err(crate::error::Error::Unsupported("BuildInfo"))
}
}
/// Compiler details recovered from a `CompileFlagsSymbol`.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct CompilerInfo {
    /// Source language, as a display string.
    pub language: String,
    pub flags: CompileFlags,
    /// Target CPU, as a display string.
    pub cpu_type: String,
    pub frontend_version: CompilerVersion,
    pub backend_version: CompilerVersion,
    /// Raw compiler version string as recorded in the PDB.
    pub version_string: String,
}
impl From<pdb::CompileFlagsSymbol<'_>> for CompilerInfo {
    /// Converts the borrowed symbol into an owned, serializable form.
    fn from(sym: pdb::CompileFlagsSymbol<'_>) -> Self {
        CompilerInfo {
            language: sym.language.to_string(),
            flags: sym.flags.into(),
            cpu_type: sym.cpu_type.to_string(),
            frontend_version: sym.frontend_version.into(),
            backend_version: sym.backend_version.into(),
            version_string: sym.version_string.to_string().into_owned(),
        }
    }
}
/// Owned, serializable mirror of [pdb::CompileFlags].
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct CompileFlags {
    /// Compiled for edit and continue.
    pub edit_and_continue: bool,
    /// Compiled without debugging info.
    pub no_debug_info: bool,
    /// Compiled with `LTCG`.
    pub link_time_codegen: bool,
    /// Compiled with `/bzalign`.
    pub no_data_align: bool,
    /// Managed code or data is present.
    pub managed: bool,
    /// Compiled with `/GS`.
    pub security_checks: bool,
    /// Compiled with `/hotpatch`.
    pub hot_patch: bool,
    /// Compiled with `CvtCIL`.
    pub cvtcil: bool,
    /// This is a MSIL .NET Module.
    pub msil_module: bool,
    /// Compiled with `/sdl`.
    pub sdl: bool,
    /// Compiled with `/ltcg:pgo` or `pgo:`.
    pub pgo: bool,
    /// This is a .exp module.
    pub exp_module: bool,
}
impl From<pdb::CompileFlags> for CompileFlags {
    /// Copies every supported flag into the owned mirror type.
    fn from(flags: pdb::CompileFlags) -> Self {
        CompileFlags {
            edit_and_continue: flags.edit_and_continue,
            no_debug_info: flags.no_debug_info,
            link_time_codegen: flags.link_time_codegen,
            no_data_align: flags.no_data_align,
            managed: flags.managed,
            security_checks: flags.security_checks,
            hot_patch: flags.hot_patch,
            cvtcil: flags.cvtcil,
            msil_module: flags.msil_module,
            sdl: flags.sdl,
            pgo: flags.pgo,
            exp_module: flags.exp_module,
        }
    }
}
/// Owned, serializable mirror of [pdb::CompilerVersion].
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct CompilerVersion {
    pub major: u16,
    pub minor: u16,
    pub build: u16,
    /// Optional QFE component of the version.
    pub qfe: Option<u16>,
}
impl From<pdb::CompilerVersion> for CompilerVersion {
    /// Field-for-field copy into the serializable mirror type.
    fn from(version: pdb::CompilerVersion) -> Self {
        CompilerVersion {
            major: version.major,
            minor: version.minor,
            build: version.build,
            qfe: version.qfe,
        }
    }
}
/// A single module from the PDB's debug-information stream.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct DebugModule {
    /// Module name as reported by the PDB.
    name: String,
    /// Object file the module came from.
    object_file_name: String,
    /// Source files contributing to this module; `None` when module info or
    /// the string table was unavailable.
    source_files: Option<Vec<FileInfo>>,
}
/// Owned mirror of [pdb::FileChecksum]: the checksum recorded for a source file.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
enum Checksum {
    /// No checksum recorded.
    None,
    Md5(Vec<u8>),
    Sha1(Vec<u8>),
    Sha256(Vec<u8>),
}
impl From<pdb::FileChecksum<'_>> for Checksum {
    /// Copies the borrowed checksum bytes into an owned variant.
    fn from(checksum: pdb::FileChecksum<'_>) -> Self {
        match checksum {
            pdb::FileChecksum::None => Checksum::None,
            pdb::FileChecksum::Md5(data) => Checksum::Md5(data.to_vec()),
            pdb::FileChecksum::Sha1(data) => Checksum::Sha1(data.to_vec()),
            pdb::FileChecksum::Sha256(data) => Checksum::Sha256(data.to_vec()),
        }
    }
}
/// A source file referenced by a debug module, with its recorded checksum.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct FileInfo {
    name: String,
    checksum: Checksum,
}
impl
    From<(
        &pdb::Module<'_>,
        Option<&pdb::ModuleInfo<'_>>,
        Option<&pdb::StringTable<'_>>,
    )> for DebugModule
{
    /// Builds a [DebugModule] from a module record plus (optionally) its
    /// module info and the PDB string table. Source files can only be
    /// resolved when BOTH the module info and the string table are present;
    /// any failure along the way yields `source_files: None`.
    fn from(
        data: (
            &pdb::Module<'_>,
            Option<&pdb::ModuleInfo<'_>>,
            Option<&pdb::StringTable<'_>>,
        ),
    ) -> Self {
        let (module, info, string_table) = data;

        let source_files: Option<Vec<FileInfo>> = match (info, string_table) {
            (Some(info), Some(string_table)) => {
                info.line_program().ok().and_then(|prog| {
                    prog.files()
                        .map(|f| {
                            let name = f
                                .name
                                .to_string_lossy(string_table)
                                .expect("failed to convert string")
                                .to_string();
                            Ok(FileInfo {
                                name,
                                checksum: f.checksum.into(),
                            })
                        })
                        .collect()
                        .ok()
                })
            }
            _ => None,
        };

        DebugModule {
            name: module.module_name().to_string(),
            object_file_name: module.object_file_name().to_string(),
            source_files,
        }
    }
}
/// A symbol from the PDB's public symbol table.
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct PublicSymbol {
    pub name: String,
    pub is_code: bool,
    pub is_function: bool,
    pub is_managed: bool,
    pub is_msil: bool,
    /// Translated RVA plus the caller-supplied base address; `None` when no
    /// address map was available or the offset could not be translated.
    pub offset: Option<usize>,
}
impl From<(pdb::PublicSymbol<'_>, usize, Option<&pdb::AddressMap<'_>>)> for PublicSymbol {
    /// Converts a raw public symbol into the owned form, translating its
    /// section:offset into an RVA (plus `base_address`) when an address map
    /// is available.
    fn from(data: (pdb::PublicSymbol<'_>, usize, Option<&pdb::AddressMap<'_>>)) -> Self {
        let (sym, base_address, address_map) = data;

        // A zero section index cannot be translated to a meaningful RVA.
        if sym.offset.section == 0 {
            warn!(
                "symbol type has an invalid section index and RVA will be invalid: {:?}",
                sym
            )
        }

        let offset = address_map.and_then(|address_map| {
            sym.offset
                .to_rva(address_map)
                .map(|rva| u32::from(rva) as usize + base_address)
        });

        PublicSymbol {
            name: sym.name.to_string().to_string(),
            is_code: sym.code,
            is_function: sym.function,
            is_managed: sym.managed,
            is_msil: sym.msil,
            offset,
        }
    }
}
/// A global or managed data symbol with its resolved type.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Data {
    pub name: String,
    pub is_global: bool,
    pub is_managed: bool,
    /// Shared reference to the already-parsed type of this datum.
    pub ty: TypeRef,
    /// Translated RVA plus the caller-supplied base address, when available.
    pub offset: Option<usize>,
}
impl
    TryFrom<(
        pdb::DataSymbol<'_>,
        usize,
        Option<&pdb::AddressMap<'_>>,
        &HashMap<TypeIndexNumber, TypeRef>,
    )> for Data
{
    type Error = crate::error::Error;

    /// Converts a raw data symbol into the owned form, resolving its type
    /// index against the already-parsed type table.
    ///
    /// Fails with `UnresolvedType` when the referenced type has not been
    /// parsed.
    fn try_from(
        data: (
            pdb::DataSymbol<'_>,
            usize,
            Option<&pdb::AddressMap<'_>>,
            &HashMap<TypeIndexNumber, TypeRef>,
        ),
    ) -> Result<Self, Self::Error> {
        let (sym, base_address, address_map, parsed_types) = data;

        let offset = address_map.and_then(|address_map| {
            sym.offset
                .to_rva(address_map)
                .map(|rva| u32::from(rva) as usize + base_address)
        });

        let ty = parsed_types
            .get(&sym.type_index.0)
            .map(Rc::clone)
            .ok_or(Self::Error::UnresolvedType(sym.type_index.0))?;

        Ok(Data {
            name: sym.name.to_string().to_string(),
            is_global: sym.global,
            is_managed: sym.managed,
            ty,
            offset,
        })
    }
}
/// A procedure (function) symbol.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Procedure {
    pub name: String,
    /// Debug-formatted type information, when the type could be resolved.
    pub signature: Option<String>,
    pub type_index: TypeIndexNumber,
    /// This reflects the RVA in the transformed address space. See [PdbInternalSectionOffset docs](https://docs.rs/pdb/latest/pdb/struct.PdbInternalSectionOffset.html)
    /// for more details.
    pub address: Option<usize>,
    /// length of this procedure in BYTES
    // (comment moved here from `prologue_end`, where it had drifted)
    pub len: usize,
    pub is_global: bool,
    pub is_dpc: bool,
    /// Offset at which the prologue ends (debug start offset).
    pub prologue_end: usize,
    /// Offset at which the epilogue begins (debug end offset).
    pub epilogue_start: usize,
}
impl
    From<(
        pdb::ProcedureSymbol<'_>,
        usize,
        Option<&pdb::AddressMap<'_>>,
        &pdb::ItemFinder<'_, pdb::TypeIndex>,
    )> for Procedure
{
    /// Converts a raw procedure symbol into the owned form, translating its
    /// offset to an RVA (plus `base_address`) and debug-formatting its type
    /// as the signature when the type can be resolved.
    ///
    /// Direct field access replaces the previous full destructuring, which
    /// bound `parent`/`end`/`next`/`flags` without using them (unused-variable
    /// warnings) and relied on `sym` remaining usable after the destructure.
    fn from(
        data: (
            pdb::ProcedureSymbol<'_>,
            usize,
            Option<&pdb::AddressMap<'_>>,
            &pdb::ItemFinder<'_, pdb::TypeIndex>,
        ),
    ) -> Self {
        let (sym, base_address, address_map, type_finder) = data;

        // A zero section index cannot be translated to a meaningful RVA.
        if sym.offset.section == 0 {
            warn!(
                "symbol type has an invalid section index and RVA will be invalid: {:?}",
                sym
            )
        }

        let address = address_map.and_then(|address_map| {
            sym.offset
                .to_rva(address_map)
                .map(|rva| u32::from(rva) as usize + base_address)
        });

        let signature = type_finder.find(sym.type_index).ok().map(|type_info| {
            format!(
                "{:?}",
                type_info.parse().expect("failed to parse type info")
            )
        });

        Procedure {
            name: sym.name.to_string().to_string(),
            signature,
            type_index: sym.type_index.0,
            address,
            len: sym.len as usize,
            is_global: sym.global,
            is_dpc: sym.dpc,
            prologue_end: sym.dbg_start_offset as usize,
            epilogue_start: sym.dbg_end_offset as usize,
        }
    }
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | false |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/ezpdb/src/lib.rs | crates/ezpdb/src/lib.rs | use crate::error::Error;
use crate::symbol_types::*;
use log::{debug, warn};
use pdb::{
AddressMap, AnnotationReferenceSymbol, FallibleIterator, IdIndex, ItemFinder, Symbol,
SymbolData, TypeData, TypeIndex, PDB,
};
use std::cell::RefCell;
use std::convert::TryInto;
use std::fs::File;
use std::path::Path;
use std::rc::Rc;
pub mod error;
pub mod symbol_types;
pub mod type_info;
pub use crate::symbol_types::ParsedPdb;
/// Parses the PDB file at `path` into a serializable [ParsedPdb].
///
/// `base_address`, when provided, is added to every address translated from
/// the PDB's RVAs (useful when the module is loaded at a known base).
///
/// Parsing is best-effort: individual IDs, types, and symbols that fail to
/// parse are logged via `warn!` and skipped; only structural failures (e.g.
/// unreadable streams) abort with an `Err`.
pub fn parse_pdb<P: AsRef<Path>>(
    path: P,
    base_address: Option<usize>,
) -> Result<ParsedPdb, crate::error::Error> {
    let file = File::open(path.as_ref())?;

    debug!("opening PDB");
    let mut pdb = PDB::open(file)?;
    let mut output_pdb = ParsedPdb::new(path.as_ref().to_owned());

    let dbi = pdb.debug_information()?;
    let pdbi = pdb.pdb_information()?;

    output_pdb.machine_type = dbi
        .machine_type()
        .ok()
        .map(|machine_type| (&machine_type).into());
    // Prefer the age from the DBI header; fall back to the PDB header's age.
    output_pdb.age = match dbi.age() {
        Some(age) => age,
        None => pdbi.age,
    };
    output_pdb.guid = pdbi.guid;
    output_pdb.timestamp = pdbi.signature;
    output_pdb.version = (&pdbi.version).into();

    // The address map and string table are optional: consumers produce `None`
    // addresses / file lists when they are missing.
    debug!("getting address map");
    let address_map = pdb.address_map().ok();

    debug!("grabbing string table");
    let string_table = pdb.string_table().ok();

    debug!("fetching ID information");
    // Some symbols such as build information rely on IDs being known. Iterate these to
    // build the database
    let id_information = pdb.id_information();
    let id_finder = match &id_information {
        Ok(id_information) => {
            debug!("ID information header was valid");
            let mut id_finder = id_information.finder();

            let mut iter = id_information.iter();
            while let Some(_id) = iter.next()? {
                id_finder.update(&iter);
            }

            Some(id_finder)
        }
        Err(e) => {
            warn!("error when fetching id_information: {}. ID information and symbols depending on such will not be loaded", e);
            None
        }
    };

    debug!("grabbing type information");
    // Parse type information first. Some symbol info (such as function signatures) depends
    // upon type information, but not vice versa
    let type_information = pdb.type_information()?;
    let mut type_finder = type_information.finder();
    let mut iter = type_information.iter();
    let mut discovered_types = vec![];
    while let Some(typ) = iter.next()? {
        type_finder.update(&iter);
        discovered_types.push(typ.index());
    }

    for typ in discovered_types.iter() {
        let _typ = match handle_type(*typ, &mut output_pdb, &type_finder) {
            Ok(typ) => typ,
            Err(Error::PdbCrateError(e @ pdb::Error::UnimplementedTypeKind(_))) => {
                warn!("Could not parse type: {}", e);
                continue;
            }
            // TypeNotFound is commonly raised because the PDB spec is not open, so
            // some types are unknown to this crate. We can ignore these and just fail
            // any type depending on something we cannot resolve.
            Err(Error::PdbCrateError(e @ pdb::Error::TypeNotFound(_))) => {
                warn!("{}", e);
                continue;
            }
            Err(e) => return Err(e),
        };
    }

    // Iterate through all of the parsed types once just to update any necessary info
    for typ in output_pdb.types.values() {
        use crate::type_info::Typed;
        typ.as_ref().borrow_mut().on_complete(&output_pdb);
    }

    // Iterate through all of the parsed types once just to update any necessary info
    // for typ in output_pdb.types.values() {
    //     println!("{:#?}", typ.as_ref().borrow());
    // }

    debug!("grabbing public symbols");
    // Parse public symbols
    let symbol_table = pdb.global_symbols()?;
    let mut symbols = symbol_table.iter();
    while let Some(symbol) = symbols.next()? {
        if let Err(e) = handle_symbol(
            symbol,
            &mut output_pdb,
            address_map.as_ref(),
            &type_finder,
            id_finder.as_ref(),
            base_address,
        ) {
            warn!("Error handling symbol {:?}: {}", symbol, e);
        }
    }

    debug!("grabbing debug modules");
    // Parse private symbols
    let debug_info = pdb.debug_information()?;
    let mut modules = debug_info.modules()?;
    while let Some(module) = modules.next()? {
        let module_info = pdb.module_info(&module)?;
        // The module record itself is always pushed, even when its detailed
        // info (and thus its per-module symbols) is unavailable.
        output_pdb
            .debug_modules
            .push((&module, module_info.as_ref(), string_table.as_ref()).into());

        if module_info.is_none() {
            warn!("Could not get module info for debug module: {:?}", module);
            continue;
        }

        debug!("grabbing symbols for module: {}", module.module_name());
        let module_info = module_info.unwrap();
        let mut symbol_iter = module_info.symbols()?;
        while let Some(symbol) = symbol_iter.next()? {
            if let Err(e) = handle_symbol(
                symbol,
                &mut output_pdb,
                address_map.as_ref(),
                &type_finder,
                id_finder.as_ref(),
                base_address,
            ) {
                warn!("Error handling symbol {:?}: {}", symbol, e);
            }
        }
    }

    Ok(output_pdb)
}
/// Converts a [pdb::SymbolData] object to a parsed symbol representation that
/// we can serialize and adds it to the appropriate fields on the output [ParsedPdb].
/// Errors returned from this function should not be considered fatal.
///
/// `base_address` defaults to 0 when `None`. Symbol kinds without a handler
/// are logged via `warn!` and ignored.
fn handle_symbol(
    sym: Symbol,
    output_pdb: &mut ParsedPdb,
    address_map: Option<&AddressMap>,
    type_finder: &ItemFinder<'_, TypeIndex>,
    id_finder: Option<&ItemFinder<'_, IdIndex>>,
    base_address: Option<usize>,
) -> Result<(), Error> {
    let base_address = base_address.unwrap_or(0);
    let sym = sym.parse()?;
    match sym {
        SymbolData::Public(data) => {
            debug!("public symbol: {:?}", data);
            let converted_symbol: crate::symbol_types::PublicSymbol =
                (data, base_address, address_map).into();
            output_pdb.public_symbols.push(converted_symbol);
        }
        SymbolData::Procedure(data) => {
            debug!("procedure: {:?}", data);
            let converted_symbol: crate::symbol_types::Procedure =
                (data, base_address, address_map, type_finder).into();
            output_pdb.procedures.push(converted_symbol);
        }
        SymbolData::BuildInfo(data) => {
            debug!("build info: {:?}", data);
            // May fail when no id_finder was available; caller logs and moves on.
            let converted_symbol: crate::symbol_types::BuildInfo = (&data, id_finder).try_into()?;
            output_pdb.assembly_info.build_info = Some(converted_symbol);
        }
        SymbolData::CompileFlags(data) => {
            debug!("compile flags: {:?}", data);
            let sym: crate::symbol_types::CompilerInfo = data.into();
            output_pdb.assembly_info.compiler_info = Some(sym);
        }
        SymbolData::AnnotationReference(annotation) => {
            // Recognized but not yet converted/stored.
            debug!("annotation reference: {:?}", annotation);
            // let sym: crate::symbol_types::AnnotationReference = annotation.try_into()?;
            // output_pdb.annotation_references.push()
        }
        SymbolData::Data(data) => {
            // Only global data is retained on the output.
            let sym: crate::symbol_types::Data =
                (data, base_address, address_map, &output_pdb.types).try_into()?;
            if sym.is_global {
                output_pdb.global_data.push(sym);
            }
        }
        other => {
            warn!("Unhandled SymbolData: {:?}", other);
        }
    }

    Ok(())
}
/// Converts a [pdb::SymbolData] object to a parsed symbol representation that
/// we can serialize and adds it to the appropriate fields on the output [ParsedPdb].
/// Errors returned from this function should not be considered fatal.
pub(crate) fn handle_type(
idx: pdb::TypeIndex,
output_pdb: &mut ParsedPdb,
type_finder: &ItemFinder<'_, TypeIndex>,
) -> Result<TypeRef, Error> {
use crate::type_info::{Class, Type, Union};
if let Some(typ) = output_pdb.types.get(&idx.0) {
return Ok(Rc::clone(typ));
}
let typ = type_finder.find(idx).expect("failed to resolve type");
let parsed_type = &typ.parse()?;
let typ = handle_type_data(parsed_type, output_pdb, type_finder)?;
output_pdb.types.insert(idx.0, Rc::clone(&typ));
Ok(typ)
}
pub(crate) fn handle_type_data(
typ: &pdb::TypeData,
output_pdb: &mut ParsedPdb,
type_finder: &ItemFinder<'_, TypeIndex>,
) -> Result<TypeRef, Error> {
use crate::type_info::{Class, Type};
let typ = match typ {
TypeData::Class(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Class(typ)
}
TypeData::Union(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Union(typ)
}
TypeData::Bitfield(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Bitfield(typ)
}
TypeData::Array(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Array(typ)
}
TypeData::Enumerate(data) => {
let typ = data.try_into()?;
Type::EnumVariant(typ)
}
TypeData::Enumeration(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Enumeration(typ)
}
TypeData::Pointer(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Pointer(typ)
}
TypeData::Primitive(data) => {
let typ = data.try_into()?;
Type::Primitive(typ)
}
TypeData::FieldList(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::FieldList(typ)
}
TypeData::Modifier(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Modifier(typ)
}
TypeData::Member(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Member(typ)
}
TypeData::ArgumentList(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::ArgumentList(typ)
}
TypeData::Procedure(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Procedure(typ)
}
TypeData::MemberFunction(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::MemberFunction(typ)
}
TypeData::MethodList(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::MethodList(typ)
}
TypeData::VirtualBaseClass(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::VirtualBaseClass(typ)
}
TypeData::Nested(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Nested(typ)
}
TypeData::OverloadedMethod(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::OverloadedMethod(typ)
}
TypeData::Method(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::Method(typ)
}
TypeData::StaticMember(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::StaticMember(typ)
}
TypeData::BaseClass(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::BaseClass(typ)
}
TypeData::VirtualFunctionTablePointer(data) => {
let typ = (data, type_finder, output_pdb).try_into()?;
Type::VTable(typ)
}
other => {
warn!("Unhandled type: {:?}", other);
panic!("type not handled: {:?}", other);
}
};
Ok(Rc::new(RefCell::new(typ)))
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | false |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/ezpdb/src/error.rs | crates/ezpdb/src/error.rs | use thiserror::Error;
use crate::symbol_types::TypeIndexNumber;
#[derive(Error, Debug)]
pub enum Error {
#[error("the PDB parsing library encountered an error: {0}")]
PdbCrateError(#[from] pdb::Error),
#[error("dependency `{0}` required for parsing is unavailable")]
MissingDependency(&'static str),
#[error("functionality `{0}` is currently unsupported")]
Unsupported(&'static str),
#[error("a forward reference implmentation is needed")]
NeedForwardReferenceImplementation,
#[error("type `{0}` was not handled")]
UnhandledType(String),
#[error("IO error occurred: {0}")]
IoError(#[from] std::io::Error),
#[error("could not resolve type index {0}")]
UnresolvedType(TypeIndexNumber),
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | false |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/ezpdb/src/type_info.rs | crates/ezpdb/src/type_info.rs | use crate::error::Error;
use crate::symbol_types::ParsedPdb;
use crate::symbol_types::TypeRef;
#[cfg(feature = "serde")]
use serde::Serialize;
use std::convert::{From, TryFrom, TryInto};
use std::rc::Rc;
use log::warn;
pub trait Typed {
/// Returns the size (in bytes) of this type
fn type_size(&self, pdb: &ParsedPdb) -> usize;
/// Called after all types have been parsed
fn on_complete(&mut self, pdb: &ParsedPdb) {}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum Type {
Class(Class),
VirtualBaseClass(VirtualBaseClass),
Union(Union),
Bitfield(Bitfield),
Enumeration(Enumeration),
EnumVariant(EnumVariant),
Pointer(Pointer),
Primitive(Primitive),
Array(Array),
FieldList(FieldList),
ArgumentList(ArgumentList),
Modifier(Modifier),
Member(Member),
Procedure(Procedure),
MemberFunction(MemberFunction),
MethodList(MethodList),
MethodListEntry(MethodListEntry),
Nested(Nested),
OverloadedMethod(OverloadedMethod),
Method(Method),
StaticMember(StaticMember),
BaseClass(BaseClass),
VTable(VTable),
}
impl Typed for Type {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
match self {
Type::Class(class) => class.type_size(pdb),
Type::Union(union) => union.type_size(pdb),
Type::Bitfield(bitfield) => bitfield.underlying_type.borrow().type_size(pdb),
Type::Enumeration(e) => e.underlying_type.borrow().type_size(pdb),
Type::Pointer(p) => p.attributes.kind.type_size(pdb),
Type::Primitive(p) => p.type_size(pdb),
Type::Array(a) => a.type_size(pdb),
Type::FieldList(fields) => fields
.0
.iter()
.fold(0, |acc, field| acc + field.borrow().type_size(pdb)),
Type::EnumVariant(_) => panic!("type_size() invoked for EnumVariant"),
Type::Modifier(modifier) => modifier.underlying_type.borrow().type_size(pdb),
Type::Member(_) => panic!("type_size() invoked for Member"),
Type::ArgumentList(_) => panic!("type_size() invoked for ArgumentList"),
Type::Procedure(_) => panic!("type_size() invoked for Procedure"),
Type::MemberFunction(_) => panic!("type_size() invoked for MemberFunction"),
Type::MethodList(_) => panic!("type_size() invoked for MethodList"),
Type::MethodListEntry(_) => panic!("type_size() invoked for MethodListEntry"),
Type::VirtualBaseClass(_) => panic!("type_size() invoked for VirtualBaseClass"),
Type::Nested(_) => panic!("type_size() invoked for Nested"),
Type::OverloadedMethod(_) => panic!("type_size() invoked for overloaded method"),
Type::Method(_) => panic!("type_size() invoked for overloaded method"),
Type::StaticMember(_) => panic!("type_size() invoked for StaticMember"),
Type::VTable(_) => panic!("type_size() invoked for VTable"),
Type::BaseClass(_) => panic!("type_size() invoked for BaseClass"),
}
}
fn on_complete(&mut self, pdb: &ParsedPdb) {
match self {
Type::Class(class) => class.on_complete(pdb),
Type::Union(union) => union.on_complete(pdb),
Type::Array(a) => a.on_complete(pdb),
_ => {}
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct TypeProperties {
pub packed: bool,
pub constructors: bool,
pub overlapped_operators: bool,
pub is_nested_type: bool,
pub contains_nested_types: bool,
pub overload_assignment: bool,
pub overload_coasting: bool,
pub forward_reference: bool,
pub scoped_definition: bool,
pub has_unique_name: bool,
pub sealed: bool,
pub hfa: u8,
pub intristic_type: bool,
pub mocom: u8,
}
impl TryFrom<pdb::TypeProperties> for TypeProperties {
type Error = Error;
fn try_from(props: pdb::TypeProperties) -> Result<Self, Self::Error> {
Ok(TypeProperties {
packed: props.packed(),
constructors: props.constructors(),
overlapped_operators: props.overloaded_operators(),
is_nested_type: props.is_nested_type(),
contains_nested_types: props.contains_nested_types(),
overload_assignment: props.overloaded_assignment(),
overload_coasting: props.overloaded_casting(),
forward_reference: props.forward_reference(),
scoped_definition: props.scoped_definition(),
has_unique_name: props.has_unique_name(),
sealed: props.sealed(),
hfa: props.hfa(),
intristic_type: props.intrinsic_type(),
mocom: props.mocom(),
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Class {
pub name: String,
pub unique_name: Option<String>,
pub kind: ClassKind,
pub properties: TypeProperties,
pub derived_from: Option<TypeRef>,
pub fields: Vec<TypeRef>,
pub size: usize,
}
impl Typed for Class {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
if self.properties.forward_reference {
// Find the implementation
for value in pdb.types.values() {
if let Ok(borrow) = value.as_ref().try_borrow() {
if let Type::Class(class) = &*borrow {
if !class.properties.forward_reference
&& class.unique_name == self.unique_name
{
return class.type_size(pdb);
}
}
}
}
warn!("could not get forward reference for {}", self.name);
}
self.size
}
}
type FromClass<'a, 'b> = (
&'b pdb::ClassType<'a>,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromClass<'_, '_>> for Class {
type Error = Error;
fn try_from(info: FromClass<'_, '_>) -> Result<Self, Self::Error> {
let (class, type_finder, output_pdb) = info;
let pdb::ClassType {
kind,
count,
properties,
fields,
derived_from,
vtable_shape,
size,
name,
unique_name,
} = *class;
let fields: Vec<TypeRef> = match fields {
Some(type_index) => {
// TODO: perhaps change FieldList to Rc<Vec<TypeRef>?
if let Type::FieldList(fields) =
&*crate::handle_type(type_index, output_pdb, type_finder)?
.as_ref()
.borrow()
{
fields.0.clone()
} else {
panic!("got an unexpected type when FieldList was expected")
}
}
None => vec![],
};
let derived_from = derived_from.map(|type_index| {
crate::handle_type(type_index, output_pdb, type_finder)
.expect("failed to resolve dependent type")
});
let unique_name = unique_name.map(|s| s.to_string().into_owned());
Ok(Class {
name: name.to_string().into_owned(),
unique_name,
kind: kind.try_into()?,
properties: properties.try_into()?,
derived_from,
fields,
size: size as usize,
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct BaseClass {
pub kind: ClassKind,
pub base_class: TypeRef,
pub offset: usize,
}
type FromBaseClass<'a, 'b> = (
&'b pdb::BaseClassType,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromBaseClass<'_, '_>> for BaseClass {
type Error = Error;
fn try_from(info: FromBaseClass<'_, '_>) -> Result<Self, Self::Error> {
let (class, type_finder, output_pdb) = info;
let pdb::BaseClassType {
kind,
attributes,
base_class,
offset,
} = *class;
let base_class = crate::handle_type(base_class, output_pdb, type_finder)?;
Ok(BaseClass {
kind: kind.try_into()?,
base_class,
offset: offset as usize,
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct VirtualBaseClass {
pub direct: bool,
pub base_class: TypeRef,
pub base_pointer: TypeRef,
pub base_pointer_offset: usize,
pub virtual_base_offset: usize,
}
type FromVirtualBaseClass<'a, 'b> = (
&'b pdb::VirtualBaseClassType,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromVirtualBaseClass<'_, '_>> for VirtualBaseClass {
type Error = Error;
fn try_from(info: FromVirtualBaseClass<'_, '_>) -> Result<Self, Self::Error> {
let (class, type_finder, output_pdb) = info;
let pdb::VirtualBaseClassType {
direct,
attributes,
base_class,
base_pointer,
base_pointer_offset,
virtual_base_offset,
} = *class;
let base_class = crate::handle_type(base_class, output_pdb, type_finder)
.expect("failed to resolve underlying type");
let base_pointer = crate::handle_type(base_pointer, output_pdb, type_finder)
.expect("failed to resolve underlying type");
Ok(VirtualBaseClass {
direct,
base_class,
base_pointer,
base_pointer_offset: base_pointer_offset as usize,
virtual_base_offset: virtual_base_offset as usize,
})
}
}
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum ClassKind {
Class,
Struct,
Interface,
}
impl TryFrom<pdb::ClassKind> for ClassKind {
type Error = Error;
fn try_from(kind: pdb::ClassKind) -> Result<Self, Self::Error> {
Ok(match kind {
pdb::ClassKind::Class => ClassKind::Class,
pdb::ClassKind::Struct => ClassKind::Struct,
pdb::ClassKind::Interface => ClassKind::Interface,
})
}
}
impl std::fmt::Display for ClassKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ClassKind::Class => write!(f, "Class"),
ClassKind::Struct => write!(f, "Struct"),
ClassKind::Interface => write!(f, "Interface"),
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Union {
pub name: String,
pub unique_name: Option<String>,
pub properties: TypeProperties,
pub size: usize,
pub count: usize,
pub fields: Vec<TypeRef>,
}
impl Typed for Union {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
if self.properties.forward_reference {
// Find the implementation
for (_key, value) in &pdb.types {
if let Some(value) = value.as_ref().try_borrow().ok() {
if let Type::Union(union) = &*value {
if !union.properties.forward_reference
&& union.unique_name == self.unique_name
{
return union.type_size(pdb);
}
}
}
}
warn!("could not get forward reference for {}", self.name);
}
self.size
}
}
type FromUnion<'a, 'b> = (
&'b pdb::UnionType<'a>,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromUnion<'_, '_>> for Union {
type Error = Error;
fn try_from(data: FromUnion<'_, '_>) -> Result<Self, Self::Error> {
let (union, type_finder, output_pdb) = data;
let pdb::UnionType {
count,
properties,
size,
fields,
name,
unique_name,
} = union;
let fields_type = crate::handle_type(*fields, output_pdb, type_finder)?;
let fields;
let borrowed_fields = fields_type.as_ref().borrow();
match &*borrowed_fields {
Type::FieldList(fields_list) => {
fields = fields_list.0.clone();
}
_ => {
drop(borrowed_fields);
fields = vec![fields_type];
}
}
let union = Union {
name: name.to_string().into_owned(),
unique_name: unique_name.map(|s| s.to_string().into_owned()),
properties: (*properties).try_into()?,
size: *size as usize,
count: *count as usize,
fields,
};
Ok(union)
}
}
type FromBitfield<'a, 'b> = (
&'b pdb::BitfieldType,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Bitfield {
pub underlying_type: TypeRef,
pub len: usize,
pub position: usize,
}
impl TryFrom<FromBitfield<'_, '_>> for Bitfield {
type Error = Error;
fn try_from(data: FromBitfield<'_, '_>) -> Result<Self, Self::Error> {
let (bitfield, type_finder, output_pdb) = data;
let pdb::BitfieldType {
underlying_type,
length,
position,
} = *bitfield;
let underlying_type = crate::handle_type(underlying_type, output_pdb, type_finder)?;
Ok(Bitfield {
underlying_type,
len: length as usize,
position: position as usize,
})
}
}
impl Typed for Bitfield {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
panic!("calling type_size() directly on a bitfield is probably not what you want");
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Enumeration {
pub name: String,
pub unique_name: Option<String>,
pub underlying_type: TypeRef,
pub variants: Vec<EnumVariant>,
pub properties: TypeProperties,
}
type FromEnumeration<'a, 'b> = (
&'b pdb::EnumerationType<'a>,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromEnumeration<'_, '_>> for Enumeration {
type Error = Error;
fn try_from(data: FromEnumeration<'_, '_>) -> Result<Self, Self::Error> {
let (e, type_finder, output_pdb) = data;
let pdb::EnumerationType {
count,
properties,
underlying_type,
fields,
name,
unique_name,
} = *e;
let underlying_type = crate::handle_type(underlying_type, output_pdb, type_finder)?;
let fields_type = crate::handle_type(fields, output_pdb, type_finder)?;
let fields;
let borrowed_fields = fields_type.as_ref().borrow();
match &*borrowed_fields {
Type::FieldList(fields_list) => {
fields = fields_list.0.clone();
}
_other => {
fields = vec![];
}
}
let fields = fields
.iter()
.map(|field| {
if let Type::EnumVariant(var) = &*field.borrow() {
var.clone()
} else {
panic!("field {:?} is not an enumvariant", field)
}
})
.collect::<Vec<_>>();
Ok(Enumeration {
name: name.to_string().into_owned(),
unique_name: unique_name.map(|s| s.to_string().into_owned()),
underlying_type,
variants: fields,
properties: properties.try_into()?,
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct EnumVariant {
pub name: String,
pub value: VariantValue,
}
type FromEnumerate<'a, 'b> = &'b pdb::EnumerateType<'a>;
impl TryFrom<FromEnumerate<'_, '_>> for EnumVariant {
type Error = Error;
fn try_from(data: FromEnumerate<'_, '_>) -> Result<Self, Self::Error> {
let e = data;
let pdb::EnumerateType {
attributes,
value,
name,
} = e;
Ok(Self {
name: name.to_string().into_owned(),
value: value.try_into()?,
})
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum VariantValue {
U8(u8),
U16(u16),
U32(u32),
U64(u64),
I8(i8),
I16(i16),
I32(i32),
I64(i64),
}
type FromVariant = pdb::Variant;
impl TryFrom<&FromVariant> for VariantValue {
type Error = Error;
fn try_from(data: &FromVariant) -> Result<Self, Self::Error> {
let variant = data;
let value = match *variant {
pdb::Variant::U8(val) => VariantValue::U8(val),
pdb::Variant::U16(val) => VariantValue::U16(val),
pdb::Variant::U32(val) => VariantValue::U32(val),
pdb::Variant::U64(val) => VariantValue::U64(val),
pdb::Variant::I8(val) => VariantValue::I8(val),
pdb::Variant::I16(val) => VariantValue::I16(val),
pdb::Variant::I32(val) => VariantValue::I32(val),
pdb::Variant::I64(val) => VariantValue::I64(val),
};
Ok(value)
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Pointer {
pub underlying_type: Option<TypeRef>,
pub attributes: PointerAttributes,
}
type FromPointer<'a, 'b> = (
&'b pdb::PointerType,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromPointer<'_, '_>> for Pointer {
type Error = Error;
fn try_from(data: FromPointer<'_, '_>) -> Result<Self, Self::Error> {
let (pointer, type_finder, output_pdb) = data;
let pdb::PointerType {
underlying_type,
attributes,
containing_class,
} = *pointer;
let underlying_type = crate::handle_type(underlying_type, output_pdb, type_finder).ok();
Ok(Pointer {
underlying_type,
attributes: attributes.try_into()?,
})
}
}
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum PointerKind {
Near16,
Far16,
Huge16,
BaseSeg,
BaseVal,
BaseSegVal,
BaseAddr,
BaseSegAddr,
BaseType,
BaseSelf,
Near32,
Far32,
Ptr64,
}
impl TryFrom<pdb::PointerKind> for PointerKind {
type Error = Error;
fn try_from(kind: pdb::PointerKind) -> Result<Self, Self::Error> {
let kind = match kind {
pdb::PointerKind::Near16 => PointerKind::Near16,
pdb::PointerKind::Far16 => PointerKind::Far16,
pdb::PointerKind::Huge16 => PointerKind::Huge16,
pdb::PointerKind::BaseSeg => PointerKind::BaseSeg,
pdb::PointerKind::BaseVal => PointerKind::BaseVal,
pdb::PointerKind::BaseSegVal => PointerKind::BaseSegVal,
pdb::PointerKind::BaseAddr => PointerKind::BaseAddr,
pdb::PointerKind::BaseSegAddr => PointerKind::BaseSegAddr,
pdb::PointerKind::BaseType => PointerKind::BaseType,
pdb::PointerKind::BaseSelf => PointerKind::BaseSelf,
pdb::PointerKind::Near32 => PointerKind::Near32,
pdb::PointerKind::Far32 => PointerKind::Far32,
pdb::PointerKind::Ptr64 => PointerKind::Ptr64,
};
Ok(kind)
}
}
impl Typed for PointerKind {
fn type_size(&self, _pdb: &ParsedPdb) -> usize {
match self {
PointerKind::Near16 | PointerKind::Far16 | PointerKind::Huge16 => 2,
PointerKind::Near32 | PointerKind::Far32 => 4,
PointerKind::Ptr64 => 8,
other => panic!("type_size() not implemented for pointer type: {:?}", other),
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct PointerAttributes {
pub kind: PointerKind,
pub is_volatile: bool,
pub is_const: bool,
pub is_unaligned: bool,
pub is_restrict: bool,
pub is_reference: bool,
pub size: usize,
pub is_mocom: bool,
}
impl TryFrom<pdb::PointerAttributes> for PointerAttributes {
type Error = Error;
fn try_from(attr: pdb::PointerAttributes) -> Result<Self, Self::Error> {
let attr = PointerAttributes {
kind: attr.pointer_kind().try_into()?,
is_volatile: attr.is_volatile(),
is_const: attr.is_const(),
is_unaligned: attr.is_unaligned(),
is_restrict: attr.is_restrict(),
is_reference: attr.is_reference(),
size: attr.size() as usize,
is_mocom: attr.is_mocom(),
};
Ok(attr)
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Primitive {
pub kind: PrimitiveKind,
pub indirection: Option<Indirection>,
}
impl TryFrom<&pdb::PrimitiveType> for Primitive {
type Error = Error;
fn try_from(typ: &pdb::PrimitiveType) -> Result<Self, Self::Error> {
let pdb::PrimitiveType { kind, indirection } = typ;
let prim = Primitive {
kind: kind.try_into()?,
indirection: indirection.map(|i| i.try_into()).transpose()?,
};
Ok(prim)
}
}
impl Typed for Primitive {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
self.size()
}
}
impl Primitive {
pub fn size(&self) -> usize {
if let Some(indirection) = self.indirection.as_ref() {
return indirection.size();
}
return self.kind.size();
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum Indirection {
Near16,
Far16,
Huge16,
Near32,
Far32,
Near64,
Near128,
}
impl TryFrom<pdb::Indirection> for Indirection {
type Error = Error;
fn try_from(kind: pdb::Indirection) -> Result<Self, Self::Error> {
let kind = match kind {
pdb::Indirection::Near16 => Indirection::Near16,
pdb::Indirection::Far16 => Indirection::Far16,
pdb::Indirection::Huge16 => Indirection::Huge16,
pdb::Indirection::Near32 => Indirection::Near32,
pdb::Indirection::Far32 => Indirection::Far32,
pdb::Indirection::Near64 => Indirection::Near64,
pdb::Indirection::Near128 => Indirection::Near128,
};
Ok(kind)
}
}
impl Typed for Indirection {
fn type_size(&self, _pdb: &ParsedPdb) -> usize {
self.size()
}
}
impl Indirection {
pub fn size(&self) -> usize {
match self {
Indirection::Near16 | Indirection::Far16 | Indirection::Huge16 => 2,
Indirection::Near32 | Indirection::Far32 => 4,
Indirection::Near64 => 8,
Indirection::Near128 => 8,
}
}
}
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub enum PrimitiveKind {
NoType,
Void,
Char,
UChar,
RChar,
WChar,
RChar16,
RChar32,
I8,
U8,
Short,
UShort,
I16,
U16,
Long,
ULong,
I32,
U32,
Quad,
UQuad,
I64,
U64,
Octa,
UOcta,
I128,
U128,
F16,
F32,
F32PP,
F48,
F64,
F80,
F128,
Complex32,
Complex64,
Complex80,
Complex128,
Bool8,
Bool16,
Bool32,
Bool64,
HRESULT,
}
impl TryFrom<&pdb::PrimitiveKind> for PrimitiveKind {
type Error = Error;
fn try_from(kind: &pdb::PrimitiveKind) -> Result<Self, Self::Error> {
let kind = match *kind {
pdb::PrimitiveKind::NoType => PrimitiveKind::NoType,
pdb::PrimitiveKind::Void => PrimitiveKind::Void,
pdb::PrimitiveKind::Char => PrimitiveKind::Char,
pdb::PrimitiveKind::UChar => PrimitiveKind::UChar,
pdb::PrimitiveKind::RChar => PrimitiveKind::RChar,
pdb::PrimitiveKind::WChar => PrimitiveKind::WChar,
pdb::PrimitiveKind::RChar16 => PrimitiveKind::RChar16,
pdb::PrimitiveKind::RChar32 => PrimitiveKind::RChar32,
pdb::PrimitiveKind::I8 => PrimitiveKind::I8,
pdb::PrimitiveKind::U8 => PrimitiveKind::U8,
pdb::PrimitiveKind::Short => PrimitiveKind::Short,
pdb::PrimitiveKind::UShort => PrimitiveKind::UShort,
pdb::PrimitiveKind::I16 => PrimitiveKind::I16,
pdb::PrimitiveKind::U16 => PrimitiveKind::U16,
pdb::PrimitiveKind::Long => PrimitiveKind::Long,
pdb::PrimitiveKind::ULong => PrimitiveKind::ULong,
pdb::PrimitiveKind::I32 => PrimitiveKind::I32,
pdb::PrimitiveKind::U32 => PrimitiveKind::U32,
pdb::PrimitiveKind::Quad => PrimitiveKind::Quad,
pdb::PrimitiveKind::UQuad => PrimitiveKind::UQuad,
pdb::PrimitiveKind::I64 => PrimitiveKind::I64,
pdb::PrimitiveKind::U64 => PrimitiveKind::U64,
pdb::PrimitiveKind::Octa => PrimitiveKind::Octa,
pdb::PrimitiveKind::UOcta => PrimitiveKind::UOcta,
pdb::PrimitiveKind::I128 => PrimitiveKind::I128,
pdb::PrimitiveKind::U128 => PrimitiveKind::U128,
pdb::PrimitiveKind::F16 => PrimitiveKind::F16,
pdb::PrimitiveKind::F32 => PrimitiveKind::F32,
pdb::PrimitiveKind::F32PP => PrimitiveKind::F32PP,
pdb::PrimitiveKind::F48 => PrimitiveKind::F48,
pdb::PrimitiveKind::F64 => PrimitiveKind::F64,
pdb::PrimitiveKind::F80 => PrimitiveKind::F80,
pdb::PrimitiveKind::F128 => PrimitiveKind::F128,
pdb::PrimitiveKind::Complex32 => PrimitiveKind::Complex32,
pdb::PrimitiveKind::Complex64 => PrimitiveKind::Complex64,
pdb::PrimitiveKind::Complex80 => PrimitiveKind::Complex80,
pdb::PrimitiveKind::Complex128 => PrimitiveKind::Complex128,
pdb::PrimitiveKind::Bool8 => PrimitiveKind::Bool8,
pdb::PrimitiveKind::Bool16 => PrimitiveKind::Bool16,
pdb::PrimitiveKind::Bool32 => PrimitiveKind::Bool32,
pdb::PrimitiveKind::Bool64 => PrimitiveKind::Bool64,
pdb::PrimitiveKind::HRESULT => PrimitiveKind::HRESULT,
other => return Err(Error::UnhandledType(format!("{:?}", other))),
};
Ok(kind)
}
}
impl Typed for PrimitiveKind {
fn type_size(&self, _pdb: &ParsedPdb) -> usize {
self.size()
}
}
impl PrimitiveKind {
pub fn size(&self) -> usize {
match self {
PrimitiveKind::NoType | PrimitiveKind::Void => 0,
PrimitiveKind::Char
| PrimitiveKind::UChar
| PrimitiveKind::RChar
| PrimitiveKind::I8
| PrimitiveKind::U8
| PrimitiveKind::Bool8 => 1,
PrimitiveKind::RChar16
| PrimitiveKind::WChar
| PrimitiveKind::Short
| PrimitiveKind::UShort
| PrimitiveKind::I16
| PrimitiveKind::U16
| PrimitiveKind::F16
| PrimitiveKind::Bool16 => 2,
PrimitiveKind::RChar32
| PrimitiveKind::Long
| PrimitiveKind::ULong
| PrimitiveKind::I32
| PrimitiveKind::U32
| PrimitiveKind::F32
| PrimitiveKind::F32PP
| PrimitiveKind::Bool32
| PrimitiveKind::HRESULT
| PrimitiveKind::Complex32 => 4,
PrimitiveKind::F48 => 6,
PrimitiveKind::Quad
| PrimitiveKind::UQuad
| PrimitiveKind::I64
| PrimitiveKind::U64
| PrimitiveKind::F64
| PrimitiveKind::Bool64
| PrimitiveKind::Complex64 => 8,
PrimitiveKind::Octa
| PrimitiveKind::UOcta
| PrimitiveKind::I128
| PrimitiveKind::U128 => 16,
PrimitiveKind::F80 | PrimitiveKind::Complex80 => 10,
PrimitiveKind::F128 | PrimitiveKind::Complex128 => 16,
}
}
}
impl std::fmt::Display for PrimitiveKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PrimitiveKind::NoType => write!(f, "NoType"),
PrimitiveKind::Void => write!(f, "Void"),
PrimitiveKind::Char => write!(f, "Char"),
PrimitiveKind::UChar => write!(f, "UChar"),
PrimitiveKind::RChar => write!(f, "RChar"),
PrimitiveKind::WChar => write!(f, "WChar"),
PrimitiveKind::RChar16 => write!(f, "RChar16"),
PrimitiveKind::RChar32 => write!(f, "RChar32"),
PrimitiveKind::I8 => write!(f, "I8"),
PrimitiveKind::U8 => write!(f, "U8"),
PrimitiveKind::Short => write!(f, "Short"),
PrimitiveKind::UShort => write!(f, "UShort"),
PrimitiveKind::I16 => write!(f, "I16"),
PrimitiveKind::U16 => write!(f, "U16"),
PrimitiveKind::Long => write!(f, "Long"),
PrimitiveKind::ULong => write!(f, "ULong"),
PrimitiveKind::I32 => write!(f, "I32"),
PrimitiveKind::U32 => write!(f, "U32"),
PrimitiveKind::Quad => write!(f, "Quad"),
PrimitiveKind::UQuad => write!(f, "UQuad"),
PrimitiveKind::I64 => write!(f, "I64"),
PrimitiveKind::U64 => write!(f, "U64"),
PrimitiveKind::Octa => write!(f, "Octa"),
PrimitiveKind::UOcta => write!(f, "UOcta"),
PrimitiveKind::I128 => write!(f, "I128"),
PrimitiveKind::U128 => write!(f, "U128"),
PrimitiveKind::F16 => write!(f, "F16"),
PrimitiveKind::F32 => write!(f, "F32"),
PrimitiveKind::F32PP => write!(f, "F32PP"),
PrimitiveKind::F48 => write!(f, "F48"),
PrimitiveKind::F64 => write!(f, "F64"),
PrimitiveKind::F80 => write!(f, "F80"),
PrimitiveKind::F128 => write!(f, "F128"),
PrimitiveKind::Complex32 => write!(f, "Complex32"),
PrimitiveKind::Complex64 => write!(f, "Complex64"),
PrimitiveKind::Complex80 => write!(f, "Complex80"),
PrimitiveKind::Complex128 => write!(f, "Complex128"),
PrimitiveKind::Bool8 => write!(f, "Bool8"),
PrimitiveKind::Bool16 => write!(f, "Bool16"),
PrimitiveKind::Bool32 => write!(f, "Bool32"),
PrimitiveKind::Bool64 => write!(f, "Bool64"),
PrimitiveKind::HRESULT => write!(f, "HRESULT"),
}
}
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize))]
pub struct Array {
pub element_type: TypeRef,
pub indexing_type: TypeRef,
pub stride: Option<u32>,
pub size: usize,
pub dimensions_bytes: Vec<usize>,
pub dimensions_elements: Vec<usize>,
}
impl Typed for Array {
fn type_size(&self, pdb: &ParsedPdb) -> usize {
self.size
}
fn on_complete(&mut self, pdb: &ParsedPdb) {
self.dimensions_elements.clear();
if self.size == 0 {
self.dimensions_elements.push(0);
return;
}
let mut running_size = self.element_type.as_ref().borrow().type_size(pdb);
for byte_size in &self.dimensions_bytes {
// TODO: may be incorrect behavior
if running_size == 0 {
continue;
}
let size = *byte_size / running_size;
self.dimensions_elements.push(size);
running_size = size;
}
}
}
type FromArray<'a, 'b> = (
&'b pdb::ArrayType,
&'b pdb::TypeFinder<'a>,
&'b mut crate::symbol_types::ParsedPdb,
);
impl TryFrom<FromArray<'_, '_>> for Array {
type Error = Error;
fn try_from(data: FromArray<'_, '_>) -> Result<Self, Self::Error> {
let (array, type_finder, output_pdb) = data;
let pdb::ArrayType {
element_type,
indexing_type,
stride,
dimensions,
} = array;
let element_type = crate::handle_type(*element_type, output_pdb, type_finder)?;
let indexing_type = crate::handle_type(*indexing_type, output_pdb, type_finder)?;
let size = *dimensions.last().unwrap() as usize;
let arr = Array {
element_type,
indexing_type,
stride: *stride,
size,
dimensions_bytes: dimensions.iter().map(|b| *b as usize).collect(),
dimensions_elements: Vec::with_capacity(dimensions.len()),
};
Ok(arr)
}
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | true |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/bin/main.rs | crates/bin/main.rs | use std::path::PathBuf;
use std::str::FromStr;
use structopt::StructOpt;
use thiserror::Error;
mod output;
#[derive(Error, Debug)]
pub enum CliArgumentError {
#[error("the value `{1}` is not valid for the parameter `{0}`")]
InvalidValue(&'static str, String),
}
#[derive(StructOpt, Debug)]
#[structopt(name = "pdbview")]
struct Opt {
/// Print debug information
#[structopt(short, long)]
debug: bool,
/// Output format type. Options include: plain, json
#[structopt(short, long, default_value = "plain")]
format: OutputFormatType,
/// Base address of module in-memory. If provided, all "offset" fields
/// will be added to the provided base address
#[structopt(short, long)]
base_address: Option<usize>,
/// PDB file to process
#[structopt(name = "FILE", parse(from_os_str))]
file: PathBuf,
}
#[derive(Debug)]
enum OutputFormatType {
Plain,
Json,
}
impl FromStr for OutputFormatType {
type Err = CliArgumentError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let result = match s.to_ascii_lowercase().as_ref() {
"plain" => OutputFormatType::Plain,
"json" => OutputFormatType::Json,
_ => return Err(CliArgumentError::InvalidValue("format", s.to_string())),
};
Ok(result)
}
}
fn main() -> anyhow::Result<()> {
let opt = Opt::from_args();
if opt.debug {
simplelog::SimpleLogger::init(log::LevelFilter::Debug, simplelog::Config::default())?;
}
let parsed_pdb = ezpdb::parse_pdb(&opt.file, opt.base_address)?;
let stdout = std::io::stdout();
let mut stdout_lock = stdout.lock();
match opt.format {
OutputFormatType::Plain => output::print_plain(&mut stdout_lock, &parsed_pdb)?,
OutputFormatType::Json => output::print_json(&mut stdout_lock, &parsed_pdb)?,
}
Ok(())
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | false |
landaire/pdbview | https://github.com/landaire/pdbview/blob/f9be6c394d93e8f2fd415df6ee15495aac316648/crates/bin/output.rs | crates/bin/output.rs | use ezpdb::symbol_types::*;
use ezpdb::type_info::*;
use log::{debug, warn};
use std::io::{self, Write};
pub fn print_plain(output: &mut impl Write, pdb_info: &ParsedPdb) -> io::Result<()> {
// region: Header info
// Print header information
writeln!(output, "{:?}:", &pdb_info.path)?;
writeln!(output, "PDB Version: {:?}", pdb_info.version)?;
writeln!(
output,
"Machine Type: {}",
pdb_info
.machine_type
.as_ref()
.map(|ty| format!("{:?}", ty))
.unwrap_or_else(|| "Unknown".to_string())
)?;
writeln!(output, "Assembly Info:")?;
writeln!(output, "\tBuild Info:")?;
writeln!(output, "\tCompiler Info:")?;
let width = 40usize;
if let Some(compiler_info) = &pdb_info.assembly_info.compiler_info {
writeln!(output, "\t\tLanguage: {}", compiler_info.language)?;
let CompileFlags {
edit_and_continue,
no_debug_info,
link_time_codegen,
no_data_align,
managed,
security_checks,
hot_patch,
cvtcil,
msil_module,
sdl,
pgo,
exp_module,
} = compiler_info.flags;
writeln!(output, "\t\tFlags:")?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Edit and continue:",
edit_and_continue,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"No debug info:",
no_debug_info,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Link-time codegen (LTCG):",
link_time_codegen,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"No data align (/bzalign):",
no_data_align,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Managed code or data is present:",
managed,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Security checks (/GS):",
security_checks,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Hot patching (/hotpatch):",
hot_patch,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"CvtCIL:",
cvtcil,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Is MSIL .NET module:",
msil_module,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Compiled with /SDL:",
sdl,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"PGO (`/ltcg:pgo` or `pgo:`):",
pgo,
width = width
)?;
writeln!(
output,
"\t\t\t{:width$} {}",
"Is .exp module:",
exp_module,
width = width
)?;
writeln!(output, "\t\tCPU type: {}", compiler_info.cpu_type,)?;
let CompilerVersion {
major,
minor,
build,
qfe,
} = compiler_info.frontend_version;
writeln!(
output,
"\t\tFrontend version: {}.{}.{}, QFE={}",
major,
minor,
build,
qfe.map(|qfe| format!("{}", qfe))
.unwrap_or_else(|| "None".to_string())
)?;
let CompilerVersion {
major,
minor,
build,
qfe,
} = compiler_info.backend_version;
writeln!(
output,
"\t\tBackend version: {}.{}.{}, QFE={}",
major,
minor,
build,
qfe.map(|qfe| format!("{}", qfe))
.unwrap_or_else(|| "None".to_string())
)?;
writeln!(
output,
"\t\tVersion string: {}",
compiler_info.version_string
)?;
}
// endregion
// region: Public symbols
writeln!(output, "Public symbols:")?;
writeln!(output, "\t{:<10} Name", "Offset")?;
for symbol in &pdb_info.public_symbols {
write!(output, "\t")?;
if let Some(offset) = symbol.offset {
write!(output, "0x{:08X} ", offset)?;
} else {
write!(output, "{:<10} ", "")?;
}
writeln!(output, "{}", symbol.name)?;
}
// endregion
// region: Procedures
writeln!(output, "Procedures:")?;
writeln!(
output,
"\t{:<10} {:<10} {:<15} {:<15} {:<10}",
"Offset", "Length", "Prologue End", "Epilogue Start", "Name"
)?;
for procedure in &pdb_info.procedures {
write!(output, "\t")?;
if let Some(address) = procedure.address {
write!(output, "0x{:08X} ", address)?;
} else {
write!(output, "{:<10} ", "")?;
}
write!(output, "0x{:08X} ", procedure.len)?;
write!(
output,
"{:<15}",
format!("0x{:08X} ", procedure.prologue_end)
)?;
write!(
output,
"{:<15}",
format!("0x{:08X} ", procedure.epilogue_start)
)?;
writeln!(output, "{}", procedure.name)?;
}
// endregion
// region: Data
writeln!(output, "Globals:")?;
writeln!(output, "\t{:<10} {:<10}", "Offset", "Name")?;
for global in &pdb_info.global_data {
write!(output, "\t")?;
if let Some(offset) = global.offset {
write!(output, "0x{:08X} ", offset)?;
} else {
write!(output, "{:<10} ", "")?;
}
writeln!(output, "{}", global.name)?;
let ty: &Type = &*global.ty.as_ref().borrow();
writeln!(output, "\t\tType: {}", format_type_name(ty))?;
writeln!(output, "\t\tSize: 0x{:X}", ty.type_size(pdb_info))?;
writeln!(output, "\t\tIs Managed: {}", global.is_managed)?;
}
// endregion
// region: Types
writeln!(output)?;
writeln!(output, "Types:")?;
let width = 20usize;
for ty in pdb_info.types.values() {
let ty: &Type = &*ty.as_ref().borrow();
match ty {
Type::Class(class) => {
if class.properties.forward_reference {
continue;
}
writeln!(
output,
"\t{:width$} {} {}",
class.kind,
class.name,
class.unique_name.as_ref().map(String::as_ref).unwrap_or(""),
width = 10
)?;
writeln!(output, "\tSize: 0x{:X}", class.size)?;
// writeln!(
// output,
// "\t\t{:width$} {}",
// "Name:",
// class.name,
// width = width
// )?;
// writeln!(
// output,
// "\t\t{:width$} {}",
// "Unique name:",
// class.unique_name.as_ref().map(String::as_ref).unwrap_or(""),
// width = width
// )?;
writeln!(output, "\tFields:")?;
for field in &class.fields {
let field: &Type = &*field.as_ref().borrow();
match field {
Type::Member(member) => {
let member_ty: &Type = &*member.underlying_type.as_ref().borrow();
writeln!(
output,
"\t\t0x{:04X} {:width$} {}",
member.offset,
member.name,
format_type_name(member_ty),
width = width
)?;
}
Type::BaseClass(base) => {
writeln!(
output,
"\t\t0x{:04X} <BaseClass> {}",
base.offset,
format_type_name(&*base.base_class.as_ref().borrow())
)?;
}
Type::VirtualBaseClass(_) => {
// ignore
}
Type::Nested(_nested) => {
// writeln!(
// output,
// "\t\t (NestedType) {} {}",
// nested.name,
// format_type_name(&*nested.nested_type.as_ref().borrow())
// )?;
}
Type::Method(_) | Type::OverloadedMethod(_) => {
// ignore methods
}
Type::VTable(_) => {
// ignore vtable
}
Type::StaticMember(_) => {
// ignore
}
other => {
debug!("Unexpected field type present in class: {:?}", other)
}
}
}
}
Type::Union(union) => {
if union.properties.forward_reference {
continue;
}
writeln!(
output,
"\tUnion {} {}",
union.name,
union.unique_name.as_ref().map(String::as_ref).unwrap_or(""),
)?;
writeln!(output, "\tSize: 0x{:X}", union.size)?;
// writeln!(
// output,
// "\t\t{:width$} {}",
// "Name:",
// class.name,
// width = width
// )?;
// writeln!(
// output,
// "\t\t{:width$} {}",
// "Unique name:",
// class.unique_name.as_ref().map(String::as_ref).unwrap_or(""),
// width = width
// )?;
writeln!(output, "\tFields:")?;
for field in &union.fields {
let field: &Type = &*field.as_ref().borrow();
match field {
Type::Member(member) => {
let member_ty: &Type = &*member.underlying_type.as_ref().borrow();
writeln!(
output,
"\t\t0x{:04X} {:width$} {}",
member.offset,
member.name,
format_type_name(member_ty),
width = width
)?;
}
Type::BaseClass(base) => {
writeln!(
output,
"\t\t0x{:04X} <BaseClass> {}",
base.offset,
format_type_name(&*base.base_class.as_ref().borrow())
)?;
}
Type::VirtualBaseClass(_) => {
// ignore
}
Type::Nested(_nested) => {
// ignore nested types
// writeln!(
// output,
// "\t\t (NestedType) {} {}",
// nested.name,
// format_type_name(&*nested.nested_type.as_ref().borrow())
// )?;
}
Type::Method(_) | Type::OverloadedMethod(_) => {
// ignore methods
}
Type::VTable(_) => {
// ignore vtable
}
Type::StaticMember(_) => {
// ignore
}
other => {
debug!("Unexpected field type present in class: {:?}", other)
}
}
}
}
Type::Enumeration(e) => {
if e.properties.forward_reference {
continue;
}
writeln!(
output,
"\tEnum {} {}",
e.name,
e.unique_name.as_ref().map(String::as_ref).unwrap_or(""),
)?;
if let Type::Primitive(primitive) = &*e.underlying_type.borrow() {
writeln!(output, "\tSize: 0x{:X}", primitive.size())?;
}
let underlying_type = e.underlying_type.borrow();
writeln!(output, "\tType: {}", format_type_name(&*underlying_type))?;
writeln!(output, "\tVariants:")?;
for variant in &e.variants {
let value = match variant.value {
VariantValue::U8(v) => v as u64,
VariantValue::U16(v) => v as u64,
VariantValue::U32(v) => v as u64,
VariantValue::U64(v) => v as u64,
VariantValue::I8(v) => v as u64,
VariantValue::I16(v) => v as u64,
VariantValue::I32(v) => v as u64,
VariantValue::I64(v) => v as u64,
};
writeln!(output, "\t\t0x{:08X} {}", value, variant.name)?;
}
}
_ => {
continue;
}
}
writeln!(output)?;
}
// endregion
Ok(())
}
fn format_type_name(ty: &Type) -> String {
match ty {
Type::Class(class) => class.name.clone(),
Type::Union(union) => union.name.clone(),
Type::Array(array) => format!(
"{}{}",
format_type_name(&*array.element_type.as_ref().borrow()),
array
.dimensions_elements
.iter()
.fold(String::new(), |accum, dimension| format!(
"{}[0x{:X}]",
accum, dimension
))
),
Type::Pointer(pointer) => {
// TODO: Attributes
match pointer.underlying_type.as_ref() {
Some(underlying_type) => {
format!("{}*", format_type_name(&*underlying_type.as_ref().borrow()))
}
None => "<UNRESOLVED_POINTER_TYPE>".to_string(),
}
}
Type::Primitive(primitive) => match primitive.kind {
PrimitiveKind::Void => "void".to_string(),
PrimitiveKind::Char | PrimitiveKind::RChar => "char".to_string(),
PrimitiveKind::UChar => "unsigned char".to_string(),
PrimitiveKind::I8 => "int8_t".to_string(),
PrimitiveKind::U8 => "uint8_t".to_string(),
PrimitiveKind::I16 | PrimitiveKind::Short => "int16_t".to_string(),
PrimitiveKind::U16 | PrimitiveKind::UShort => "uint16_t".to_string(),
PrimitiveKind::I32 | PrimitiveKind::Long => "int32_t".to_string(),
PrimitiveKind::U32 | PrimitiveKind::ULong => "uint32_t".to_string(),
PrimitiveKind::I64 | PrimitiveKind::Quad => "int64_t".to_string(),
PrimitiveKind::U64 | PrimitiveKind::UQuad => "uint64_t".to_string(),
PrimitiveKind::F32 => "float".to_string(),
PrimitiveKind::F64 => "double".to_string(),
PrimitiveKind::Bool8 => "bool".to_string(),
other => {
format!("{}", other)
}
},
Type::Modifier(modifier) => format_type_name(&*modifier.underlying_type.as_ref().borrow()),
Type::Bitfield(bitfield) => format!(
"{}:{}",
format_type_name(&*bitfield.underlying_type.as_ref().borrow()),
bitfield.len
),
Type::Procedure(proc) => format!(
"{} (*function){}",
format_type_name(&*proc.return_type.as_ref().unwrap().as_ref().borrow()),
proc.argument_list
.iter()
.fold(String::new(), |accum, argument| {
format!(
"{}{}{}",
&accum,
if accum.is_empty() { "" } else { "," },
format_type_name(&*argument.as_ref().borrow())
)
})
),
Type::Enumeration(e) => e.name.clone(),
Type::MemberFunction(member) => {
format!(
"{} (*function){}",
format_type_name(&*member.return_type.as_ref().borrow()),
member
.argument_list
.iter()
.fold(String::new(), |accum, argument| {
format!(
"{}{}{}",
&accum,
if accum.is_empty() { "" } else { "," },
format_type_name(&*argument.as_ref().borrow())
)
})
)
}
other => panic!("unimplemented type format: {:?}", other),
}
}
pub fn print_json(output: &mut impl Write, pdb_info: &ParsedPdb) -> io::Result<()> {
write!(output, "{}", serde_json::to_string(pdb_info)?)
}
| rust | MIT | f9be6c394d93e8f2fd415df6ee15495aac316648 | 2026-01-04T20:20:57.282285Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/lib.rs | src/lib.rs | //! Library for NBody simulation using the Barnes-Hut algorithm.
pub mod vector;
pub mod nbody;
pub mod quadtree;
pub use nbody::{nbody_direct, nbody_barnes_hut};
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/main.rs | src/main.rs | //! Executable to run the simulation locally in a Piston GUI window.
extern crate glutin_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate piston;
use glutin_window::GlutinWindow as Window;
use opengl_graphics::{GlGraphics, OpenGL};
use piston::event_loop::*;
use piston::input::*;
use piston::window::WindowSettings;
mod vector;
mod nbody;
mod quadtree;
use vector::{
Scalar,
Vector,
Vector3D};
use nbody::{
generate_galaxy,
generate_blackhole,
nbody_barnes_hut,
MovingBody3D,
NBodyConfig3D,
NBodySimulation3D
};
/// Width of stars in the GUI
const STAR_WIDTH: f64 = 2.;
const BLACKHOLE_WIDTH: f64 = 18.;
const EVENTHORIZON_WIDTH: f64 = 20.;
const BLACK: [f32; 4] = [0., 0., 0., 0.0];
const SOLID_BLACK: [f32; 4] = [0., 0., 0., 1.0];
const RED1: [f32; 4] = [1., 0., 0., 0.1];
const WHITE7: [f32; 4] = [1., 1., 1., 0.7];
/// Piston App for GUI
pub struct App<'a> {
gl: GlGraphics, // OpenGL drawing backend.
sim: &'a mut NBodySimulation3D, // The simulation
}
/// Implementation of Piston App
impl App<'_> {
/// Renders a frame of the simulation
fn render(&mut self, args: &RenderArgs) {
use graphics::*;
let star_square = rectangle::square(0.0, 0.0, STAR_WIDTH);
let blackhole_square = rectangle::square(0.0, 0.0, BLACKHOLE_WIDTH);
let eventhorizon_square = rectangle::square(0.0, 0.0, EVENTHORIZON_WIDTH);
let sim = &mut self.sim;
self.gl.draw(args.viewport(), |c, gl| {
// Clear the screen.
clear(BLACK, gl);
// Plot all black holes
for i in 0..sim.config.num_blackholes {
let transform = c
.transform
.trans(sim.r[i].x as f64, sim.r[i].y as f64)
.trans(-EVENTHORIZON_WIDTH / 2., -EVENTHORIZON_WIDTH / 2.);
ellipse(RED1, eventhorizon_square, transform, gl);
let transform_center = c
.transform
.trans(sim.r[i].x as f64, sim.r[i].y as f64)
.trans(-BLACKHOLE_WIDTH / 2., -BLACKHOLE_WIDTH / 2.);
ellipse(SOLID_BLACK, blackhole_square, transform_center, gl);
}
// Plot all non-blackholes
for i in sim.config.num_blackholes..sim.n {
let transform = c
.transform
.trans(sim.r[i].x as f64, sim.r[i].y as f64)
.trans(-STAR_WIDTH / 2., -STAR_WIDTH / 2.);
ellipse(WHITE7, star_square, transform, gl);
}
});
}
/// Updates the simulation for one timestep.
fn update(&mut self, _args: &UpdateArgs) {
// nbody_direct(self.sim, 0.1);
nbody_barnes_hut(self.sim, 0.1, 1.);
}
fn click(&mut self, mouse_xy: &[f64; 2]) {
let (x, y) = (mouse_xy[0], mouse_xy[1]);
println!("Pressed mouse button ({}, {})", x, y);
// Center of galaxy.
let c: MovingBody3D = MovingBody3D {
r: Vector3D::from_xy(x as Scalar, y as Scalar),
v: Vector3D::zero(),
m: 5e6,
};
generate_blackhole(self.sim, &c);
}
}
/// Main routine
fn main() {
// Init the simulation
let min_dist: Scalar = 10.;
let min_r: Vector3D = Vector3D::from_xy(0., 0.);
let max_r: Vector3D = Vector3D::from_xy(500., 500.,);
let config = NBodyConfig3D::new(min_dist, min_r, max_r);
let mut sim: NBodySimulation3D = NBodySimulation3D::empty(1000, config);
// Center of galaxy.
let c: MovingBody3D = MovingBody3D {
r: Vector3D::from_xy(250., 250.),
v: Vector3D::zero(),
m: 5e6,
};
generate_galaxy(&mut sim, &c);
// nbody_barnes_hut(&mut sim, 0.1, 2.);
// nbody_barnes_hut(&mut sim, 0.1, 2.);
// nbody_barnes_hut(&mut sim, 0.1, 2.);
// return;
// Change this to OpenGL::V2_1 if not working.
let opengl = OpenGL::V3_2;
// Create an Glutin window.
let mut window: Window = WindowSettings::new("galaxy", [max_r.x as u32, max_r.y as u32])
.graphics_api(opengl)
.exit_on_esc(true)
.build()
.unwrap();
// Create a new game and run it.
let mut app = App {
gl: GlGraphics::new(opengl),
sim: &mut sim,
};
let mut events = Events::new(EventSettings::new());
let mut mouse_xy = [0., 0.];
while let Some(e) = events.next(&mut window) {
if let Some(Button::Mouse(_button)) = e.press_args() {
app.click(&mouse_xy);
}
if let Some(args) = e.render_args() {
app.render(&args);
}
if let Some(args) = e.update_args() {
app.update(&args);
}
// Track the mouse position
e.mouse_cursor(|pos| {
mouse_xy = pos
});
}
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/vector/base.rs | src/vector/base.rs | //! Custom 3D vector struct.
use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign};
use std::fmt::Debug;
/// Scalar type: configure floating point precision here.
pub type Scalar = f32;
/// Vector type that supports linear combinations, cloning, and l2 norm.
pub trait Vector:
Sized +
Copy +
Clone +
PartialEq +
Debug +
Add<Output = Self> +
AddAssign +
Mul<Scalar, Output = Self> +
MulAssign<Scalar> +
Sub<Output = Self> +
SubAssign {
fn zero() -> Self;
fn from_xy(x: Scalar, y: Scalar) -> Self;
fn to_xy(self) -> (Scalar, Scalar);
fn l2_sqrd(self) -> Scalar;
fn in_bounds(self, min: &Self, max: &Self) -> bool;
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/vector/vector3d.rs | src/vector/vector3d.rs | //! Custom 3D vector struct.
use std::fmt::Debug;
use std::ops::{Add, AddAssign, Mul, MulAssign, Sub, SubAssign};
use super::{Scalar, Vector};
/// Generic 3D vector.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Vector3D {
pub x: Scalar,
pub y: Scalar,
pub z: Scalar,
}
impl Mul<Scalar> for Vector3D {
type Output = Self;
fn mul(self, rhs: Scalar) -> Self {
Self {
x: self.x * rhs,
y: self.y * rhs,
z: self.z * rhs,
}
}
}
impl MulAssign<Scalar> for Vector3D {
fn mul_assign(&mut self, rhs: Scalar) {
*self = Self {
x: self.x * rhs,
y: self.y * rhs,
z: self.z * rhs,
};
}
}
impl Add for Vector3D {
type Output = Self;
fn add(self, rhs: Self) -> Self {
Self {
x: self.x + rhs.x,
y: self.y + rhs.y,
z: self.z + rhs.z,
}
}
}
impl AddAssign for Vector3D {
fn add_assign(&mut self, rhs: Self) {
*self = Self {
x: self.x + rhs.x,
y: self.y + rhs.y,
z: self.z + rhs.z,
};
}
}
impl Sub for Vector3D {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
Self {
x: self.x - rhs.x,
y: self.y - rhs.y,
z: self.z - rhs.z,
}
}
}
impl SubAssign for Vector3D {
fn sub_assign(&mut self, rhs: Self) {
*self = Self {
x: self.x - rhs.x,
y: self.y - rhs.y,
z: self.z - rhs.z,
};
}
}
impl Vector for Vector3D {
fn zero() -> Self {
Self {
x: 0.,
y: 0.,
z: 0.
}
}
fn from_xy(x: Scalar, y: Scalar) -> Self {
Self {
x,
y,
z: 0.
}
}
fn to_xy(self) -> (Scalar, Scalar) {
(self.x, self.y)
}
fn l2_sqrd(self) -> Scalar {
self.x * self.x + self.y * self.y + self.z * self.z
}
fn in_bounds(self, min: &Self, max: &Self) -> bool {
self.x >= min.x && self.x <= max.x &&
self.y >= min.y && self.y <= max.y &&
self.z >= min.z && self.z <= max.z
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_add_assign() {
let mut v1 = Vector3D { x: 1., y: 2., z: 3., };
let v2 = Vector3D { x: 2., y: 4., z: 6., };
let v3 = Vector3D { x: 3., y: 6., z: 9., };
v1 += v2;
assert_eq!(v1, v3);
}
#[test]
fn test_mul_assign() {
let mut v1 = Vector3D { x: 1., y: 2., z: 3., };
let v3 = Vector3D { x: 3., y: 6., z: 9., };
let s: Scalar = 3.;
v1 *= s;
assert_eq!(v1, v3);
}
#[test]
fn test_mul() {
let v1 = Vector3D { x: 1., y: 2., z: 3., };
let v3 = Vector3D { x: 3., y: 6., z: 9., };
let s: Scalar = 3.;
assert_eq!(v1 * s, v3);
}
#[test]
fn test_in_bounds() {
let min = Vector3D { x: 0., y: 0., z: 0., };
let max = Vector3D { x: 500., y: 500., z: 0., };
let r = Vector3D { x: 250., y: 250., z: 0., };
assert_eq!(r.in_bounds(&min, &max), true);
}
} | rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/vector/mod.rs | src/vector/mod.rs | //! Shared utilities.
pub mod base;
pub mod vector3d;
pub use self::base::{Scalar, Vector};
pub use self::vector3d::Vector3D;
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/simulation.rs | src/nbody/simulation.rs | //! Module for defining simulation of bodies (planets, etc.)
use rand::Rng;
use super::bodies::{Scalar, Vector, Vector3D, MovingBody};
use super::generators::{generate_satellite};
/// Class to configure a simulation
#[derive(Debug)]
pub struct NBodyConfig<V: Vector> {
pub min_dist: Scalar,
pub min_dist_sqrd: Scalar,
pub min_r: V,
pub max_r: V,
pub num_blackholes: usize,
}
impl<V: Vector> NBodyConfig<V> {
pub fn new(min_dist: Scalar, min_r: V, max_r: V) -> Self {
Self {
min_dist,
min_dist_sqrd: min_dist * min_dist,
min_r,
max_r,
num_blackholes: 0,
}
}
}
pub type NBodyConfig3D = NBodyConfig<Vector3D>;
/// Class defining the simulation for 2D n-body problem.
#[derive(Debug)]
pub struct NBodySimulation<V: Vector> {
pub n: usize,
pub m: Vec<Scalar>,
pub r: Vec<V>,
pub v: Vec<V>,
pub a: Vec<V>,
pub config: NBodyConfig<V>,
}
pub type NBodySimulation3D = NBodySimulation<Vector3D>;
impl<V: Vector> NBodySimulation<V> {
/// Constructs and empty `NBodySimulation2D` with n uninitialized bodies.
pub fn empty(n: usize, config: NBodyConfig<V>) -> Self {
let sim: Self = Self{
n,
m: vec![0.; n],
r: vec![V::zero(); n],
v: vec![V::zero(); n],
a: vec![V::zero(); n],
config,
};
sim
}
/// Sets a body in the simulation
pub fn set(&mut self, i: usize, body: &MovingBody<V>) {
self.m[i] = body.m;
self.r[i] = body.r;
self.v[i] = body.v;
self.a[i] = V::zero();
}
/// Gets a body from the simulation
pub fn get(&self, i: usize) -> MovingBody<V> {
MovingBody {
m: self.m[i],
r: self.r[i],
v: self.v[i],
}
}
/// Resets a particle based on its type
pub fn reset(&mut self, i: usize, ci: usize) {
let c = self.get(ci);
// If resetting a black hole, delete it
if i < self.config.num_blackholes {
// Reduce size of black hole portion of array
self.config.num_blackholes -= 1;
// Move the previous last black hole to the deleted black hole's position
self.set(i, &self.get(self.config.num_blackholes));
// Replace the last black hole with a satellite
self.set(self.config.num_blackholes, &generate_satellite(&c))
} else {
// Otherwise, repalce this star with a new satellite
self.set(i, &generate_satellite(&c));
}
}
/// Integrate velocity and position over time
pub fn integrate(&mut self, dt: Scalar) {
let mut rng = rand::thread_rng();
for i in 0..self.n {
// Update velocities
self.v[i] += self.a[i] * dt;
// Update positions
self.r[i] += self.v[i] * dt;
// Check for black hole intersections
for ci in 0..self.config.num_blackholes {
// Don't check for inteserctions against self
if i == ci { continue };
let c = self.get(ci);
let d = c.r - self.r[i];
let d_sqrd: Scalar = d.l2_sqrd();
if d_sqrd < self.config.min_dist_sqrd {
self.reset(i, ci);
}
}
// Check for out of bounds
if !self.r[i].in_bounds(&self.config.min_r, &self.config.max_r) {
// Don't reset if there are no black holes
if self.config.num_blackholes > 0 { continue }
// Pick a random black hold to put next to
let ci = rng.gen_range(0, self.config.num_blackholes);
self.reset(i, ci);
}
}
}
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/direct.rs | src/nbody/direct.rs | //! Direct algorithm using all-pairs force accumulation
use super::{NBodySimulation};
use crate::vector::{Scalar, Vector};
use std::f32;
/// Runs a single timestep of the simulation using the all-pairs calculation.
#[allow(dead_code)]
pub fn nbody_direct<V: Vector>(sim: &mut NBodySimulation<V>, dt: Scalar) {
for i in 0..sim.n {
sim.a[i] = V::zero();
for j in 0..sim.n {
let d = sim.r[j] - sim.r[i];
let d_sqrd: Scalar = d.l2_sqrd();
if d_sqrd < sim.config.min_dist_sqrd {
continue;
}
let inv_d_cubed: f32 = 1. / d_sqrd.powf(3.);
sim.a[i] += d * sim.m[j] * inv_d_cubed;
}
}
sim.integrate(dt);
}
#[cfg(test)]
mod test {
use crate::vector::{Scalar, Vector, Vector3D};
use crate::nbody::{NBodyConfig3D, NBodySimulation3D, MovingBody3D, generate_galaxy};
use super::{nbody_direct};
#[test]
fn test_direct() {
// Init the simulation
let min_dist: Scalar = 10.;
let min_r: Vector3D = Vector3D::from_xy(0., 0.);
let max_r: Vector3D = Vector3D::from_xy(500., 500.,);
let config = NBodyConfig3D::new(min_dist, min_r, max_r);
let mut sim: NBodySimulation3D = NBodySimulation3D::empty(10, config);
// Center of galaxy
let c: MovingBody3D = MovingBody3D {
r: Vector3D::from_xy(250., 250.),
v: Vector3D::zero(),
m: 5e6,
};
generate_galaxy(&mut sim, &c);
nbody_direct(&mut sim, 0.1);
}
} | rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/barnes_hut.rs | src/nbody/barnes_hut.rs | //! Barnes hut algorithm
use super::{NBodySimulation3D};
use crate::vector::{Scalar, Vector, Vector3D};
use crate::quadtree::{BoundingBox2D, MassQuadtree, MassQuadtreeIterator};
/// Runs a single timestep of the simulation using the Barnes-Hut algorithm.
pub fn nbody_barnes_hut(sim: &mut NBodySimulation3D, dt: Scalar, theta: Scalar) {
let (min_x, min_y) = sim.config.min_r.to_xy();
let (max_x, max_y) = sim.config.max_r.to_xy();
let bb: BoundingBox2D = BoundingBox2D { min_x, max_x, min_y, max_y, };
let quadtree: MassQuadtree = MassQuadtree::new(&sim.r, &sim.m, bb);
// println!("\n\nQuadtree: {:?}", quadtree);
// For each point
for i in 0..sim.n {
sim.a[i] = Vector3D::zero();
// println!("r[i] = ({}, {})", sim.rx[i], sim.ry[i]);
let quadtree_iter =
MassQuadtreeIterator::new(sim.r[i].x, sim.r[i].y, theta, &quadtree, bb);
// Get all points that are close enough to treat as individuals
for node in quadtree_iter {
let d = Vector3D {
x: node.x - sim.r[i].x,
y: node.y - sim.r[i].y,
z: 0.,
};
let d_sqrd: Scalar = d.l2_sqrd();
if d_sqrd < sim.config.min_dist_sqrd {
continue;
}
// if i == 0 { println!("Node: ({}, {}, {})", node.x, node.y, node.m); }
let inv_d_cubed: Scalar = 1. / d_sqrd.powf(3.);
sim.a[i] += d * node.m * inv_d_cubed;
}
// if i == 0 { println!(); }
}
sim.integrate(dt);
}
#[cfg(test)]
mod test {
use crate::vector::{Scalar, Vector, Vector3D};
use crate::nbody::{NBodyConfig3D, NBodySimulation3D, MovingBody3D, generate_galaxy};
use super::{nbody_barnes_hut};
#[test]
fn test_barnes_hut() {
// Init the simulation
let min_dist: Scalar = 10.;
let min_r: Vector3D = Vector3D::from_xy(0., 0.);
let max_r: Vector3D = Vector3D::from_xy(500., 500.,);
let config = NBodyConfig3D::new(min_dist, min_r, max_r);
let mut sim: NBodySimulation3D = NBodySimulation3D::empty(10, config);
// Center of galaxy.
let c: MovingBody3D = MovingBody3D {
r: Vector3D::from_xy(250., 250.),
v: Vector3D::zero(),
m: 5e6,
};
generate_galaxy(&mut sim, &c);
nbody_barnes_hut(&mut sim, 0.1, 2.);
}
} | rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/bodies.rs | src/nbody/bodies.rs | // Generic body definitions.
pub use crate::vector::{Scalar, Vector, Vector3D};
#[derive(Debug)]
pub struct Body<V: Vector> {
pub m: Scalar,
pub r: V,
}
#[derive(Debug)]
pub struct MovingBody<V: Vector> {
pub m: Scalar,
pub r: V,
pub v: V,
}
pub type MovingBody3D = MovingBody<Vector3D>;
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/mod.rs | src/nbody/mod.rs | //! N Body simulation
pub mod barnes_hut;
pub mod bodies;
pub mod direct;
pub mod generators;
pub mod simulation;
pub use crate::vector::Vector3D;
pub use self::barnes_hut::nbody_barnes_hut;
pub use self::bodies::{Body, MovingBody, MovingBody3D};
pub use self::direct::{nbody_direct};
pub use self::generators::{generate_galaxy, generate_satellite, generate_blackhole};
pub use self::simulation::{NBodyConfig, NBodyConfig3D, NBodySimulation, NBodySimulation3D};
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/nbody/generators.rs | src/nbody/generators.rs | use rand_distr::{Uniform, Distribution, Normal};
use std::f32::{consts::PI};
use super::simulation::{NBodySimulation};
use super::bodies::{Scalar, Vector, MovingBody};
// Generates a satelite around the galaxy center.
pub fn generate_satellite<V: Vector>(c: &MovingBody<V>) -> MovingBody<V> {
// Generate a randon polar coordinate and mass
let mut rng = rand::thread_rng();
let uniform: Uniform<Scalar> = Uniform::new(0., 2. * PI);
let r_norm: Normal<Scalar> = Normal::new(1., 1.).unwrap();
let m_norm: Normal<Scalar> = Normal::new(1., 1.).unwrap();
let theta: Scalar = uniform.sample(&mut rng);
let mut r: Scalar = r_norm.sample(&mut rng);
let mut m: Scalar = m_norm.sample(&mut rng);
r = Scalar::min(30. * r.abs() + 20., 250.);
m = Scalar::min(m.abs() + 1e-2, 3.);
// Calculate position
let (crx, cry) = c.r.to_xy();
let rx: Scalar = r * theta.cos() + crx;
let ry: Scalar = r * theta.sin() + cry;
// Calculate velocity, which should increase with center's mass, the
let dx: Scalar = crx - rx;
let dy: Scalar = cry - ry;
let d: Scalar = (dx * dx + dy * dy).sqrt();
let s: Scalar = 1.00025e0 * (c.m).sqrt() / r / r;
let vx: Scalar = s * dy / d;
let vy: Scalar = s * -dx / d;
MovingBody {
r: V::from_xy(rx, ry),
v: V::from_xy(vx, vy),
m,
}
}
/// Generates a simple galaxy
pub fn generate_blackhole<V: Vector>(sim: &mut NBodySimulation<V>, c: &MovingBody<V>) {
// Initialize with supermassive object in middle
sim.set(sim.config.num_blackholes, c);
sim.config.num_blackholes += 1;
}
/// Generates a simple galaxy
pub fn generate_galaxy<V: Vector>(sim: &mut NBodySimulation<V>, c: &MovingBody<V>) {
// Initialize with supermassive object in middle
sim.set(0, c);
sim.config.num_blackholes = 1;
// Add all other objects as satellites.
for i in 1..sim.n {
sim.set(i, &generate_satellite(c));
}
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/quadtree/tree.rs | src/quadtree/tree.rs | //! Quadtree that keeps track of centers of mass.
use super::BoundingBox2D;
use crate::vector::{Scalar, Vector3D};
const EPSILON: Scalar = 1e-4;
/// Computes the l2 norm of a 2d vector represented by x1, y1, x2, y2
fn l2(x1: Scalar, y1: Scalar, x2: Scalar, y2: Scalar) -> Scalar {
let dx: Scalar = x2 - x1;
let dy: Scalar = y2 - y1;
(dx * dx + dy * dy).sqrt()
}
/// Definition of the mass quadtree
#[derive(Debug)]
pub struct MassQuadtree {
pub x: Scalar,
pub y: Scalar,
pub m: Scalar,
pub children: Vec<Option<Self>>,
}
/// Implementation for the mass quadtree
impl MassQuadtree {
    /// Constructs an empty node: zero mass, zero position, no children.
    pub fn empty() -> Self {
        Self {
            x: 0.,
            y: 0.,
            m: 0.,
            children: vec![None, None, None, None]
        }
    }
    /// Constructs a new leaf child under `self` in the given quadrant,
    /// overwriting any existing child in that slot.
    pub fn new_child(&mut self, quadrant: usize, x: Scalar, y: Scalar, m: Scalar) {
        self.children[quadrant] = Some(Self {
            x,
            y,
            m,
            children: vec![None, None, None, None]
        })
    }
    /// Constructs a quadtree for the given bounds and list of points.
    ///
    /// Takes slices rather than `&Vec` so any contiguous storage works;
    /// existing `&Vec<_>` call sites still compile via deref coercion.
    /// `r` and `m` are indexed in lockstep: `m[i]` is the mass of point `r[i]`.
    pub fn new(r: &[Vector3D], m: &[Scalar], bb: BoundingBox2D) -> Self {
        let mut root = Self::empty();
        for (point, &mass) in r.iter().zip(m.iter()) {
            root.insert(point.x, point.y, mass, bb);
        }
        root
    }
    /// Folds the point `(x, y)` with mass `m` into this node's center of mass.
    pub fn update_com(&mut self, x: Scalar, y: Scalar, m: Scalar) {
        let total_m: Scalar = self.m + m;
        self.x = (self.m * self.x + m * x) / total_m;
        self.y = (self.m * self.y + m * y) / total_m;
        self.m = total_m;
    }
    /// Inserts a point into the quadtree, updating the center of mass of
    /// every node on the insertion path.
    pub fn insert(&mut self, x: Scalar, y: Scalar, m: Scalar, bb: BoundingBox2D) {
        // Edge cases: if inserting empty objects or inserting the first element of the tree
        if m == 0. { return }
        if self.m == 0. { self.x = x; self.y = y; self.m = m; return }
        // Find the parent to insert this node under
        let mut parent: &mut Self = self;
        let mut parent_bb: BoundingBox2D = bb;
        let mut quadrant: usize = parent_bb.quadrant(x, y);
        while let Some(_) = &mut parent.children[quadrant] {
            // Update the parent's center of mass
            parent.update_com(x, y, m);
            // Update the bounding box while searching for new parents deeper in the tree
            parent_bb = parent_bb.child(quadrant);
            parent = parent.children[quadrant].as_mut().unwrap();
            // Compute quadrant for next iteration
            quadrant = parent_bb.quadrant(x, y);
        }
        // Leaves must be re-inserted
        if parent.is_leaf() {
            let (px, py, pm) = (parent.x, parent.y, parent.m);
            // Edge case: if the parent is too close to the child, don't insert as child
            if (px - x).abs() < EPSILON && (py - y).abs() < EPSILON { return }
            // Find the center of mass between the two
            parent.update_com(x, y, m);
            let (cx, cy, cm) = (parent.x, parent.y, parent.m);
            // Then split until the parent and child are in separate cells
            let mut parent_quadrant = parent_bb.quadrant(px, py);
            while quadrant == parent_quadrant {
                // Create the cell containing both
                parent.new_child(quadrant, cx, cy, cm);
                parent = parent.children[quadrant].as_mut().unwrap();
                // Split the center and continue down
                parent_bb = parent_bb.child(quadrant);
                quadrant = parent_bb.quadrant(x, y);
                parent_quadrant = parent_bb.quadrant(px, py);
            }
            // Once the quadrants are different, insert the parent into its quadrant
            parent.new_child(parent_quadrant, px, py, pm);
        } else {
            // BUGFIX: the descent loop above only updates the nodes it moves
            // *through*. When the loop stops at an internal node whose target
            // quadrant is empty, that node never absorbed the new point's
            // mass, leaving its center of mass stale. Fold it in here.
            parent.update_com(x, y, m);
        }
        // Insert the new child in the correct quadrant
        parent.new_child(quadrant, x, y, m);
    }
    /// Checks if this node is a leaf (no children in any quadrant).
    pub fn is_leaf(&self) -> bool {
        for child in &self.children {
            if child.is_some() {
                return false
            }
        }
        true
    }
}
/// Iterator for iterating over all nearby nodes of the tree
/// (the nodes that contribute to the force on the body at `(x, y)`).
pub struct MassQuadtreeIterator<'a> {
    // Query position of the body the traversal is evaluated against.
    x: Scalar,
    y: Scalar,
    // Barnes-Hut accuracy threshold: nodes with s/d < theta are used whole.
    theta: Scalar,
    // DFS worklist of (node, bounding box of that node) still to visit.
    stack: Vec<(&'a MassQuadtree, BoundingBox2D)>
}
/// Implementation of the constructor for the mass quadtree iterator.
impl<'a> MassQuadtreeIterator<'a> {
    /// Constructs a new iterator whose traversal stack starts at the root.
    pub fn new(x: Scalar, y: Scalar, theta: Scalar, tree: &'a MassQuadtree, bb: BoundingBox2D) -> Self {
        let stack = vec![(tree, bb)];
        Self { x, y, theta, stack }
    }
}
/// Implements the iterator
impl<'a> Iterator for MassQuadtreeIterator<'a> {
    type Item = &'a MassQuadtree;
    /// Gets the next node that should count towards the force calculation for the current particle.
    ///
    /// A node is "sufficiently far away" when s/d < θ, where s is the width
    /// of the region the node covers and d is the distance between the body
    /// and the node's center of mass. Such nodes (and all leaves) are
    /// returned whole; anything closer is expanded into its children.
    /// Larger θ trades accuracy for speed; θ = 0 degenerates to direct-sum.
    fn next(&mut self) -> Option<&'a MassQuadtree> {
        while let Some((node, bb)) = self.stack.pop() {
            let d: Scalar = l2(node.x, node.y, self.x, self.y);
            let s: Scalar = bb.width();
            if s / d < self.theta || node.is_leaf() {
                return Some(node);
            }
            // Too close to approximate: descend into every existing child.
            for (quadrant, child) in node.children.iter().enumerate() {
                if let Some(child) = child {
                    self.stack.push((child, bb.child(quadrant)));
                }
            }
        }
        None
    }
}
#[test]
fn test_quadtree() {
    // Two bodies: a tiny satellite near (265, 263) and a supermassive
    // center at (250, 250).
    let r: Vec<Vector3D> = vec![
        Vector3D { x: 265.56293, y: 263.4189, z: 0. },
        Vector3D { x: 250.0, y: 250.0, z: 0. },
    ];
    let m: Vec<Scalar> = vec![0.4261353, 5000000.0];
    // Create a quadtree in the bounding box (0,0),(500, 500)
    let bb = BoundingBox2D { min_x: 0., max_x: 500., min_y: 0., max_y: 500. };
    let quadtree = MassQuadtree::new(&r, &m, bb);
    println!("Tree: {:?}", quadtree);
    // Iterate over all contributing nodes for a body at (250, 250), θ = 0.5.
    let theta: Scalar = 0.5;
    for node in MassQuadtreeIterator::new(250., 250., theta, &quadtree, bb) {
        println!("Node: ({}, {}, {})", node.x, node.y, node.m);
    }
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/quadtree/mod.rs | src/quadtree/mod.rs | //! TODO
mod bb;
mod tree;
pub use self::bb::{BoundingBox2D};
pub use self::tree::{MassQuadtree, MassQuadtreeIterator};
// pub use plot;
// pub use build;
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
Katsutoshii/barnes-hut-rs | https://github.com/Katsutoshii/barnes-hut-rs/blob/2e91f934e8feefac69c4a5d9a568a48e798084d8/src/quadtree/bb.rs | src/quadtree/bb.rs | //! Defines a splitable bounding box
use crate::vector::Scalar;
/// Splitable bounding box in 2 dimensions.
#[derive(Debug, Clone, Copy)]
pub struct BoundingBox2D {
    // Inclusive lower / upper bounds on each axis.
    pub min_x: Scalar,
    pub max_x: Scalar,
    pub min_y: Scalar,
    pub max_y: Scalar
}
/// Implementation for a splitable bounding box in 2 dimensions.
impl BoundingBox2D {
    /// Center X coordinate of the box.
    pub fn cx(&self) -> Scalar {
        (self.min_x + self.max_x) / 2.
    }
    /// Center Y coordinate of the box.
    pub fn cy(&self) -> Scalar {
        (self.min_y + self.max_y) / 2.
    }
    /// Width of the box (extent along the x axis).
    pub fn width(&self) -> Scalar {
        self.max_x - self.min_x
    }
    /// Returns the quadrant (0..=3) containing the point `(x, y)`.
    ///
    /// Bit 0 (LSB) is set when the point is in the right half (x >= center);
    /// bit 1 (MSB) is set when it is in the upper half (y >= center).
    pub fn quadrant(&self, x: Scalar, y: Scalar) -> usize {
        usize::from(x >= self.cx()) | (usize::from(y >= self.cy()) << 1)
    }
    /// Returns the sub-box for the given quadrant, encoded as in `quadrant`.
    /// Any out-of-range quadrant number yields a copy of the whole box.
    pub fn child(&self, quadrant: usize) -> Self {
        let (cx, cy) = (self.cx(), self.cy());
        let Self { min_x, max_x, min_y, max_y } = *self;
        match quadrant {
            0b00 => Self { min_x, max_x: cx, min_y, max_y: cy },
            0b01 => Self { min_x: cx, max_x, min_y, max_y: cy },
            0b10 => Self { min_x, max_x: cx, min_y: cy, max_y },
            0b11 => Self { min_x: cx, max_x, min_y: cy, max_y },
            _ => Self { min_x, max_x, min_y, max_y },
        }
    }
}
| rust | MIT | 2e91f934e8feefac69c4a5d9a568a48e798084d8 | 2026-01-04T20:20:58.527532Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/config.rs | src/config.rs | use eyre::eyre;
use starknet::core::types::Felt;
use std::env::var;
use url::Url;
/// Reads `var_name` from the environment and parses its value as a
/// hex-encoded `Felt`. Errors if the variable is unset or not valid hex.
fn env_var_to_field_element(var_name: &str) -> Result<Felt, eyre::Error> {
    let raw = var(var_name).map_err(|_| eyre!("missing env var: {var_name}"))?;
    let felt = Felt::from_hex(&raw)?;
    Ok(felt)
}
#[derive(Clone, Debug)]
/// Configuration for the Starknet RPC client.
pub struct KakarotRpcConfig {
    /// Starknet network: URL of the JSON-RPC endpoint to connect to.
    pub network_url: Url,
    /// Kakarot contract address.
    pub kakarot_address: Felt,
    /// Uninitialized account class hash.
    pub uninitialized_account_class_hash: Felt,
    /// Account contract class hash.
    pub account_contract_class_hash: Felt,
}
impl KakarotRpcConfig {
    /// Builds the configuration from environment variables.
    ///
    /// Reads four variables:
    /// - `STARKNET_NETWORK`: URL of a `JsonRpc` starknet provider, e.g.
    ///   <https://starknet-goerli.g.alchemy.com/v2/some_key>.
    /// - `KAKAROT_ADDRESS`: hex-encoded Kakarot contract address.
    /// - `UNINITIALIZED_ACCOUNT_CLASS_HASH`: hex-encoded class hash.
    /// - `ACCOUNT_CONTRACT_CLASS_HASH`: hex-encoded class hash.
    ///
    /// Errors if any variable is missing or fails to parse.
    pub fn from_env() -> eyre::Result<Self> {
        Ok(Self {
            network_url: Url::parse(&var("STARKNET_NETWORK")?)?,
            kakarot_address: env_var_to_field_element("KAKAROT_ADDRESS")?,
            uninitialized_account_class_hash: env_var_to_field_element("UNINITIALIZED_ACCOUNT_CLASS_HASH")?,
            account_contract_class_hash: env_var_to_field_element("ACCOUNT_CONTRACT_CLASS_HASH")?,
        })
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/lib.rs | src/lib.rs | #![cfg_attr(not(any(test, feature = "testing")), warn(unused_crate_dependencies))]
use opentelemetry as _;
use opentelemetry_otlp as _;
use opentelemetry_sdk as _;
use tracing_opentelemetry as _;
use tracing_subscriber as _;
pub mod providers {
pub mod alchemy_provider;
pub mod debug_provider;
pub mod eth_provider;
pub mod pool_provider;
pub mod sn_provider;
}
pub mod client;
pub mod config;
pub mod constants;
pub mod eth_rpc;
pub mod models;
pub mod pool;
pub mod prometheus_handler;
#[cfg(feature = "testing")]
pub mod test_utils;
pub mod tracing;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/main.rs | src/main.rs | use dotenvy::dotenv;
use eyre::Result;
use kakarot_rpc::{
client::EthClient,
constants::{KAKAROT_RPC_CONFIG, KKRT_BLOCK_GAS_LIMIT, RPC_CONFIG},
eth_rpc::{rpc::KakarotRpcModuleBuilder, run_server},
pool::{
constants::PRUNE_DURATION,
mempool::{maintain_transaction_pool, AccountManager},
},
providers::eth_provider::{
database::Database,
starknet::kakarot_core::{core::KakarotCoreReader, KAKAROT_ADDRESS},
},
};
use mongodb::options::{DatabaseOptions, ReadConcern, WriteConcern};
use opentelemetry_sdk::runtime::Tokio;
use reth_transaction_pool::PoolConfig;
use starknet::{
core::types::{BlockId, BlockTag, Felt},
providers::{jsonrpc::HttpTransport, JsonRpcClient},
};
use std::{env::var, str::FromStr, sync::Arc};
use tracing_opentelemetry::MetricsLayer;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
/// Entry point: wires up tracing, MongoDB, the Starknet provider, the
/// mempool/relayer machinery, and finally blocks on the JSON-RPC server.
#[tokio::main]
async fn main() -> Result<()> {
    // Environment variables are safe to use after this
    dotenv().ok();
    setup_tracing().expect("failed to start tracing and metrics");
    let starknet_provider = JsonRpcClient::new(HttpTransport::new(KAKAROT_RPC_CONFIG.network_url.clone()));
    // Setup the database (majority read/write concerns for consistency)
    let db_client =
        mongodb::Client::with_uri_str(var("MONGO_CONNECTION_STRING").expect("Missing MONGO_CONNECTION_STRING .env"))
            .await?;
    let db = Database::new(
        db_client.database_with_options(
            &var("MONGO_DATABASE_NAME").expect("Missing MONGO_DATABASE_NAME from .env"),
            DatabaseOptions::builder()
                .read_concern(ReadConcern::majority())
                .write_concern(WriteConcern::majority())
                .build(),
        ),
    );
    // Setup the eth provider
    let starknet_provider = Arc::new(starknet_provider);
    // Get the pool config: base fee is read from the Kakarot contract at the
    // pending block so the pool's minimum matches the chain's current value.
    let contract_reader = KakarotCoreReader::new(*KAKAROT_ADDRESS, starknet_provider.clone());
    let base_fee = contract_reader.get_base_fee().block_id(BlockId::Tag(BlockTag::Pending)).call().await?.base_fee;
    let base_fee = base_fee.try_into()?;
    let config =
        PoolConfig { minimal_protocol_basefee: base_fee, gas_limit: KKRT_BLOCK_GAS_LIMIT, ..Default::default() };
    // Init the Ethereum Client
    let eth_client = EthClient::new(starknet_provider, config, db.clone());
    let eth_client = Arc::new(eth_client);
    // Start the relayer manager. NOTE(review): entries of RELAYERS_ADDRESSES
    // that fail to parse as Felt are silently skipped (filter_map).
    let addresses =
        var("RELAYERS_ADDRESSES")?.split(',').filter_map(|addr| Felt::from_str(addr).ok()).collect::<Vec<_>>();
    AccountManager::new(addresses, Arc::clone(&eth_client)).start();
    // Start the maintenance of the mempool
    maintain_transaction_pool(Arc::clone(&eth_client), PRUNE_DURATION);
    // Setup the RPC module
    let kakarot_rpc_module = KakarotRpcModuleBuilder::new(eth_client).rpc_module()?;
    // Start the RPC server and block until it is stopped.
    let (socket_addr, server_handle) = run_server(kakarot_rpc_module, RPC_CONFIG.clone()).await?;
    let url = format!("http://{socket_addr}");
    tracing::info!("RPC Server running on {url}...");
    server_handle.stopped().await;
    Ok(())
}
/// Set up the subscriber for tracing and metrics.
///
/// Builds two OTLP pipelines (traces and metrics), stacks them with an
/// `RUST_LOG`-driven filter, and adds a separately-filtered stdout fmt layer.
///
/// # Errors
/// Returns an error if either OTLP pipeline fails to install or if the
/// env-filter strings fail to parse.
fn setup_tracing() -> Result<()> {
    // Prepare a tracer pipeline that exports to the OpenTelemetry collector,
    // using tonic as the gRPC client. Using a batch exporter for better performance:
    // https://docs.rs/opentelemetry-otlp/0.17.0/opentelemetry_otlp/#performance
    let tracer = opentelemetry_otlp::new_pipeline()
        .tracing()
        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
        .install_batch(Tokio)?;
    // Set up the tracing layer with the OpenTelemetry tracer. A layer is a basic building block,
    // in tracing, that allows to define behavior for collecting or recording trace data. Layers
    // can be stacked on top of each other to create a pipeline.
    // https://docs.rs/tracing-subscriber/latest/tracing_subscriber/layer/trait.Layer.html
    let tracing_layer = tracing_opentelemetry::layer().with_tracer(tracer).boxed();
    // Prepare a metrics pipeline that exports to the OpenTelemetry collector.
    let metrics = opentelemetry_otlp::new_pipeline()
        .metrics(Tokio)
        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
        .build()?;
    let metrics_layer = MetricsLayer::new(metrics).boxed();
    // Add a filter to the subscriber to control the verbosity of the logs.
    // Defaults to "info" when RUST_LOG is unset.
    let filter = var("RUST_LOG").unwrap_or_else(|_| "info".to_string());
    let env_filter = EnvFilter::builder().parse(filter)?;
    // Stack the layers and initialize the subscriber
    let stacked_layer = tracing_layer.and_then(metrics_layer).and_then(env_filter);
    // Add a fmt subscriber with its own env-derived filter (a filter layer
    // can only be attached to one layer stack, hence the second instance).
    let filter = EnvFilter::builder().from_env()?;
    let stdout = tracing_subscriber::fmt::layer().with_filter(filter).boxed();
    tracing_subscriber::registry().with(stacked_layer).with(stdout).init();
    Ok(())
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/constants.rs | src/constants.rs | use crate::{config::KakarotRpcConfig, eth_rpc::config::RPCConfig};
use num_traits::ToPrimitive;
use starknet::{
core::types::{Felt, NonZeroFelt},
providers::{jsonrpc::HttpTransport, JsonRpcClient, Provider},
};
use std::sync::LazyLock;
/// The max chain id allowed by [Metamask](https://gist.github.com/rekmarks/a47bd5f2525936c4b8eee31a16345553)
pub static MAX_CHAIN_ID: u64 = (2u64.pow(53) - 39) / 2;
/// The chain id of the underlying Starknet chain.
///
/// Resolved lazily by querying the configured Starknet JSON-RPC endpoint.
/// NOTE(review): `block_in_place` requires a multi-thread tokio runtime —
/// first dereference must happen from one, or this panics.
pub static STARKNET_CHAIN_ID: LazyLock<Felt> = LazyLock::new(|| {
    tokio::task::block_in_place(|| {
        tokio::runtime::Handle::current().block_on(async {
            let provider = JsonRpcClient::new(HttpTransport::new(KAKAROT_RPC_CONFIG.network_url.clone()));
            // Fixed garbled panic message ("failed to get chain for chain").
            provider.chain_id().await.expect("failed to get chain id from provider")
        })
    })
});
/// The chain id for the Ethereum chain running on the Starknet chain.
///
/// Derived as the remainder (`.1` of `div_rem`) of `STARKNET_CHAIN_ID`
/// divided by `MAX_CHAIN_ID`, so the result always fits the
/// Metamask-compatible chain id range.
pub static ETH_CHAIN_ID: LazyLock<u64> = LazyLock::new(|| {
    STARKNET_CHAIN_ID.div_rem(&NonZeroFelt::from_felt_unchecked(Felt::from(MAX_CHAIN_ID))).1.to_u64().expect("modulo")
});
/// The Kakarot RPC configuration.
pub static KAKAROT_RPC_CONFIG: LazyLock<KakarotRpcConfig> =
LazyLock::new(|| KakarotRpcConfig::from_env().expect("failed to load Kakarot RPC config"));
/// The RPC configuration.
pub static RPC_CONFIG: LazyLock<RPCConfig> =
LazyLock::new(|| RPCConfig::from_env().expect("failed to load RPC config"));
/// The gas limit for Kakarot blocks.
pub const KKRT_BLOCK_GAS_LIMIT: u64 = 7_000_000;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/pool/mod.rs | src/pool/mod.rs | pub mod constants;
pub mod mempool;
pub mod validate;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/pool/mempool.rs | src/pool/mempool.rs | #![allow(clippy::significant_drop_tightening)]
use super::validate::KakarotTransactionValidator;
use crate::{
client::EthClient,
constants::{KAKAROT_RPC_CONFIG, KKRT_BLOCK_GAS_LIMIT},
into_via_try_wrapper,
pool::constants::ONE_TENTH_ETH,
providers::eth_provider::{database::state::EthDatabase, starknet::relayer::Relayer, BlockProvider},
};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::{Address, U256};
use rand::{seq::SliceRandom, SeedableRng};
use reth_chainspec::ChainSpec;
use reth_execution_types::ChangedAccount;
use reth_revm::DatabaseRef;
use reth_transaction_pool::{
blobstore::NoopBlobStore, BlockInfo, CanonicalStateUpdate, CoinbaseTipOrdering, EthPooledTransaction, Pool,
TransactionOrigin, TransactionPool, TransactionPoolExt,
};
use starknet::{
core::types::{BlockTag, Felt},
providers::{jsonrpc::HttpTransport, JsonRpcClient},
};
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::time::Instant;
use tracing::instrument;
/// A type alias for the Kakarot Transaction Validator.
/// Uses the Reth implementation [`TransactionValidationTaskExecutor`].
pub type Validator<Client> = KakarotTransactionValidator<Client, EthPooledTransaction>;
/// A type alias for the Kakarot Transaction Ordering.
/// Uses the Reth implementation [`CoinbaseTipOrdering`].
pub type TransactionOrdering = CoinbaseTipOrdering<EthPooledTransaction>;
/// A type alias for the Kakarot Sequencer Mempool.
pub type KakarotPool<Client> = Pool<Validator<Client>, TransactionOrdering, NoopBlobStore>;
/// Manages a collection of relayer account addresses, interfacing with an Ethereum client.
///
/// Provides functionality to monitor account balances and relay pooled
/// transactions through accounts that hold a sufficient balance.
#[derive(Debug)]
pub struct AccountManager<SP: starknet::providers::Provider + Send + Sync + Clone + 'static> {
    /// A collection of relayer account addresses (Starknet felts).
    accounts: Vec<Felt>,
    /// The Ethereum client used to interact with the blockchain.
    eth_client: Arc<EthClient<SP>>,
}
impl<SP: starknet::providers::Provider + Send + Sync + Clone + 'static> AccountManager<SP> {
    /// Initialize the account manager with a set of passed accounts.
    pub const fn new(accounts: Vec<Felt>, eth_client: Arc<EthClient<SP>>) -> Self {
        Self { accounts, eth_client }
    }
    /// Starts the background task that drains the mempool: repeatedly takes
    /// the best pooled transaction, picks a funded relayer, and relays it.
    /// On any failure the transaction is re-inserted into the pool.
    #[instrument(skip_all, name = "mempool")]
    pub fn start(self) {
        let this = Arc::new(self);
        tokio::spawn(async move {
            loop {
                // TODO: add a listener on the pool and only try to call [`best_transaction`]
                // TODO: when we are sure there is a transaction in the pool. This avoids an
                // TODO: constant loop which rarely yields to the executor combined with a
                // TODO: sleep which could sleep for a while before handling transactions.
                let best_hashes =
                    this.eth_client.mempool().as_ref().best_transactions().map(|x| *x.hash()).collect::<Vec<_>>();
                if let Some(best_hash) = best_hashes.first() {
                    let transaction = this.eth_client.mempool().get(best_hash);
                    if transaction.is_none() {
                        // Probably a race condition here: another task removed it first.
                        continue;
                    }
                    let transaction = transaction.expect("not None");
                    // We remove the transaction to avoid another relayer from picking it up.
                    this.eth_client.mempool().as_ref().remove_transactions(vec![*best_hash]);
                    // Spawn a task for the transaction to be sent
                    let manager = this.clone();
                    tokio::spawn(async move {
                        // Lock the relayer account
                        let hash = transaction.hash();
                        let maybe_relayer = manager.get_relayer().await;
                        if maybe_relayer.is_err() {
                            // If we fail to fetch a relayer, we need to re-insert the transaction in the pool
                            tracing::error!(target: "account_manager", err = ?maybe_relayer.unwrap_err(), ?hash, "failed to fetch relayer");
                            let _ = manager
                                .eth_client
                                .mempool()
                                .add_transaction(TransactionOrigin::Local, transaction.transaction.clone())
                                .await;
                            return;
                        }
                        let relayer = maybe_relayer.expect("not error");
                        // Send the Ethereum transaction using the relayer
                        let transaction_signed = transaction.to_recovered_transaction().into_signed();
                        let res = relayer.relay_transaction(&transaction_signed).await;
                        if res.is_err() {
                            // If the relayer failed to relay the transaction, we need to reposition it in the mempool
                            tracing::error!(target: "account_manager", err = ?res.unwrap_err(), ?hash, "failed to relay transaction");
                            let _ = manager
                                .eth_client
                                .mempool()
                                .add_transaction(TransactionOrigin::Local, transaction.transaction.clone())
                                .await;
                            return;
                        }
                        tracing::info!(target: "account_manager", starknet_hash = ?res.expect("not error"), ethereum_hash = ?transaction_signed.hash());
                    });
                }
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        });
    }
    /// Returns a randomly chosen, sufficiently-funded relayer account.
    ///
    /// Accounts are tried in random order; the first one whose pending-block
    /// balance is at least 0.1 ETH is returned. Errors if none qualifies.
    pub async fn get_relayer(&self) -> eyre::Result<Relayer<JsonRpcClient<HttpTransport>>>
    where
        SP: starknet::providers::Provider + Send + Sync + Clone + 'static,
    {
        // Use `StdRng` instead of `ThreadRng` as it is `Send`
        let mut rng = rand::rngs::StdRng::from_entropy();
        // Shuffle indices of accounts randomly
        let mut account_indices: Vec<_> = (0..self.accounts.len()).collect();
        account_indices.shuffle(&mut rng);
        for index in account_indices {
            let account_address = self.accounts[index];
            // Retrieve the balance of the selected account
            let balance = self.get_balance(account_address).await?;
            // Skip accounts with insufficient balance
            if balance < U256::from(ONE_TENTH_ETH) {
                continue;
            }
            // Convert the balance to `Felt`
            let balance = into_via_try_wrapper!(balance)?;
            // Construct the `Relayer` with the account address and other relevant data
            let account = Relayer::new(
                account_address,
                balance,
                JsonRpcClient::new(HttpTransport::new(KAKAROT_RPC_CONFIG.network_url.clone())),
                Some(Arc::new(self.eth_client.eth_provider().database().clone())),
            );
            // Return the relayer instance
            return Ok(account);
        }
        Err(eyre::eyre!("failed to fetch funded account"))
    }
    /// Retrieves the balance of the specified account address for the [`BlockTag::Pending`]
    async fn get_balance(&self, account_address: Felt) -> eyre::Result<U256> {
        // Get the balance of the address for the Pending block.
        self.eth_client
            .starknet_provider()
            .balance_at(account_address, starknet::core::types::BlockId::Tag(BlockTag::Pending))
            .await
            .map_err(Into::into)
    }
}
/// Result of a bulk account-state load (see `load_accounts`).
#[derive(Default)]
struct LoadedAccounts {
    /// All accounts that were loaded
    accounts: Vec<ChangedAccount>,
    /// All accounts that failed to load
    failed_to_load: Vec<Address>,
}
/// Loads all accounts at the given state
///
/// Note: this expects _unique_ addresses
fn load_accounts<SP, I>(client: &Arc<EthClient<SP>>, addresses: I) -> LoadedAccounts
where
    SP: starknet::providers::Provider + Send + Sync + Clone + 'static,
    I: IntoIterator<Item = Address>,
{
    let mut loaded = LoadedAccounts::default();
    // Read account state against the latest block.
    let db = EthDatabase::new(Arc::new(client.eth_provider()), BlockNumberOrTag::Latest.into());
    for address in addresses {
        match db.basic_ref(address) {
            // Account exists on chain: capture its nonce and balance.
            Ok(Some(account)) => loaded.accounts.push(ChangedAccount {
                address,
                nonce: account.nonce,
                balance: account.balance,
            }),
            // No on-chain state: report the account as empty.
            Ok(None) => loaded.accounts.push(ChangedAccount::empty(address)),
            // Failed to load the account state.
            Err(_) => loaded.failed_to_load.push(address),
        }
    }
    loaded
}
/// Maintains the transaction pool by periodically polling the database in order to
/// fetch the latest block and mark the block's transactions as mined by the node.
/// Also prunes transactions that have sat in the mempool longer than `prune_duration`.
pub fn maintain_transaction_pool<SP>(eth_client: Arc<EthClient<SP>>, prune_duration: Duration)
where
    SP: starknet::providers::Provider + Send + Sync + Clone + 'static,
{
    tokio::spawn(async move {
        let mut block_number = 0u64;
        // Mapping to store the transactions in the mempool with a timestamp to potentially prune them
        let mut mempool_transactions = HashMap::new();
        loop {
            // Adding the transactions to the mempool mapping with a timestamp
            for tx in eth_client
                .mempool()
                .queued_transactions()
                .into_iter()
                .chain(eth_client.mempool().pending_transactions())
            {
                mempool_transactions.entry(*tx.hash()).or_insert_with(Instant::now);
            }
            // Fetch the latest block number
            let Ok(current_block_number) = eth_client.eth_provider().block_number().await else {
                tracing::error!(target: "maintain_transaction_pool", "failed to fetch current block number");
                tokio::time::sleep(Duration::from_secs(1)).await;
                continue;
            };
            if current_block_number.to::<u64>() > block_number {
                // Fetch the block by number for the latest block
                if let Ok(Some(latest_block)) =
                    eth_client.eth_provider().block_by_number(BlockNumberOrTag::Latest, true).await
                {
                    let hash = latest_block.header.hash;
                    // If we can convert the RPC block to a primitive block, we proceed
                    if let Ok(latest_block) = TryInto::<reth_primitives::Block>::try_into(latest_block.inner) {
                        let latest_header = latest_block.header.clone().seal(hash);
                        // Update the block information in the pool
                        let chain_spec = ChainSpec {
                            chain: eth_client.eth_provider().chain_id.into(),
                            max_gas_limit: KKRT_BLOCK_GAS_LIMIT,
                            ..Default::default()
                        };
                        let info = BlockInfo {
                            block_gas_limit: KKRT_BLOCK_GAS_LIMIT,
                            last_seen_block_hash: hash,
                            last_seen_block_number: latest_header.number,
                            pending_basefee: latest_header
                                .next_block_base_fee(
                                    chain_spec.base_fee_params_at_timestamp(latest_header.timestamp + 12),
                                )
                                .unwrap_or_default(),
                            pending_blob_fee: None,
                        };
                        eth_client.mempool().set_block_info(info);
                        // Fetch unique senders from the mempool that are out of sync
                        let dirty_addresses = eth_client.mempool().unique_senders();
                        let mut changed_accounts = Vec::new();
                        // if we have accounts that are out of sync with the pool, we reload them in chunks
                        if !dirty_addresses.is_empty() {
                            // can fetch all dirty accounts at once
                            // (fixed a redundant `Arc` clone: `&eth_client` borrows directly)
                            let reloaded = load_accounts(&eth_client, dirty_addresses);
                            changed_accounts.extend(reloaded.accounts);
                            // update the pool with the loaded accounts
                            eth_client.mempool().update_accounts(changed_accounts.clone());
                        }
                        let sealed_block = latest_block.seal(hash);
                        let mut mined_transactions: Vec<_> =
                            sealed_block.body.transactions.iter().map(|tx| tx.hash).collect();
                        // Prune mined transactions from the mempool mapping
                        for tx_hash in &mined_transactions {
                            mempool_transactions.remove(tx_hash);
                        }
                        // Prune transactions that have been in the mempool for more than 5 minutes
                        let now = Instant::now();
                        for (tx_hash, timestamp) in mempool_transactions.clone() {
                            // - If the transaction has been in the mempool for more than 5 minutes
                            // - And the transaction is in the mempool right now
                            if now.duration_since(timestamp) > prune_duration && eth_client.mempool().contains(&tx_hash)
                            {
                                tracing::warn!(target: "maintain_transaction_pool", ?tx_hash, "pruning");
                                // Add the transaction to the mined transactions so that it can be pruned
                                mined_transactions.push(tx_hash);
                                // Remove the transaction from the mempool mapping
                                mempool_transactions.remove(&tx_hash);
                            }
                        }
                        // Canonical update
                        let update = CanonicalStateUpdate {
                            new_tip: &sealed_block,
                            pending_block_base_fee: info.pending_basefee,
                            pending_block_blob_fee: None,
                            changed_accounts,
                            mined_transactions,
                        };
                        eth_client.mempool().on_canonical_state_change(update);
                        block_number = current_block_number.to();
                    } else {
                        tracing::error!(target: "maintain_transaction_pool", "failed to convert block");
                    }
                } else {
                    tracing::error!(target: "maintain_transaction_pool", "failed to fetch latest block");
                }
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    });
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/pool/constants.rs | src/pool/constants.rs | use std::time::Duration;
/// 10^17 — one tenth of an ETH (assuming 18-decimal wei units); used as the
/// minimum relayer balance threshold.
pub(super) static ONE_TENTH_ETH: u64 = 10u64.pow(17);
// Transactions should be pruned after 5 minutes in the mempool
pub const PRUNE_DURATION: Duration = Duration::from_secs(300);
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/pool/validate.rs | src/pool/validate.rs | #![allow(unused_variables, clippy::struct_excessive_bools)]
use crate::providers::eth_provider::{
database::state::EthDatabase, provider::EthereumProvider,
starknet::kakarot_core::get_white_listed_eip_155_transaction_hashes,
};
use alloy_consensus::constants::{EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, LEGACY_TX_TYPE_ID};
use alloy_rpc_types::BlockNumberOrTag;
use reth_chainspec::ChainSpec;
use reth_primitives::{GotExpected, InvalidTransactionError, SealedBlock};
use reth_revm::DatabaseRef;
use reth_transaction_pool::{
error::InvalidPoolTransactionError,
validate::{ensure_intrinsic_gas, ForkTracker, ValidTransaction, DEFAULT_MAX_TX_INPUT_BYTES},
EthPoolTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator,
};
use std::{
marker::PhantomData,
sync::{atomic::AtomicBool, Arc},
};
/// Builder collecting the fork flags, allowed transaction types, and limits
/// used to construct a [`KakarotTransactionValidator`].
#[derive(Debug, Clone)]
pub struct KakarotTransactionValidatorBuilder {
    /// The chain specification the validator checks transactions against.
    pub chain_spec: Arc<ChainSpec>,
    /// Fork indicator whether we are in the Shanghai stage.
    pub shanghai: bool,
    /// Fork indicator whether we are in the Cancun hardfork.
    pub cancun: bool,
    /// Fork indicator whether we are in the Prague hardfork.
    pub prague: bool,
    /// Whether using EIP-2718 type transactions is allowed
    pub eip2718: bool,
    /// Whether using EIP-1559 type transactions is allowed
    pub eip1559: bool,
    /// Whether using EIP-4844 type transactions is allowed
    pub eip4844: bool,
    /// The current max gas limit
    pub block_gas_limit: u64,
    /// Max size in bytes of a single transaction allowed
    pub max_tx_input_bytes: usize,
}
impl KakarotTransactionValidatorBuilder {
    /// Creates a new builder for the given [`ChainSpec`]
    ///
    /// By default, this assumes the network is on the `Cancun` hardfork and the following
    /// transactions are allowed:
    /// - Legacy
    /// - EIP-2718
    /// - EIP-1559
    pub fn new(chain_spec: &Arc<ChainSpec>) -> Self {
        Self {
            chain_spec: chain_spec.clone(),
            block_gas_limit: chain_spec.max_gas_limit,
            max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES,
            // by default all transaction types are allowed except EIP-4844
            eip2718: true,
            eip1559: true,
            eip4844: false,
            // shanghai is activated by default
            shanghai: true,
            // cancun is activated by default
            cancun: true,
            // prague not yet activated
            prague: false,
        }
    }
    /// Builds the [`KakarotTransactionValidator`] without spawning validator tasks.
    pub fn build<P, Tx>(self, provider: P) -> KakarotTransactionValidator<P, Tx>
    where
        P: EthereumProvider + Send + Sync,
    {
        let Self {
            chain_spec,
            shanghai,
            cancun,
            prague,
            eip2718,
            eip1559,
            eip4844,
            block_gas_limit,
            max_tx_input_bytes,
            ..
        } = self;
        // Snapshot the fork flags into atomics as the fork tracker expects.
        let fork_tracker = ForkTracker {
            shanghai: AtomicBool::new(shanghai),
            cancun: AtomicBool::new(cancun),
            prague: AtomicBool::new(prague),
        };
        let inner = KakarotTransactionValidatorInner {
            chain_spec,
            provider,
            eip2718,
            eip1559,
            eip4844,
            block_gas_limit,
            max_tx_input_bytes,
            fork_tracker,
            _marker: Default::default(),
        };
        KakarotTransactionValidator { inner: Arc::new(inner) }
    }
}
/// Validator for Ethereum transactions.
///
/// Cheap to clone: the validation state lives behind an [`Arc`] and is shared
/// between clones.
#[derive(Debug, Clone)]
pub struct KakarotTransactionValidator<P, T>
where
    P: EthereumProvider + Send + Sync,
{
    /// The type that performs the actual validation.
    inner: Arc<KakarotTransactionValidatorInner<P, T>>,
}
impl<P, Tx> KakarotTransactionValidator<P, Tx>
where
    P: EthereumProvider + Send + Sync,
{
    /// Returns a new handle to the configured chain spec.
    pub fn chain_spec(&self) -> Arc<ChainSpec> {
        Arc::clone(&self.inner.chain_spec)
    }

    /// Returns a reference to the underlying provider.
    pub fn provider(&self) -> &P {
        &self.inner.provider
    }
}
impl<P, Tx> KakarotTransactionValidator<P, Tx>
where
    P: EthereumProvider + Send + Sync,
    Tx: EthPoolTransaction,
{
    /// Validates a single transaction by delegating to the inner validator.
    ///
    /// See also [`TransactionValidator::validate_transaction`]
    pub fn validate_one(&self, transaction: Tx) -> TransactionValidationOutcome<Tx> {
        self.inner.validate_one(transaction)
    }

    /// Validates all given transactions.
    ///
    /// Returns one outcome per transaction, in input order. The origin of each
    /// transaction is ignored.
    ///
    /// See also [`Self::validate_one`]
    pub fn validate_all(&self, transactions: Vec<(TransactionOrigin, Tx)>) -> Vec<TransactionValidationOutcome<Tx>> {
        let mut outcomes = Vec::with_capacity(transactions.len());
        for (_origin, tx) in transactions {
            outcomes.push(self.validate_one(tx));
        }
        outcomes
    }
}
impl<P, Tx> TransactionValidator for KakarotTransactionValidator<P, Tx>
where
    P: EthereumProvider + Send + Sync,
    Tx: EthPoolTransaction,
{
    type Transaction = Tx;
    /// Validates one transaction by delegating to [`Self::validate_one`]; the
    /// transaction origin is ignored.
    async fn validate_transaction(
        &self,
        _origin: TransactionOrigin,
        transaction: Self::Transaction,
    ) -> TransactionValidationOutcome<Self::Transaction> {
        self.validate_one(transaction)
    }
    /// Validates a batch of transactions by delegating to [`Self::validate_all`].
    async fn validate_transactions(
        &self,
        transactions: Vec<(TransactionOrigin, Self::Transaction)>,
    ) -> Vec<TransactionValidationOutcome<Self::Transaction>> {
        self.validate_all(transactions)
    }
    /// Intentionally a no-op: this validator does not update any state when a
    /// new head block arrives.
    fn on_new_head_block(&self, _new_tip_block: &SealedBlock) {}
}
/// A [`TransactionValidator`] implementation that validates Ethereum transactions.
///
/// Shared behind an [`Arc`] by every clone of [`KakarotTransactionValidator`].
#[derive(Debug)]
pub(crate) struct KakarotTransactionValidatorInner<P, T>
where
    P: EthereumProvider + Send + Sync,
{
    /// Spec of the chain
    chain_spec: Arc<ChainSpec>,
    /// This type fetches network info.
    provider: P,
    /// Fork indicator whether we are using EIP-2718 type transactions.
    eip2718: bool,
    /// Fork indicator whether we are using EIP-1559 type transactions.
    eip1559: bool,
    /// Fork indicator whether we are using EIP-4844 blob transactions.
    eip4844: bool,
    /// The current max gas limit
    block_gas_limit: u64,
    /// Maximum size in bytes a single transaction can have in order to be accepted into the pool.
    max_tx_input_bytes: usize,
    /// tracks activated forks relevant for transaction validation
    fork_tracker: ForkTracker,
    /// Marker for the transaction type; lets the struct be generic over `T`
    /// without owning a value of it.
    _marker: PhantomData<T>,
}
impl<P, Tx> KakarotTransactionValidatorInner<P, Tx>
where
    P: EthereumProvider + Send + Sync,
{
    /// Returns the chain id configured in the chain spec.
    pub(crate) fn chain_id(&self) -> u64 {
        let chain = self.chain_spec.chain();
        chain.id()
    }
}
impl<P, Tx> KakarotTransactionValidatorInner<P, Tx>
where
P: EthereumProvider + Send + Sync,
Tx: EthPoolTransaction,
{
/// Validates a single transaction.
#[allow(clippy::too_many_lines)]
fn validate_one(&self, transaction: Tx) -> TransactionValidationOutcome<Tx> {
// Checks for tx_type
match transaction.tx_type() {
LEGACY_TX_TYPE_ID => {
if transaction.chain_id().is_none()
&& !get_white_listed_eip_155_transaction_hashes().contains(transaction.hash())
{
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::TxTypeNotSupported.into(),
);
}
}
EIP2930_TX_TYPE_ID => {
// Accept only legacy transactions until EIP-2718/2930 activates
if !self.eip2718 {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::Eip2930Disabled.into(),
);
}
}
EIP1559_TX_TYPE_ID => {
// Reject dynamic fee transactions until EIP-1559 activates.
if !self.eip1559 {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::Eip1559Disabled.into(),
);
}
}
EIP4844_TX_TYPE_ID => {
// Reject blob transactions.
if !self.eip4844 {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::Eip4844Disabled.into(),
);
}
}
_ => {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::TxTypeNotSupported.into(),
)
}
};
// Reject transactions over defined size to prevent DOS attacks
let transaction_size = transaction.size();
if transaction_size > self.max_tx_input_bytes {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidPoolTransactionError::OversizedData(transaction_size, self.max_tx_input_bytes),
);
}
// Checks for gas limit
let transaction_gas_limit = transaction.gas_limit();
if transaction_gas_limit > self.block_gas_limit {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidPoolTransactionError::ExceedsGasLimit(transaction_gas_limit, self.block_gas_limit),
);
}
// Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any.
if transaction.max_priority_fee_per_gas() > Some(transaction.max_fee_per_gas()) {
return TransactionValidationOutcome::Invalid(transaction, InvalidTransactionError::TipAboveFeeCap.into());
}
// Checks for chainid
if let Some(chain_id) = transaction.chain_id() {
if chain_id != self.chain_id() {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::ChainIdMismatch.into(),
);
}
}
// intrinsic gas checks
if let Err(err) = ensure_intrinsic_gas(&transaction, &self.fork_tracker) {
return TransactionValidationOutcome::Invalid(transaction, err);
}
// Fetch the account state for the Pending block
let db = EthDatabase::new(Arc::new(&self.provider), BlockNumberOrTag::Pending.into());
let account = match db.basic_ref(transaction.sender()) {
Ok(account) => account.unwrap_or_default(),
Err(err) => return TransactionValidationOutcome::Error(*transaction.hash(), Box::new(err)),
};
// Signer account shouldn't have bytecode. Presence of bytecode means this is a
// smartcontract.
if !account.is_empty_code_hash() {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::SignerAccountHasBytecode.into(),
);
}
// Checks for nonce
if transaction.nonce() < account.nonce {
return TransactionValidationOutcome::Invalid(
transaction.clone(),
InvalidTransactionError::NonceNotConsistent { tx: transaction.nonce(), state: account.nonce }.into(),
);
}
let cost = transaction.cost();
// Checks for max cost
if cost > account.balance {
return TransactionValidationOutcome::Invalid(
transaction,
InvalidTransactionError::InsufficientFunds(GotExpected { got: account.balance, expected: cost }.into())
.into(),
);
}
let maybe_blob_sidecar = None;
// Return the valid transaction
TransactionValidationOutcome::Valid {
balance: account.balance,
state_nonce: account.nonce,
transaction: ValidTransaction::new(transaction, maybe_blob_sidecar),
// by this point assume all external transactions should be propagated
propagate: true,
}
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/tracing/builder.rs | src/tracing/builder.rs | use super::{Tracer, TracerResult};
use crate::providers::eth_provider::{
database::{
state::{EthCacheDatabase, EthDatabase},
types::transaction::ExtendedTransaction,
},
error::{EthApiError, TransactionError},
provider::EthereumProvider,
};
use alloy_primitives::{B256, U256};
use alloy_rpc_types::{Block, BlockId, BlockTransactions, Header};
use alloy_rpc_types_trace::geth::{GethDebugTracingCallOptions, GethDebugTracingOptions};
use reth_revm::{
db::CacheDB,
primitives::{BlockEnv, CfgEnv, Env, EnvWithHandlerCfg, HandlerCfg, SpecId},
};
use revm_inspectors::tracing::TracingInspectorConfig;
/// Type-state marker for [`TracerBuilder`]: no block has been selected yet.
#[derive(Debug, Clone)]
pub struct Floating;
/// Type-state marker for [`TracerBuilder`]: the builder is pinned to a block.
#[derive(Debug)]
pub struct Pinned;
/// Representing different tracing options for transactions.
#[derive(Clone, Debug)]
pub enum TracingOptions {
    /// Geth debug tracing options.
    Geth(GethDebugTracingOptions),
    /// Parity tracing options.
    Parity(TracingInspectorConfig),
    /// Geth debug call tracing options.
    GethCall(GethDebugTracingCallOptions),
}
impl TracingOptions {
    /// Returns `Some` with a reference to [`GethDebugTracingOptions`] if this is `Geth`,
    /// otherwise returns `None`.
    pub const fn as_geth(&self) -> Option<&GethDebugTracingOptions> {
        match self {
            Self::Geth(options) => Some(options),
            _ => None,
        }
    }

    /// Returns `Some` with a reference to [`TracingInspectorConfig`] if this is `Parity`,
    /// otherwise returns `None`.
    pub const fn as_parity(&self) -> Option<&TracingInspectorConfig> {
        match self {
            Self::Parity(config) => Some(config),
            _ => None,
        }
    }

    /// Returns `Some` with a reference to [`GethDebugTracingCallOptions`] if this is `GethCall`,
    /// otherwise returns `None`.
    pub const fn as_geth_call(&self) -> Option<&GethDebugTracingCallOptions> {
        match self {
            Self::GethCall(options) => Some(options),
            _ => None,
        }
    }
}
impl Default for TracingOptions {
    /// Defaults to Geth tracing with default [`GethDebugTracingOptions`].
    fn default() -> Self {
        GethDebugTracingOptions::default().into()
    }
}
/// Wraps Geth debug options into the `Geth` variant.
impl From<GethDebugTracingOptions> for TracingOptions {
    fn from(options: GethDebugTracingOptions) -> Self {
        Self::Geth(options)
    }
}
/// Wraps a Parity inspector config into the `Parity` variant.
impl From<TracingInspectorConfig> for TracingOptions {
    fn from(config: TracingInspectorConfig) -> Self {
        Self::Parity(config)
    }
}
/// Wraps Geth debug call options into the `GethCall` variant.
impl From<GethDebugTracingCallOptions> for TracingOptions {
    fn from(options: GethDebugTracingCallOptions) -> Self {
        Self::GethCall(options)
    }
}
/// Builder for a [`Tracer`].
///
/// The `Status` type parameter is a type-state: [`Floating`] until a block is
/// selected (via `with_block_id` / `with_transaction_hash`), then [`Pinned`].
#[derive(Debug, Clone)]
pub struct TracerBuilder<P: EthereumProvider + Send + Sync + Clone, Status = Floating> {
    /// Provider used to fetch blocks, transactions and the chain id.
    eth_provider: P,
    /// Base EVM environment; its cfg is seeded with the chain id in `new`.
    env: Env,
    /// The block to trace; left at its default value while the builder is floating.
    block: Block<ExtendedTransaction>,
    /// Tracing options forwarded to the built [`Tracer`].
    tracing_options: TracingOptions,
    /// Zero-sized marker carrying the `Status` type-state.
    _phantom: std::marker::PhantomData<Status>,
}
/// Block gas limit for tracing. Set to an arbitrarily high value to never run out.
///
/// Block gas limit is only partially enforced in Cairo EVM layer: <https://github.com/kkrt-labs/kakarot/blob/98b26fda32c36f09880ed0c7f44dba7f4d669b61/src/kakarot/accounts/library.cairo#L245>
/// Remove when block gas limit is enforced consistently (i.e. when we check that a transaction's gas limit is lower than the block gas limit as well as the current block's cumulative gas)
pub const TRACING_BLOCK_GAS_LIMIT: u64 = 1_000_000_000;
impl<P: EthereumProvider + Send + Sync + Clone> TracerBuilder<P, Floating> {
    /// Creates a new floating builder whose EVM config is seeded with the
    /// provider's chain id (0 if the provider reports none).
    pub async fn new(eth_provider: P) -> TracerResult<Self> {
        let cfg = CfgEnv::default().with_chain_id(eth_provider.chain_id().await?.unwrap_or_default().to());
        let env = Env { cfg, ..Default::default() };

        Ok(Self {
            eth_provider,
            env,
            block: Default::default(),
            tracing_options: Default::default(),
            _phantom: std::marker::PhantomData,
        })
    }

    /// Sets the block to trace, pinning the builder to it.
    pub async fn with_block_id(self, block_id: BlockId) -> TracerResult<TracerBuilder<P, Pinned>> {
        let block = self.block(block_id).await?;

        // `self` is owned here, so its fields can be moved instead of cloned.
        Ok(TracerBuilder {
            eth_provider: self.eth_provider,
            env: self.env,
            block,
            tracing_options: self.tracing_options,
            _phantom: std::marker::PhantomData,
        })
    }

    /// Sets the block to trace given the transaction hash.
    ///
    /// # Errors
    ///
    /// Returns [`EthApiError::TransactionNotFound`] if the transaction does not
    /// exist or is still pending (pending transactions carry no block number
    /// and cannot be traced).
    pub async fn with_transaction_hash(self, transaction_hash: B256) -> TracerResult<TracerBuilder<P, Pinned>> {
        let transaction = self
            .eth_provider
            .transaction_by_hash(transaction_hash)
            .await?
            .ok_or(EthApiError::TransactionNotFound(transaction_hash))?;

        // We can't trace a pending transaction: it has no block number yet.
        let block_number = transaction.block_number.ok_or(EthApiError::TransactionNotFound(transaction_hash))?;

        self.with_block_id(block_number.into()).await
    }

    /// Fetches a block from the Ethereum provider given a block id
    ///
    /// # Returns
    ///
    /// Returns the block if it exists, otherwise an [`EthApiError::UnknownBlock`]
    async fn block(&self, block_id: BlockId) -> TracerResult<alloy_rpc_types::Block<ExtendedTransaction>> {
        let block = match block_id {
            BlockId::Hash(hash) => self.eth_provider.block_by_hash(hash.block_hash, true).await?,
            BlockId::Number(number) => self.eth_provider.block_by_number(number, true).await?,
        }
        .ok_or(match block_id {
            BlockId::Hash(hash) => EthApiError::UnknownBlock(hash.block_hash.into()),
            BlockId::Number(number) => EthApiError::UnknownBlock(number.as_number().unwrap_or_default().into()),
        })?;

        // we can't trace a pending block (its hash is still zero)
        if block.header.hash.is_zero() {
            return Err(EthApiError::UnknownBlock(B256::ZERO.into()));
        }

        Ok(block.inner)
    }
}
impl<P: EthereumProvider + Send + Sync + Clone> TracerBuilder<P, Pinned> {
    /// Sets the tracing options
    #[must_use]
    pub fn with_tracing_options(mut self, tracing_options: TracingOptions) -> Self {
        self.tracing_options = tracing_options;
        self
    }
    /// Builds the tracer.
    ///
    /// Fails with [`TransactionError::ExpectedFullTransactions`] if the pinned
    /// block was fetched without full transaction bodies.
    pub fn build(self) -> TracerResult<Tracer<P>> {
        let transactions = match &self.block.transactions {
            BlockTransactions::Full(transactions) => transactions.clone(),
            _ => return Err(TransactionError::ExpectedFullTransactions.into()),
        };
        let env = self.init_env_with_handler_config();
        // DB should use the state of the parent block
        let db =
            EthCacheDatabase(CacheDB::new(EthDatabase::new(self.eth_provider, self.block.header.parent_hash.into())));
        let tracing_options = self.tracing_options;
        Ok(Tracer { transactions, env, db, tracing_options })
    }
    /// Init an `EnvWithHandlerCfg`.
    ///
    /// NOTE(review): the spec id is hardcoded to `CANCUN` regardless of the
    /// pinned block's height — confirm this is intended for historical blocks.
    fn init_env_with_handler_config(&self) -> EnvWithHandlerCfg {
        let env = Box::new(self.init_env_with_block_env());
        EnvWithHandlerCfg::new(env, HandlerCfg::new(SpecId::CANCUN))
    }
    /// Inits the Env by using `self.block` to set the block environment.
    fn init_env_with_block_env(&self) -> Env {
        let mut env = self.env.clone();
        let Header { number, timestamp, miner, base_fee_per_gas, difficulty, .. } = self.block.header.clone();
        let block_env = BlockEnv {
            number: U256::from(number),
            timestamp: U256::from(timestamp),
            // Use the (very large) tracing gas limit instead of the block's own
            // limit; see `TRACING_BLOCK_GAS_LIMIT`.
            gas_limit: U256::from(TRACING_BLOCK_GAS_LIMIT),
            coinbase: miner,
            basefee: U256::from(base_fee_per_gas.unwrap_or_default()),
            // prevrandao is derived from the header's difficulty bytes.
            prevrandao: Some(B256::from_slice(&difficulty.to_be_bytes::<32>()[..])),
            ..Default::default()
        };
        env.block = block_env;
        env
    }
}
// The following tests validate the behavior of the TracerBuilder when interacting with a mock Ethereum provider.
// Each test focuses on different scenarios where the TracerBuilder is expected to handle various errors correctly,
// such as unknown blocks, not found transactions, and invalid chain IDs.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_utils::mock_provider::MockEthereumProviderStruct;
    use alloy_primitives::U64;
    use alloy_rpc_types::Transaction;
    use alloy_serde::WithOtherFields;
    use std::sync::Arc;
    #[tokio::test]
    async fn test_tracer_builder_block_failure_with_none_block_number() {
        // Create a mock Ethereum provider
        let mut mock_provider = MockEthereumProviderStruct::new();
        // Expect the chain_id call to return 1
        mock_provider.expect_chain_id().returning(|| Ok(Some(U64::from(1))));
        // Expect the block_by_number call to return Ok(None) for an unknown block
        mock_provider.expect_block_by_number().returning(|_, _| Ok(None));
        // Create a TracerBuilder with the mock provider
        let builder = TracerBuilder::new(Arc::new(&mock_provider)).await.unwrap();
        // Attempt to use the builder with a specific block ID, expecting an error
        let result = builder.block(BlockId::Number(1.into())).await;
        // Check that the result is an UnknownBlock error
        assert!(matches!(result, Err(EthApiError::UnknownBlock(_))));
    }
    #[tokio::test]
    async fn test_tracer_builder_block_failure_with_none_block_hash() {
        // Create a mock Ethereum provider
        let mut mock_provider = MockEthereumProviderStruct::new();
        // Expect the chain_id call to return 1
        mock_provider.expect_chain_id().returning(|| Ok(Some(U64::from(1))));
        // Expect the block_by_hash call to return Ok(None) for an unknown block
        mock_provider.expect_block_by_hash().returning(|_, _| Ok(None));
        // Create a TracerBuilder with the mock provider
        let builder = TracerBuilder::new(Arc::new(&mock_provider)).await.unwrap();
        // Attempt to use the builder with a specific block hash, expecting an error
        let result = builder.block(B256::repeat_byte(1).into()).await;
        // Check that the result is an UnknownBlock error
        assert!(matches!(result, Err(EthApiError::UnknownBlock(_))));
    }
    #[tokio::test]
    async fn test_tracer_builder_with_transaction_not_found() {
        // Create a mock Ethereum provider
        let mut mock_provider = MockEthereumProviderStruct::new();
        // Expect the chain_id call to return 1
        mock_provider.expect_chain_id().returning(|| Ok(Some(U64::from(1))));
        // Expect the transaction_by_hash call to return Ok(None) for not found transaction
        mock_provider.expect_transaction_by_hash().returning(|_| Ok(None));
        // Create a TracerBuilder with the mock provider
        let builder = TracerBuilder::new(Arc::new(&mock_provider)).await.unwrap();
        // Attempt to use the builder with a specific transaction hash, expecting an error
        let result = builder.with_transaction_hash(B256::repeat_byte(0)).await;
        // Check that the result is a TransactionNotFound error
        assert!(matches!(result, Err(EthApiError::TransactionNotFound(_))));
    }
    #[tokio::test]
    async fn test_tracer_builder_with_unknown_block() {
        // Create a mock Ethereum provider
        let mut mock_provider = MockEthereumProviderStruct::new();
        // Expect the chain_id call to return 1
        mock_provider.expect_chain_id().returning(|| Ok(Some(U64::from(1))));
        // Expect the transaction_by_hash call to return a transaction with no block number
        mock_provider
            .expect_transaction_by_hash()
            .returning(|_| Ok(Some(WithOtherFields::new(Transaction { block_number: None, ..Default::default() }))));
        // Create a TracerBuilder with the mock provider
        let builder = TracerBuilder::new(Arc::new(&mock_provider)).await.unwrap();
        // Attempt to use the builder with a specific transaction hash, expecting an error
        let result = builder.with_transaction_hash(B256::repeat_byte(0)).await;
        // The transaction exists but is pending (no block number), so it is
        // reported as TransactionNotFound rather than UnknownBlock
        assert!(matches!(result, Err(EthApiError::TransactionNotFound(_))));
    }
    #[tokio::test]
    async fn test_tracer_builder_build_error() {
        // Create a mock Ethereum provider
        let mut mock_provider = MockEthereumProviderStruct::new();
        // Expect the chain_id call to return 1
        mock_provider.expect_chain_id().returning(|| Ok(Some(U64::from(1))));
        // Expect the block_by_number call to return a block with non-full transactions
        mock_provider.expect_block_by_number().returning(|_, _| {
            Ok(Some(WithOtherFields::new(Block {
                transactions: BlockTransactions::Hashes(vec![]),
                header: Header { hash: B256::repeat_byte(1), ..Default::default() },
                ..Default::default()
            })))
        });
        // Create a TracerBuilder with the mock provider
        let builder = TracerBuilder::new(Arc::new(&mock_provider)).await.unwrap();
        // Attempt to use the builder with a specific block ID
        let builder = builder.with_block_id(BlockId::Number(1.into())).await.unwrap();
        // Attempt to build the tracer, expecting an error
        let result = builder.build();
        // Check that the result is an ExpectedFullTransactions error
        assert!(matches!(result, Err(EthApiError::Transaction(TransactionError::ExpectedFullTransactions))));
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/tracing/mod.rs | src/tracing/mod.rs | pub mod builder;
use crate::{
providers::eth_provider::{
database::state::EthCacheDatabase,
error::{EthApiError, TransactionError},
provider::EthereumProvider,
},
tracing::builder::TracingOptions,
};
use alloy_primitives::{ruint::FromUintError, B256};
use alloy_rpc_types::{TransactionInfo, TransactionRequest};
use alloy_rpc_types_trace::{
geth::{
GethDebugBuiltInTracerType, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions,
GethTrace, TraceResult,
},
parity::LocalizedTransactionTrace,
};
use alloy_serde::WithOtherFields;
use eyre::eyre;
use reth_evm_ethereum::EthEvmConfig;
use reth_node_api::{ConfigureEvm, ConfigureEvmEnv};
use reth_revm::{
primitives::{Env, EnvWithHandlerCfg},
DatabaseCommit,
};
use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig};
use std::{collections::HashMap, sync::Arc};
/// Result alias for tracing operations, with [`EthApiError`] as the error type.
pub type TracerResult<T> = Result<T, EthApiError>;
/// Represents the result of tracing a transaction: the traces plus the
/// resulting EVM state changes.
type TracingStateResult = TracerResult<(TracingResult, reth_revm::primitives::EvmState)>;
/// Representing the result of tracing transactions.
#[derive(Clone, Debug)]
enum TracingResult {
    /// Geth trace results.
    Geth(Vec<TraceResult>),
    /// Parity trace results.
    Parity(Vec<LocalizedTransactionTrace>),
}
impl TracingResult {
    /// Returns a reference to the Geth traces if this is `Geth`, otherwise `None`.
    const fn as_geth(&self) -> Option<&Vec<TraceResult>> {
        if let Self::Geth(traces) = self {
            Some(traces)
        } else {
            None
        }
    }
    /// Returns a reference to the Parity traces if this is `Parity`, otherwise `None`.
    const fn as_parity(&self) -> Option<&Vec<LocalizedTransactionTrace>> {
        if let Self::Parity(traces) = self {
            Some(traces)
        } else {
            None
        }
    }
    /// Creates a default failure [`TracingResult`] based on the [`TracingOptions`].
    ///
    /// Used for transactions flagged as reverted: Geth-style options produce a
    /// single default frame marked `failed`; Parity-style options produce the
    /// traces of an empty inspector.
    fn default_failure(tracing_options: &TracingOptions, tx: &WithOtherFields<alloy_rpc_types::Transaction>) -> Self {
        match tracing_options {
            TracingOptions::Geth(_) | TracingOptions::GethCall(_) => Self::Geth(vec![TraceResult::Success {
                result: GethTrace::Default(alloy_rpc_types_trace::geth::DefaultFrame {
                    failed: true,
                    ..Default::default()
                }),
                tx_hash: Some(tx.hash),
            }]),
            TracingOptions::Parity(_) => Self::Parity(
                TracingInspector::default()
                    .into_parity_builder()
                    .into_localized_transaction_traces(TransactionInfo::from(&tx.inner)),
            ),
        }
    }
}
/// EVM tracer over the transactions of a single block.
#[derive(Debug)]
pub struct Tracer<P: EthereumProvider + Send + Sync> {
    /// Transactions of the pinned block, in block order.
    transactions: Vec<WithOtherFields<alloy_rpc_types::Transaction>>,
    /// EVM environment configured for the pinned block.
    env: EnvWithHandlerCfg,
    /// Cached database layered over the parent block's state.
    db: EthCacheDatabase<P>,
    /// Options selecting Geth- or Parity-style tracing.
    tracing_options: TracingOptions,
}
impl<P: EthereumProvider + Send + Sync + Clone> Tracer<P> {
    /// Traces the transaction with Geth tracing options and returns the resulting traces and state.
    fn trace_geth(
        env: EnvWithHandlerCfg,
        db: &EthCacheDatabase<P>,
        tx: &WithOtherFields<alloy_rpc_types::Transaction>,
        opts: GethDebugTracingOptions,
    ) -> TracingStateResult {
        // Extract options
        let GethDebugTracingOptions { tracer_config, config, tracer, .. } = opts;
        // Check if tracer is provided
        if let Some(tracer) = tracer {
            match tracer {
                // Only support CallTracer for now
                GethDebugTracerType::BuiltInTracer(GethDebugBuiltInTracerType::CallTracer) => {
                    // Convert tracer config to call config once; it is reused below
                    // when building the call traces (the original converted twice).
                    let call_config = tracer_config
                        .into_call_config()
                        .map_err(|err| EthApiError::Transaction(TransactionError::Tracing(err.into())))?;
                    // Initialize tracing inspector with call config
                    let mut inspector =
                        TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config));
                    // Build EVM with environment and inspector
                    let eth_evm_config = EthEvmConfig::new(Arc::new(Default::default()));
                    let res = {
                        let mut evm = eth_evm_config.evm_with_env_and_inspector(db.0.clone(), env, &mut inspector);
                        // Execute transaction
                        evm.transact().map_err(|err| TransactionError::Tracing(err.into()))?
                    };
                    // Get call traces, reusing the already-converted call config
                    let call_frame =
                        inspector.into_geth_builder().geth_call_traces(call_config, res.result.gas_used());
                    // Return success trace result
                    return Ok((
                        TracingResult::Geth(vec![TraceResult::Success {
                            result: call_frame.into(),
                            tx_hash: Some(tx.hash),
                        }]),
                        res.state,
                    ));
                }
                // Return error for unsupported tracers
                _ => {
                    return Err(EthApiError::Transaction(TransactionError::Tracing(
                        eyre!("only call tracer is currently supported").into(),
                    )))
                }
            }
        }
        // Use default tracer
        let mut inspector = TracingInspector::new(TracingInspectorConfig::from_geth_config(&config));
        let eth_evm_config = EthEvmConfig::new(Arc::new(Default::default()));
        let res = {
            let mut evm = eth_evm_config.evm_with_env_and_inspector(db.0.clone(), env, &mut inspector);
            // Execute transaction
            evm.transact().map_err(|err| TransactionError::Tracing(err.into()))?
        };
        let gas_used = res.result.gas_used();
        let return_value = res.result.into_output().unwrap_or_default();
        let frame = inspector.into_geth_builder().geth_traces(gas_used, return_value, config);
        Ok((
            TracingResult::Geth(vec![TraceResult::Success { result: frame.into(), tx_hash: Some(tx.hash) }]),
            res.state,
        ))
    }
    /// Traces the transaction with Parity tracing options and returns the resulting traces and state.
    fn trace_parity(
        env: EnvWithHandlerCfg,
        db: &EthCacheDatabase<P>,
        tx: &WithOtherFields<alloy_rpc_types::Transaction>,
        tracing_config: TracingInspectorConfig,
    ) -> TracingStateResult {
        // Get block base fee
        let block_base_fee = env
            .env
            .block
            .basefee
            .try_into()
            .map_err(|err: FromUintError<u128>| TransactionError::Tracing(err.into()))?;
        // Initialize tracing inspector with given config
        let mut inspector = TracingInspector::new(tracing_config);
        // Build EVM with environment and inspector
        let eth_evm_config = EthEvmConfig::new(Arc::new(Default::default()));
        // Execute transaction
        let res = {
            let mut evm = eth_evm_config.evm_with_env_and_inspector(db.0.clone(), env, &mut inspector);
            // Execute transaction
            evm.transact().map_err(|err| TransactionError::Tracing(err.into()))?
        };
        // Create transaction info
        let transaction_info = TransactionInfo::from(&tx.inner).with_base_fee(block_base_fee);
        // Return Parity trace result
        Ok((
            TracingResult::Parity(inspector.into_parity_builder().into_localized_transaction_traces(transaction_info)),
            res.state,
        ))
    }
    /// Trace the block in the parity format.
    pub fn trace_block(self) -> TracerResult<Option<Vec<LocalizedTransactionTrace>>> {
        let txs = self.transactions.clone();
        Ok(Some(self.trace_transactions(TracingResult::as_parity, &txs)?))
    }
    /// Returns the debug trace in the Geth.
    /// Currently only supports the call tracer or the default tracer.
    pub fn debug_block(self) -> TracerResult<Vec<TraceResult>> {
        let txs = self.transactions.clone();
        self.trace_transactions(TracingResult::as_geth, &txs)
    }
    /// Returns the Geth debug trace of the transaction with the given hash.
    ///
    /// Transactions preceding the target in the block are executed and their
    /// state committed, so the target is traced against the correct state.
    pub fn debug_transaction(mut self, transaction_hash: B256) -> TracerResult<GethTrace> {
        for tx in self.transactions.clone() {
            if tx.hash == transaction_hash {
                // We only want to trace the transaction with the given hash.
                let trace = self
                    .trace_transactions(TracingResult::as_geth, &[tx])?
                    .first()
                    .cloned()
                    .ok_or(TransactionError::Tracing(eyre!("No trace found").into()))?;
                return match trace {
                    TraceResult::Success { result, .. } => Ok(result),
                    TraceResult::Error { error, .. } => Err(TransactionError::Tracing(error.into()).into()),
                };
            }
            // Not the target transaction: execute and commit it so later
            // transactions observe its state changes.
            let env = env_with_tx(&self.env, &tx)?;
            let eth_evm_config = EthEvmConfig::new(Arc::new(Default::default()));
            let mut evm = eth_evm_config.evm_with_env(&mut self.db.0, env);
            evm.transact_commit().map_err(|err| TransactionError::Tracing(err.into()))?;
        }
        Err(EthApiError::TransactionNotFound(transaction_hash))
    }
    /// Debugs a transaction request by tracing it using the provided tracing options.
    ///
    /// This function returns an error if the tracing options are not supported or if there is an issue
    /// with the EVM environment or transaction execution.
    pub fn debug_transaction_request(self, _request: &TransactionRequest) -> TracerResult<GethTrace> {
        // Attempt to get Geth tracing options from the provided tracing options.
        let opts = self
            .tracing_options
            .as_geth_call()
            .ok_or_else(|| {
                // Return an error if the tracing options are not supported.
                EthApiError::Transaction(TransactionError::Tracing(
                    eyre!("only `GethDebugTracingCallOptions` tracing options are supported for call tracing").into(),
                ))
            })?
            .clone();
        // Extract the tracing options from the obtained Geth tracing options.
        let GethDebugTracingCallOptions { tracing_options, .. } = opts;
        let GethDebugTracingOptions { tracer, tracer_config, .. } = tracing_options;
        // Check if a tracer is provided.
        if let Some(tracer) = tracer {
            match tracer {
                // Only support CallTracer for now.
                GethDebugTracerType::BuiltInTracer(GethDebugBuiltInTracerType::CallTracer) => {
                    // Build the EVM environment using the provided configuration and request.
                    // TODO: build_call_evm_env not available anymore, to be discussed
                    // let env = build_call_evm_env(
                    //     CfgEnvWithHandlerCfg { cfg_env: self.env.cfg.clone(), handler_cfg: self.env.handler_cfg },
                    //     self.env.block.clone(),
                    //     request.clone(),
                    // )?;
                    // Convert the tracer configuration into call configuration.
                    let call_config =
                        tracer_config.into_call_config().map_err(|err| TransactionError::Tracing(err.into()))?;
                    // Create a new tracing inspector with the call configuration.
                    let mut inspector =
                        TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config));
                    // Build EVM with environment and inspector.
                    let eth_evm_config = EthEvmConfig::new(Arc::new(Default::default()));
                    // TODO: we should not use default here to be discussed
                    let gas_used = {
                        let mut evm = eth_evm_config.evm_with_env_and_inspector(
                            self.db.0,
                            EnvWithHandlerCfg::default(),
                            &mut inspector,
                        );
                        // Execute the transaction.
                        let res = evm.transact().map_err(|err| TransactionError::Tracing(err.into()))?;
                        // Capture the gas used before `evm` goes out of scope.
                        res.result.gas_used()
                    };
                    // Get the call traces from the inspector.
                    let frame = inspector.into_geth_builder().geth_call_traces(call_config, gas_used);
                    // Return the obtained call traces.
                    return Ok(frame.into());
                }
                // Return an error for unsupported tracers.
                _ => {
                    return Err(EthApiError::Transaction(TransactionError::Tracing(
                        eyre!("only call tracer is currently supported").into(),
                    )))
                }
            }
        }
        // Return a default Geth trace if no tracer is provided.
        Ok(GethTrace::Default(Default::default()))
    }
    /// Traces the provided transactions using the given closure.
    /// The `convert_result` closure takes the resulting tracing result
    /// and converts it into the desired type.
    fn trace_transactions<T: Clone>(
        self,
        convert_result: fn(&TracingResult) -> Option<&Vec<T>>,
        transactions: &[WithOtherFields<alloy_rpc_types::Transaction>],
    ) -> TracerResult<Vec<T>> {
        // Reserve capacity from the slice actually being traced: it may be a
        // subset of `self.transactions` (e.g. a single transaction).
        let mut traces: Vec<T> = Vec::with_capacity(transactions.len());
        let mut transactions = transactions.iter().peekable();
        let mut db = self.db;
        while let Some(tx) = transactions.next() {
            let env = env_with_tx(&self.env, tx)?;
            let (res, state_changes) = if tx.other.get("reverted").is_some() {
                (TracingResult::default_failure(&self.tracing_options, tx), HashMap::default())
            } else {
                match &self.tracing_options {
                    TracingOptions::Geth(opts) => Self::trace_geth(env, &db, tx, opts.clone())?,
                    TracingOptions::Parity(tracing_config) => Self::trace_parity(env, &db, tx, *tracing_config)?,
                    TracingOptions::GethCall(_) => {
                        return Err(EthApiError::Transaction(TransactionError::Tracing(
                            eyre!("`TracingOptions::GethCall` is not supported in `trace_transactions` context").into(),
                        )))
                    }
                }
            };
            if let Some(result) = convert_result(&res) {
                traces.append(&mut result.clone());
            }
            // Only commit to the database if there are more transactions to process.
            if transactions.peek().is_some() {
                db.0.commit(state_changes);
            }
        }
        TracerResult::Ok(traces)
    }
}
/// Builds a copy of `env` whose transaction environment is derived from `tx`.
fn env_with_tx(
    env: &EnvWithHandlerCfg,
    tx: &WithOtherFields<alloy_rpc_types::Transaction>,
) -> TracerResult<EnvWithHandlerCfg> {
    // Recover the signed transaction and derive its revm transaction env.
    let evm_config = EthEvmConfig::new(Arc::new(Default::default()));
    let recovered = tx.clone().try_into()?;
    let tx_env = evm_config.tx_env(&recovered, tx.from);

    // Keep the cfg/block parts of the incoming env and swap in the new tx env.
    let boxed = Env::boxed(env.env.cfg.clone(), env.env.block.clone(), tx_env);
    Ok(EnvWithHandlerCfg { env: boxed, handler_cfg: env.handler_cfg })
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::providers::{
        eth_provider::{database::Database, provider::EthDataProvider},
        sn_provider::StarknetProvider,
    };
    use builder::TracerBuilder;
    use mongodb::options::{DatabaseOptions, ReadConcern, WriteConcern};
    use starknet::providers::{jsonrpc::HttpTransport, JsonRpcClient};
    use std::{str::FromStr, sync::Arc};
    use url::Url;
    // Manual debugging harness: requires a live Starknet endpoint, a local
    // mongodb instance with imported network state, and the placeholder values
    // below filled in. It is `#[ignore]`d and never run in CI.
    #[tokio::test(flavor = "multi_thread")]
    #[ignore = "this test is used for debugging purposes only"]
    async fn test_debug_tracing() {
        // Set the env vars
        std::env::set_var("KAKAROT_ADDRESS", "CHECK THE KAKAROT ADDRESS FOR THE BLOCK YOU ARE DEBUGGING");
        std::env::set_var(
            "UNINITIALIZED_ACCOUNT_CLASS_HASH",
            "CHECK THE KAKAROT UNINITIALIZED ACCOUNT CLASS HASH FOR THE BLOCK YOU ARE DEBUGGING",
        );
        // Given
        let url = Url::parse("https://juno-kakarot-dev.karnot.xyz/").unwrap();
        let starknet_provider = JsonRpcClient::new(HttpTransport::new(url));
        // Start a local mongodb instance with the state of the network:
        // - Install `mongod`.
        // - Run `brew services start mongodb-community` on MacOS.
        // - Connect to the remote mongodb instance using MongoCompass and export the headers collection
        //   and the transactions collection. Instructions for exporting/importing can be found at
        //   `https://www.mongodb.com/docs/compass/current/import-export/`.
        // - Connect to the local mongodb instance using MongoCompass.
        // - Import the headers and transactions collections.
        // - ‼️ You might need to manually fix some transactions that don't have an `accessList` field. ‼️
        // - ‼️ Be sure to import the collections in the database called `local`. ‼️
        let db_client = mongodb::Client::with_uri_str("mongodb://localhost:27017/").await.unwrap();
        let db = Database::new(
            db_client.database_with_options(
                "local",
                DatabaseOptions::builder()
                    .read_concern(ReadConcern::majority())
                    .write_concern(WriteConcern::majority())
                    .build(),
            ),
        );
        let eth_provider = Arc::new(EthDataProvider::new(db, StarknetProvider::new(Arc::new(starknet_provider))));
        let tracer = TracerBuilder::new(eth_provider)
            .await
            .unwrap()
            .with_transaction_hash(B256::from_str("INSERT THE TRANSACTION HASH YOU WISH TO DEBUG").unwrap())
            .await
            .unwrap()
            .with_tracing_options(TracingInspectorConfig::default_parity().into())
            .build()
            .unwrap();
        // When
        let _ = tracer.trace_block().unwrap();
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/models/block.rs | src/models/block.rs | use crate::{
into_via_try_wrapper,
providers::eth_provider::{constant::STARKNET_MODULUS, error::EthereumDataFormatError},
};
use alloy_eips::{BlockId as EthereumBlockId, BlockNumberOrTag};
use alloy_primitives::U256;
use starknet::core::types::{BlockId as StarknetBlockId, BlockTag};
/// Newtype around an Ethereum [`EthereumBlockId`] that enables conversions
/// to Starknet block identifiers.
#[derive(Debug)]
pub struct EthBlockId(EthereumBlockId);

impl EthBlockId {
    /// Wraps the given Ethereum block id.
    pub const fn new(block_id: EthereumBlockId) -> Self {
        Self(block_id)
    }
}
impl TryFrom<EthBlockId> for StarknetBlockId {
    type Error = EthereumDataFormatError;

    /// Converts an Ethereum block id to a Starknet block id.
    ///
    /// Hashes are reduced modulo the Starknet field modulus so they always fit in a felt;
    /// numbers and tags are delegated to the [`EthBlockNumberOrTag`] conversion.
    fn try_from(eth_block_id: EthBlockId) -> Result<Self, Self::Error> {
        match eth_block_id.0 {
            // TODO: the conversion currently relies on a modulo operation to ensure compatibility with the StarkNet modulus.
            // A revisit of this line is suggested when hash values are calculated as specified in the Ethereum specification.
            EthereumBlockId::Hash(hash) => Ok(Self::Hash(into_via_try_wrapper!(U256::from_be_slice(
                hash.block_hash.as_slice()
            )
            .wrapping_rem(STARKNET_MODULUS))?)),
            EthereumBlockId::Number(block_number_or_tag) => {
                // Route through the newtype so tag semantics live in a single place.
                let block_number_or_tag: EthBlockNumberOrTag = block_number_or_tag.into();
                Ok(block_number_or_tag.into())
            }
        }
    }
}
impl From<EthBlockId> for EthereumBlockId {
    /// Unwraps the newtype back to the underlying Ethereum block id.
    fn from(eth_block_id: EthBlockId) -> Self {
        eth_block_id.0
    }
}
/// Newtype around an Ethereum [`BlockNumberOrTag`] that enables conversions
/// to Starknet block identifiers.
#[derive(Debug)]
pub struct EthBlockNumberOrTag(BlockNumberOrTag);

impl From<BlockNumberOrTag> for EthBlockNumberOrTag {
    fn from(block_number_or_tag: BlockNumberOrTag) -> Self {
        Self(block_number_or_tag)
    }
}

impl From<EthBlockNumberOrTag> for BlockNumberOrTag {
    /// Unwraps the newtype back to the underlying number-or-tag value.
    fn from(eth_block_number_or_tag: EthBlockNumberOrTag) -> Self {
        eth_block_number_or_tag.0
    }
}
impl From<EthBlockNumberOrTag> for StarknetBlockId {
fn from(block_number_or_tag: EthBlockNumberOrTag) -> Self {
match block_number_or_tag.into() {
BlockNumberOrTag::Latest | BlockNumberOrTag::Pending => {
// We set to pending because in Starknet, a pending block is an unsealed block,
// With a centralized sequencer, the latest block is the pending block being filled.
Self::Tag(BlockTag::Pending)
}
BlockNumberOrTag::Safe | BlockNumberOrTag::Finalized => Self::Tag(BlockTag::Latest),
BlockNumberOrTag::Earliest => Self::Number(0),
BlockNumberOrTag::Number(number) => Self::Number(number),
}
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/models/mod.rs | src/models/mod.rs | pub mod block;
pub mod felt;
pub mod token;
pub mod transaction;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/models/felt.rs | src/models/felt.rs | use crate::providers::eth_provider::error::EthereumDataFormatError;
use alloy_primitives::{Address, B256, U256, U64};
use starknet::core::types::{EthAddress, Felt};
use std::ops::{Deref, DerefMut};
/// Thin wrapper around a Starknet [`Felt`] acting as a conversion hub between
/// Ethereum primitive types and Starknet field elements.
#[derive(Clone, Debug)]
pub struct Felt252Wrapper(Felt);

impl From<Felt> for Felt252Wrapper {
    fn from(felt: Felt) -> Self {
        Self(felt)
    }
}

impl From<Felt252Wrapper> for Felt {
    fn from(felt: Felt252Wrapper) -> Self {
        felt.0
    }
}

impl From<Address> for Felt252Wrapper {
    // A 20-byte address always fits in a felt, so this conversion is infallible.
    fn from(address: Address) -> Self {
        Self(Felt::from_bytes_be_slice(address.as_slice()))
    }
}

impl From<U64> for Felt252Wrapper {
    fn from(value: U64) -> Self {
        value.to::<u64>().into()
    }
}

impl From<u64> for Felt252Wrapper {
    fn from(value: u64) -> Self {
        Self(value.into())
    }
}

impl From<u128> for Felt252Wrapper {
    fn from(value: u128) -> Self {
        Self(Felt::from(value))
    }
}
impl TryFrom<Felt252Wrapper> for Address {
    type Error = EthereumDataFormatError;

    /// Fails with [`EthereumDataFormatError::Primitive`] when the felt does not fit
    /// in 20 bytes (i.e. exceeds 2**160 - 1).
    fn try_from(felt: Felt252Wrapper) -> Result<Self, Self::Error> {
        EthAddress::from_felt(&felt)
            .map(|eth_address| Self::from_slice(eth_address.as_bytes()))
            .map_err(|_| EthereumDataFormatError::Primitive)
    }
}

impl From<B256> for Felt252Wrapper {
    // NOTE(review): `Felt::from_bytes_be` reduces modulo the Stark prime, so 32-byte
    // values >= the prime wrap silently — lossy by design (see the overflow tests in
    // this file, where the prime itself maps to zero).
    fn from(value: B256) -> Self {
        Self(Felt::from_bytes_be(value.as_ref()))
    }
}

impl From<U256> for Felt252Wrapper {
    // Same modular-reduction caveat as the `B256` conversion above applies here.
    fn from(u256: U256) -> Self {
        Self(Felt::from_bytes_be(&u256.to_be_bytes()))
    }
}

impl From<Felt252Wrapper> for U256 {
    // A felt is < 2**252, so it always fits in a U256: infallible and lossless.
    fn from(felt: Felt252Wrapper) -> Self {
        Self::from_be_bytes(felt.to_bytes_be())
    }
}
// Deref to the inner `Felt` so wrapper values can be used wherever a felt is expected.
impl Deref for Felt252Wrapper {
    type Target = Felt;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Felt252Wrapper {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// This macro provides a convenient way to convert a value from a source
/// type $val that implements Into<Felt252Wrapper> into a target type that
/// implements From<Felt252Wrapper>.
#[macro_export]
macro_rules! into_via_wrapper {
    ($val: expr) => {{
        let intermediate: $crate::models::felt::Felt252Wrapper = $val.into();
        intermediate.into()
    }};
}

/// This macro provides a convenient way to convert a value from a source
/// type $val that implements `TryInto`<Felt252Wrapper> into a target type that
/// implements From<Felt252Wrapper>.
///
/// Unlike [`into_via_wrapper!`], the expansion evaluates to a
/// `Result<_, EthereumDataFormatError>`: a failed `TryInto` is mapped to
/// [`EthereumDataFormatError::Primitive`] instead of panicking.
#[macro_export]
macro_rules! into_via_try_wrapper {
    ($val: expr) => {{
        let intermediate: Result<_, $crate::providers::eth_provider::error::EthereumDataFormatError> =
            TryInto::<$crate::models::felt::Felt252Wrapper>::try_into($val)
                .map_err(|_| $crate::providers::eth_provider::error::EthereumDataFormatError::Primitive)
                .map(Into::into);
        intermediate
    }};
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use hex::FromHex;

    use super::*;

    // 2**160 - 1 — the largest value representable as an Ethereum address.
    const MAX_ADDRESS: &str = "ffffffffffffffffffffffffffffffffffffffff";
    // 2**160 — one past the address range, must fail the felt -> Address conversion.
    const OVERFLOW_ADDRESS: &str = "010000000000000000000000000000000000000000";

    // 2**251 + 17 * 2**192 + 1 — exactly the Stark prime, so it reduces to zero mod p.
    const OVERFLOW_FELT: &str = "0800000000000011000000000000000000000000000000000000000000000001";

    #[test]
    fn test_address_try_from_felt_should_pass() {
        // Given
        let address: Felt252Wrapper = Felt::from_hex(MAX_ADDRESS).unwrap().into();

        // When
        let address = Address::try_from(address).unwrap();

        // Then
        let expected_address = <[u8; 20]>::from_hex(MAX_ADDRESS).unwrap();
        assert_eq!(expected_address, address.0);
    }

    #[test]
    #[should_panic(expected = "Primitive")]
    fn test_address_try_from_felt_should_fail() {
        // Given
        let address: Felt252Wrapper = Felt::from_hex(OVERFLOW_ADDRESS).unwrap().into();

        // When
        Address::try_from(address).unwrap();
    }

    #[test]
    fn test_felt_try_from_b256_should_pass() {
        // Given
        let hash = B256::from_slice(&Felt::MAX.to_bytes_be());

        // When
        let hash = Felt252Wrapper::from(hash);

        // Then
        let expected_hash = Felt::MAX;
        assert_eq!(expected_hash, hash.0);
    }

    #[test]
    fn test_felt_try_from_b256_should_fail() {
        // Given
        let hash = B256::from_str(OVERFLOW_FELT).unwrap();

        // When
        // The Stark prime reduces to zero: documents the silent wrap of the conversion.
        assert_eq!(Felt252Wrapper::from(hash).0, Felt::ZERO,);
    }

    #[test]
    fn test_felt_try_from_u256_should_pass() {
        // Given
        let hash = U256::try_from_be_slice(&Felt::MAX.to_bytes_be()).unwrap();

        // When
        let hash = Felt252Wrapper::from(hash);

        // Then
        let expected_hash = Felt::MAX;
        assert_eq!(expected_hash, hash.0);
    }

    #[test]
    fn test_felt_try_from_u256_should_fail() {
        // Given
        let hash = U256::from_str_radix(OVERFLOW_FELT, 16).unwrap();

        // When
        // Same modular wrap as the B256 case: the prime maps to zero.
        assert_eq!(Felt252Wrapper::from(hash).0, Felt::ZERO,);
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/models/transaction.rs | src/models/transaction.rs | use crate::providers::eth_provider::{
provider::EthApiResult,
starknet::kakarot_core::{ETH_SEND_TRANSACTION, KAKAROT_ADDRESS},
utils::split_u256,
};
use alloy_consensus::transaction::Transaction as _;
use alloy_rlp::Encodable;
use reth_primitives::{transaction::legacy_parity, Transaction, TransactionSigned};
use starknet::core::types::Felt;
#[cfg(not(feature = "hive"))]
use {
crate::providers::eth_provider::error::EthApiError,
crate::providers::eth_provider::starknet::kakarot_core::MAX_FELTS_IN_CALLDATA,
};
/// Returns the transaction's signature as a [`Vec<Felt>`].
/// Fields r and s are split into two 16-bytes chunks both converted
/// to [`Felt`].
pub(crate) fn transaction_signature_to_field_elements(transaction_signed: &TransactionSigned) -> Vec<Felt> {
    let sig = transaction_signed.signature();

    // 2 felts for r, 2 for s, 1 for the parity value pushed below.
    let mut felts = Vec::with_capacity(5);
    felts.extend_from_slice(&split_u256(sig.r()));
    felts.extend_from_slice(&split_u256(sig.s()));

    // Final element:
    // - Legacy transactions carry v := {0, 1} + chain_id * 2 + 35
    //   (or {0, 1} + 27 for pre EIP-155 transactions).
    // - Typed transactions only carry the y-parity bit.
    let parity = if matches!(transaction_signed.transaction, Transaction::Legacy(_)) {
        legacy_parity(sig, transaction_signed.chain_id()).to_u64()
    } else {
        sig.v().to_u64()
    };
    felts.push(parity.into());

    felts
}
/// Returns the transaction's data and signature combined into a
/// [`execute_from_outside`] type transaction. The payload still needs
/// to be signed by the relayer before broadcasting.
///
/// # Errors
///
/// On non-hive builds, returns [`EthApiError::CalldataExceededLimit`] when the
/// resulting calldata would exceed `MAX_FELTS_IN_CALLDATA`.
pub fn transaction_data_to_starknet_calldata(
    transaction_signed: &TransactionSigned,
    relayer_address: Felt,
) -> EthApiResult<Vec<Felt>> {
    // RLP-encode the transaction without its signature.
    let mut signed_data = Vec::with_capacity(transaction_signed.transaction.length());
    transaction_signed.transaction.encode_without_signature(&mut signed_data);

    // Extract the signature from the signed transaction
    let mut signature = transaction_signature_to_field_elements(transaction_signed);

    // Pack the calldata in 31-byte chunks (each chunk fits in a felt),
    // prefixed with the total byte length of the RLP payload.
    let mut signed_data: Vec<Felt> = std::iter::once(Felt::from(signed_data.len()))
        .chain(signed_data.chunks(31).map(Felt::from_bytes_be_slice))
        .collect();

    // Prepare the calldata for the Starknet invoke transaction:
    // 10 header felts + packed data + 1 signature-length felt + signature felts.
    let capacity = 10 + signed_data.len() + signature.len() + 1;

    // Check if call data is too large
    #[cfg(not(feature = "hive"))]
    if capacity > *MAX_FELTS_IN_CALLDATA {
        return Err(EthApiError::CalldataExceededLimit(*MAX_FELTS_IN_CALLDATA, capacity));
    }

    let mut execute_from_outside_calldata = Vec::with_capacity(capacity);

    // Construct the execute from outside calldata
    // https://github.com/kkrt-labs/kakarot/blob/main/src/kakarot/accounts/account_contract.cairo#L73
    // `extend` with an array avoids the intermediate heap `Vec` that `append(&mut vec![...])` built.
    execute_from_outside_calldata.extend([
        relayer_address,          // OutsideExecution caller
        Felt::ZERO,               // OutsideExecution nonce
        Felt::ZERO,               // OutsideExecution execute_after
        Felt::from(u32::MAX),     // OutsideExecution execute_before
        Felt::ONE,                // call_array_len
        *KAKAROT_ADDRESS,         // CallArray to
        *ETH_SEND_TRANSACTION,    // CallArray selector
        Felt::ZERO,               // CallArray data_offset
        signed_data.len().into(), // CallArray data_len
        signed_data.len().into(), // CallArray calldata_len
    ]);
    execute_from_outside_calldata.append(&mut signed_data);
    execute_from_outside_calldata.push(signature.len().into());
    execute_from_outside_calldata.append(&mut signature);

    Ok(execute_from_outside_calldata)
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::TxEip2930;
use alloy_primitives::{bytes, hex, Signature, TxKind, U256};
use alloy_rlp::Decodable;
use std::str::FromStr;
#[test]
fn test_transaction_data_to_starknet_calldata() {
// Define a sample signed transaction.
// Using https://sepolia.kakarotscan.org/tx/0x5be347c9eb86cf04b884c7e6f432c6daa2054b46c3c70c7d4536e4c009765abe
let transaction = TransactionSigned::from_transaction_and_signature(Transaction::Eip2930(TxEip2930 {
chain_id: 1_802_203_764,
nonce: 33,
gas_price: 0,
gas_limit: 302_606,
to: TxKind::Create,
value: U256::ZERO,
access_list: Default::default(),
input: bytes!("608060405260405161040a38038061040a83398101604081905261002291610268565b61002c8282610033565b5050610352565b61003c82610092565b6040516001600160a01b038316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a280511561008657610081828261010e565b505050565b61008e610185565b5050565b806001600160a01b03163b6000036100cd57604051634c9c8ce360e01b81526001600160a01b03821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80546001600160a01b0319166001600160a01b0392909216919091179055565b6060600080846001600160a01b03168460405161012b9190610336565b600060405180830381855af49150503d8060008114610166576040519150601f19603f3d011682016040523d82523d6000602084013e61016b565b606091505b50909250905061017c8583836101a6565b95945050505050565b34156101a45760405163b398979f60e01b815260040160405180910390fd5b565b6060826101bb576101b682610205565b6101fe565b81511580156101d257506001600160a01b0384163b155b156101fb57604051639996b31560e01b81526001600160a01b03851660048201526024016100c4565b50805b9392505050565b8051156102155780518082602001fd5b604051630a12f52160e11b815260040160405180910390fd5b634e487b7160e01b600052604160045260246000fd5b60005b8381101561025f578181015183820152602001610247565b50506000910152565b6000806040838503121561027b57600080fd5b82516001600160a01b038116811461029257600080fd5b60208401519092506001600160401b03808211156102af57600080fd5b818501915085601f8301126102c357600080fd5b8151818111156102d5576102d561022e565b604051601f8201601f19908116603f011681019083821181831017156102fd576102fd61022e565b8160405282815288602084870101111561031657600080fd5b610327836020830160208801610244565b80955050505050509250929050565b60008251610348818460208701610244565b9190910192915050565b60aa806103606000396000f3fe6080604052600a600c565b005b60186014601a565b6051565b565b6000604c7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc546001600160a01b031690565b905090565b3660008037600080366000845af43d6000803e808015606f573d6000f35b3d6000fdfea2646970667358221220d02
32cfa81216c3e4973e570f043b57ccb69ae4a81b8bc064338713721c87a9f64736f6c6343000814003300000000000000000000000009635f643e140090a9a8dcd712ed6285858cebef000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000647a1ac61e00000000000000000000000084ea74d481ee0a5332c457a4d796187f6ba67feb00000000000000000000000000000000000000000000000000038d7ea4c68000000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000")
}), Signature::from_rs_and_parity(U256::from_str("0x6290c177b6ee7b16d87909474a792d9ac022385505161e91191c57d666b61496").unwrap(), U256::from_str("0x7ba95168843acb8b888de596c28033c6c66a9cb6c7621cfc996bc5851115634d").unwrap(), true).expect("Failed to generate signature")
);
// Invoke the function to convert the transaction to Starknet format.
let calldata = transaction_data_to_starknet_calldata(&transaction, Felt::ZERO).unwrap();
// Assert the length of calldata.
// We must adapt the check as we pack the calldata in 31-byte chunks.
assert_eq!(calldata.len(), 59);
// Assert the first 6 elements of calldata.
assert_eq!(
calldata[0..10],
vec![
Felt::ZERO, // OutsideExecution caller
Felt::ZERO, // OutsideExecution nonce
Felt::ZERO, // OutsideExecution execute_after
Felt::from(u32::MAX), // OutsideExecution execute_before
Felt::ONE, // call_array_len
*KAKAROT_ADDRESS, // CallArray to
*ETH_SEND_TRANSACTION, // CallArray selector
Felt::ZERO, // CallArray data_offset
Felt::from((transaction.transaction.length() + 30) / 31 + 1), // CallArray data_len
Felt::from((transaction.transaction.length() + 30) / 31 + 1), // CallArray calldata_len
]
);
}
#[test]
#[should_panic(expected = "CalldataExceededLimit(22500, 30018)")]
fn test_transaction_data_to_starknet_calldata_too_large_calldata() {
// Test that an example create transaction from goerli decodes properly
let tx_bytes = hex!("b901f202f901ee05228459682f008459682f11830209bf8080b90195608060405234801561001057600080fd5b50610175806100206000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c80630c49c36c14610030575b600080fd5b61003861004e565b604051610045919061011d565b60405180910390f35b60606020600052600f6020527f68656c6c6f2073746174656d696e64000000000000000000000000000000000060405260406000f35b600081519050919050565b600082825260208201905092915050565b60005b838110156100be5780820151818401526020810190506100a3565b838111156100cd576000848401525b50505050565b6000601f19601f8301169050919050565b60006100ef82610084565b6100f9818561008f565b93506101098185602086016100a0565b610112816100d3565b840191505092915050565b6000602082019050818103600083015261013781846100e4565b90509291505056fea264697066735822122051449585839a4ea5ac23cae4552ef8a96b64ff59d0668f76bfac3796b2bdbb3664736f6c63430008090033c080a0136ebffaa8fc8b9fda9124de9ccb0b1f64e90fbd44251b4c4ac2501e60b104f9a07eb2999eec6d185ef57e91ed099afb0a926c5b536f0155dd67e537c7476e1471");
// Create a large tx_bytes by repeating the original tx_bytes 31 times
let mut large_tx_bytes = Vec::new();
for _ in 0..31 {
large_tx_bytes.extend_from_slice(&tx_bytes);
}
// Decode the transaction from the provided bytes
let mut transaction = TransactionSigned::decode(&mut &large_tx_bytes[..]).unwrap();
// Set the input of the transaction to be a vector of 30,000 zero bytes
transaction.transaction.set_input(vec![0; 30000 * 31].into());
// Attempt to convert the transaction into a Starknet transaction
transaction_data_to_starknet_calldata(&transaction, Felt::ZERO).unwrap();
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/models/token.rs | src/models/token.rs | use alloy_primitives::{Address, U256};
use serde::{Deserialize, Serialize};
/// Represents the balance of a specific ERC20 token.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TokenBalance {
    /// The address of the ERC20 token.
    pub token_address: Address,
    /// The balance of the ERC20 token.
    pub token_balance: U256,
}

/// Represents the balances of multiple ERC20 tokens for a specific address.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TokenBalances {
    /// The address for which the token balances are queried.
    pub address: Address,
    /// A list of token balances associated with the address.
    pub token_balances: Vec<TokenBalance>,
}

/// Represents the metadata (decimals, name, symbol) of an ERC20 token.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TokenMetadata {
    /// The number of decimals the token uses.
    pub decimals: U256,
    /// The name of the token.
    pub name: String,
    /// The symbol of the token.
    pub symbol: String,
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/bin/hive_genesis.rs | src/bin/hive_genesis.rs | use clap::Parser;
use kakarot_rpc::test_utils::{hive::HiveGenesisConfig, katana::genesis::KatanaGenesisBuilder};
use starknet::core::types::Felt;
use std::path::PathBuf;
// CLI arguments for the hive genesis generator.
// NOTE(review): plain `//` comments are used on purpose — `///` doc comments on clap
// fields would become user-visible help text and change the CLI output.
#[derive(Parser)]
struct Args {
    // Directory containing the compiled Kakarot contract classes.
    #[clap(long, short)]
    kakarot_contracts: PathBuf,
    // Path to the hive genesis JSON to convert.
    #[clap(long)]
    hive_genesis: PathBuf,
    // Output path for the generated Katana genesis JSON.
    #[clap(long, short)]
    genesis_out: PathBuf,
    // Output path for the generated manifest JSON.
    #[clap(long, short)]
    manifest_out: PathBuf,
}
fn main() {
let args = Args::parse();
let kakarot_contracts_path = args.kakarot_contracts;
let hive_genesis_path = args.hive_genesis;
let genesis_path = args.genesis_out;
let manifest_path = args.manifest_out;
// Read all the classes.
let mut builder = KatanaGenesisBuilder::default().load_classes(kakarot_contracts_path);
// Add dev allocations.
builder = builder.with_dev_allocation(10);
// Read the hive genesis.
let hive_genesis_content = std::fs::read_to_string(hive_genesis_path).expect("Failed to read hive genesis file");
let hive_genesis: HiveGenesisConfig =
serde_json::from_str(&hive_genesis_content).expect("Failed to parse hive genesis json");
let chain_id = hive_genesis.config.chain_id.into();
// Convert the hive genesis to a katana genesis.
let genesis_json =
hive_genesis.try_into_genesis_json(builder.clone()).expect("Failed to convert hive genesis to katana genesis");
let builder = builder.with_kakarot(Felt::ZERO, chain_id).expect("Failed to set up Kakarot");
let manifest = builder.manifest();
// Write the genesis json to the file.
std::fs::write(genesis_path, serde_json::to_string(&genesis_json).expect("Failed to serialize genesis json"))
.expect("Failed to write genesis json");
// Write the manifest to the file.
std::fs::write(manifest_path, serde_json::to_string(&manifest).expect("Failed to serialize manifest json"))
.expect("Failed to write manifest json");
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/bin/katana_genesis.rs | src/bin/katana_genesis.rs | use alloy_primitives::{B256, U256};
use dotenvy::dotenv;
use kakarot_rpc::test_utils::katana::genesis::KatanaGenesisBuilder;
use starknet::core::types::Felt;
use std::{
env::var,
path::{Path, PathBuf},
str::FromStr,
sync::LazyLock,
};
/// Katana genesis folder path.
static GENESIS_FOLDER_PATH: LazyLock<PathBuf> =
    LazyLock::new(|| Path::new(env!("CARGO_MANIFEST_DIR")).to_path_buf().join(".katana"));
/// Kakarot contracts path.
static KAKAROT_CONTRACTS_PATH: LazyLock<PathBuf> =
    LazyLock::new(|| Path::new(env!("CARGO_MANIFEST_DIR")).to_path_buf().join("lib/kakarot/build"));
/// Mock coinbase address.
static COINBASE_ADDRESS: LazyLock<Felt> = LazyLock::new(|| 0x12345u32.into());
/// Chain id baked into the generated genesis.
/// NOTE(review): the meaning of the constant `0xb615f74ebad2c` is not documented here — confirm with the chain config.
static CHAIN_ID: LazyLock<Felt> = LazyLock::new(|| Felt::from_str("0xb615f74ebad2c").expect("Invalid chain ID"));
fn main() {
    // Load the env vars.
    dotenv().ok();

    // The EOA private key must be provided as a 32-byte hex string.
    let private_key = B256::from_str(&var("EVM_PRIVATE_KEY").expect("Missing EVM private key"))
        .expect("Failed to parse EVM private key");

    // Assemble the whole genesis in one chain: load the Kakarot classes, deploy
    // Kakarot, set up and fund the EOA, then add the dev allocations.
    let builder = KatanaGenesisBuilder::default()
        .load_classes(KAKAROT_CONTRACTS_PATH.clone())
        .with_kakarot(*COINBASE_ADDRESS, *CHAIN_ID)
        .expect("Failed to set up Kakarot")
        .with_eoa(private_key)
        .expect("Failed to set up EOA")
        .fund(private_key, U256::from(u128::MAX))
        .unwrap()
        .with_dev_allocation(10);

    let manifest = builder.manifest();
    let genesis = builder.build().expect("Failed to build genesis");

    // Make sure the output folder exists before writing either file.
    std::fs::create_dir_all(GENESIS_FOLDER_PATH.as_path()).expect("Failed to create genesis directory");

    // Write the genesis json to the file.
    std::fs::write(
        GENESIS_FOLDER_PATH.as_path().join("genesis.json"),
        serde_json::to_string(&genesis).expect("Failed to serialize genesis json"),
    )
    .expect("Failed to write genesis json");

    // Write the manifest to the file.
    std::fs::write(
        GENESIS_FOLDER_PATH.as_path().join("manifest.json"),
        serde_json::to_string(&manifest).expect("Failed to serialize manifest json"),
    )
    .expect("Failed to write manifest json");
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/bin/hive_chain.rs | src/bin/hive_chain.rs | #![allow(clippy::significant_drop_tightening)]
use alloy_primitives::bytes::{Buf, BytesMut};
use alloy_rlp::Decodable;
use clap::Parser;
use kakarot_rpc::{
into_via_try_wrapper,
providers::{eth_provider::starknet::relayer::Relayer, sn_provider::StarknetProvider},
};
use reth_primitives::{Block, BlockBody};
use starknet::{
core::types::{BlockId, BlockTag, Felt},
providers::{jsonrpc::HttpTransport, JsonRpcClient, Provider},
};
use std::{path::PathBuf, str::FromStr};
use tokio::{fs::File, io::AsyncReadExt};
use tokio_stream::StreamExt;
use tokio_util::codec::{Decoder, FramedRead};
use url::Url;
/// Stateless codec decoding consecutive RLP-encoded [`Block`]s from a byte buffer.
struct BlockFileCodec;

impl Decoder for BlockFileCodec {
    type Item = Block;
    type Error = eyre::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // An empty buffer means the stream is exhausted.
        if src.is_empty() {
            return Ok(None);
        }
        // Decode from a shadow slice so we can measure how many bytes were consumed.
        let buf_slice = &mut src.as_ref();
        let body = Block::decode(buf_slice)?;
        // `Block::decode` advanced `buf_slice`; advance `src` by the same number of bytes.
        src.advance(src.len() - buf_slice.len());
        Ok(Some(body))
    }
}
/// The inputs to the binary.
// NOTE(review): the `///` comments on the fields below double as clap help text.
#[derive(Parser, Debug)]
pub struct Args {
    /// The path to the chain file for the hive test.
    #[clap(short, long)]
    chain_path: PathBuf,

    /// The relayer address.
    #[clap(long)]
    relayer_address: Felt,

    /// The relayer private key.
    #[clap(long)]
    relayer_pk: Felt,
}
// Local Starknet (Katana) JSON-RPC endpoint the chain loader talks to.
const STARKNET_RPC_URL: &str = "http://0.0.0.0:5050";
// Forwarded to the relayer through the `MAX_FELTS_IN_CALLDATA` env var in `main`.
const MAX_FELTS_IN_CALLDATA: &str = "30000";
/// Inspired by the Import command from Reth.
/// https://github.com/paradigmxyz/reth/blob/main/bin/reth/src/commands/import.rs
///
/// Reads an RLP chain file, decodes its blocks, and relays every transaction to a
/// local Starknet node in block order, waiting for the chain head to catch up
/// between blocks.
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let args = Args::parse();

    // Get the provider
    let provider = JsonRpcClient::new(HttpTransport::new(Url::from_str(STARKNET_RPC_URL)?));
    let starknet_provider = StarknetProvider::new(provider);

    // Set the env
    std::env::set_var("RELAYER_PRIVATE_KEY", format!("0x{:x}", args.relayer_pk));
    std::env::set_var("MAX_FELTS_IN_CALLDATA", MAX_FELTS_IN_CALLDATA);
    std::env::set_var("STARKNET_NETWORK", STARKNET_RPC_URL);

    // Prepare the relayer
    let relayer_balance = starknet_provider.balance_at(args.relayer_address, BlockId::Tag(BlockTag::Latest)).await?;
    let relayer_balance = into_via_try_wrapper!(relayer_balance)?;

    let relayer = Relayer::new(
        args.relayer_address,
        relayer_balance,
        JsonRpcClient::new(HttpTransport::new(Url::from_str(STARKNET_RPC_URL)?)),
        None,
    );

    // Read the rlp file
    let mut file = File::open(args.chain_path).await?;
    let metadata = file.metadata().await?;
    let file_len = metadata.len();

    // Read the entire file into memory
    let mut reader = vec![];
    file.read_to_end(&mut reader).await?;

    // Frame capacity covers the whole file, so no refills are ever needed.
    let mut stream = FramedRead::with_capacity(&reader[..], BlockFileCodec, file_len as usize);

    // Extract the block bodies in file order.
    let mut bodies: Vec<BlockBody> = Vec::new();
    while let Some(block_res) = stream.next().await {
        let block = block_res?;
        bodies.push(block.into());
    }

    for (block_number, body) in bodies.into_iter().enumerate() {
        // Wait until the Starknet chain has reached this block height before relaying.
        while starknet_provider.block_number().await? < block_number as u64 {
            tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
        }
        for transaction in &body.transactions {
            relayer.relay_transaction(transaction).await?;
            // Short pause between transactions — presumably to avoid flooding the sequencer.
            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
        }
    }

    Ok(())
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/alchemy_provider.rs | src/providers/alchemy_provider.rs | use crate::{
models::token::{TokenBalance, TokenBalances, TokenMetadata},
providers::eth_provider::{
contracts::erc20::EthereumErc20,
error::EthApiError,
provider::{EthApiResult, EthereumProvider},
},
};
use alloy_eips::BlockNumberOrTag;
use alloy_primitives::{Address, U256};
use async_trait::async_trait;
use auto_impl::auto_impl;
use eyre::Result;
use futures::future::join_all;
use mongodb::bson::doc;
/// Alchemy-style extension API exposing ERC20 token balances, metadata and allowances.
#[async_trait]
#[auto_impl(Arc, &)]
pub trait AlchemyProvider {
    /// Retrieves the token balances for a given address.
    async fn token_balances(&self, address: Address, contract_addresses: Vec<Address>) -> EthApiResult<TokenBalances>;
    /// Retrieves the metadata for a given token.
    async fn token_metadata(&self, contract_address: Address) -> EthApiResult<TokenMetadata>;
    /// Retrieves the allowance for a given token.
    async fn token_allowance(&self, contract_address: Address, owner: Address, spender: Address) -> EthApiResult<U256>;
}
/// Implementation of [`AlchemyProvider`] backed by an Ethereum data provider.
#[derive(Debug, Clone)]
pub struct AlchemyDataProvider<P: EthereumProvider> {
    // Underlying Ethereum data source used for all ERC20 contract calls.
    eth_provider: P,
}

impl<P: EthereumProvider> AlchemyDataProvider<P> {
    /// Wraps the given Ethereum provider.
    pub const fn new(eth_provider: P) -> Self {
        Self { eth_provider }
    }
}
#[async_trait]
impl<P: EthereumProvider + Send + Sync + 'static> AlchemyProvider for AlchemyDataProvider<P> {
    /// Retrieves the balances of the given tokens for an address, all queried
    /// concurrently against the latest block.
    async fn token_balances(&self, address: Address, contract_addresses: Vec<Address>) -> EthApiResult<TokenBalances> {
        // Set the block ID to the latest block
        let block_id = BlockNumberOrTag::Latest.into();

        Ok(TokenBalances {
            address,
            // One balance query per token, run concurrently; the first error aborts the whole call.
            token_balances: join_all(contract_addresses.into_iter().map(|token_address| async move {
                // Create a new instance of `EthereumErc20` for each token address
                let token = EthereumErc20::new(token_address, &self.eth_provider);
                // Retrieve the balance for the given address
                let token_balance = token.balance_of(address, block_id).await?;
                Ok(TokenBalance { token_address, token_balance })
            }))
            .await
            .into_iter()
            .collect::<Result<Vec<_>, EthApiError>>()?,
        })
    }

    /// Retrieves the metadata for a given token.
    async fn token_metadata(&self, contract_address: Address) -> EthApiResult<TokenMetadata> {
        // Set the block ID to the latest block
        let block_id = BlockNumberOrTag::Latest.into();
        // Create a new instance of `EthereumErc20`
        let token = EthereumErc20::new(contract_address, &self.eth_provider);
        // Await all futures concurrently to retrieve decimals, name, and symbol
        let (decimals, name, symbol) =
            futures::try_join!(token.decimals(block_id), token.name(block_id), token.symbol(block_id))?;

        // Return the metadata
        Ok(TokenMetadata { decimals, name, symbol })
    }

    /// Retrieves the allowance of a given owner for a spender.
    async fn token_allowance(&self, contract_address: Address, owner: Address, spender: Address) -> EthApiResult<U256> {
        // Set the block ID to the latest block
        let block_id = BlockNumberOrTag::Latest.into();
        // Create a new instance of `EthereumErc20`
        let token = EthereumErc20::new(contract_address, &self.eth_provider);
        // Retrieve the allowance for the given owner and spender
        let allowance = token.allowance(owner, spender, block_id).await?;

        // Return the allowance
        Ok(allowance)
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/debug_provider.rs | src/providers/debug_provider.rs | use crate::{
providers::eth_provider::{
error::{EthApiError, SignatureError},
provider::{EthApiResult, EthereumProvider},
},
tracing::builder::TracerBuilder,
};
use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag};
use alloy_primitives::{Bytes, B256};
use alloy_rlp::Encodable;
use alloy_rpc_types::TransactionRequest;
use alloy_rpc_types_trace::geth::{GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult};
use async_trait::async_trait;
use auto_impl::auto_impl;
use reth_primitives::{Block, Header, Log, Receipt, ReceiptWithBloom, TransactionSigned};
use std::sync::Arc;
#[async_trait]
#[auto_impl(Arc, &)]
/// Provider exposing the `debug_*` JSON-RPC namespace: raw RLP/EIP-2718 encodings
/// of chain data, and Geth-style transaction/block traces.
pub trait DebugProvider {
/// Returns the RLP-encoded header of the block identified by `block_id`; empty bytes if unknown.
async fn raw_header(&self, block_id: BlockId) -> EthApiResult<Bytes>;
/// Returns the RLP-encoded block identified by `block_id`; empty bytes if unknown.
async fn raw_block(&self, block_id: BlockId) -> EthApiResult<Bytes>;
/// Returns the EIP-2718 encoded transaction with the given `hash`, or `None` if not found.
async fn raw_transaction(&self, hash: B256) -> EthApiResult<Option<Bytes>>;
/// Returns the EIP-2718 encoded transactions of the block identified by `block_id`.
async fn raw_transactions(&self, block_id: BlockId) -> EthApiResult<Vec<Bytes>>;
/// Returns the EIP-2718 encoded receipts of the block identified by `block_id`.
async fn raw_receipts(&self, block_id: BlockId) -> EthApiResult<Vec<Bytes>>;
/// Traces every transaction of the block with the given number using Geth-style `opts`.
async fn trace_block_by_number(
&self,
block_number: BlockNumberOrTag,
opts: Option<GethDebugTracingOptions>,
) -> EthApiResult<Vec<TraceResult>>;
/// Traces every transaction of the block with the given hash using Geth-style `opts`.
async fn trace_block_by_hash(
&self,
block_hash: B256,
opts: Option<GethDebugTracingOptions>,
) -> EthApiResult<Vec<TraceResult>>;
/// Traces a single transaction identified by `transaction_hash` using Geth-style `opts`.
async fn trace_transaction(
&self,
transaction_hash: B256,
opts: Option<GethDebugTracingOptions>,
) -> EthApiResult<GethTrace>;
/// Traces a call described by `request` at the given block (latest if `None`).
async fn trace_call(
&self,
request: TransactionRequest,
block_number: Option<BlockId>,
opts: Option<GethDebugTracingCallOptions>,
) -> EthApiResult<GethTrace>;
}
/// Implementation of [`DebugProvider`] backed by an Ethereum data provider.
#[derive(Debug, Clone)]
pub struct DebugDataProvider<P: EthereumProvider> {
/// Underlying Ethereum provider used to fetch headers, blocks, transactions and receipts.
eth_provider: P,
}
impl<P: EthereumProvider> DebugDataProvider<P> {
/// Creates a new [`DebugDataProvider`] wrapping the given Ethereum data provider.
pub const fn new(eth_provider: P) -> Self {
Self { eth_provider }
}
}
#[async_trait]
impl<P: EthereumProvider + Send + Sync + 'static> DebugProvider for DebugDataProvider<P> {
    /// Returns the RLP-encoded header for `block_id`; empty bytes when the block is unknown.
    async fn raw_header(&self, block_id: BlockId) -> EthApiResult<Bytes> {
        let mut res = Vec::new();
        // Only encode when the header exists; a missing block yields empty bytes.
        if let Some(header) = self.eth_provider.header(&block_id).await?.map(Header::try_from).transpose()? {
            header.encode(&mut res);
        }
        Ok(res.into())
    }

    /// Returns the RLP-encoded block for `block_id`; empty bytes when the block is unknown.
    async fn raw_block(&self, block_id: BlockId) -> EthApiResult<Bytes> {
        // Fetch the block (with full transaction bodies) by hash or by number.
        let block = match block_id {
            BlockId::Hash(hash) => self.eth_provider.block_by_hash(hash.into(), true).await?,
            BlockId::Number(number) => self.eth_provider.block_by_number(number, true).await?,
        };
        let mut raw_block = Vec::new();
        if let Some(block) = block {
            let block = Block::try_from(block.inner)?;
            block.encode(&mut raw_block);
        }
        Ok(raw_block.into())
    }

    /// Returns the EIP-2718 encoded transaction for `hash`, or `None` when not found.
    ///
    /// A stored transaction without a signature, or with a signature that cannot be
    /// reconstructed, is reported as a [`SignatureError`] instead of panicking — this
    /// data originates from external storage and must not crash the server.
    async fn raw_transaction(&self, hash: B256) -> EthApiResult<Option<Bytes>> {
        let transaction = self.eth_provider.transaction_by_hash(hash).await?;
        if let Some(tx) = transaction {
            let signature = tx.signature.ok_or_else(|| EthApiError::from(SignatureError::MissingSignature))?;
            let bytes = TransactionSigned::from_transaction_and_signature(
                tx.try_into()?,
                alloy_primitives::Signature::from_rs_and_parity(
                    signature.r,
                    signature.s,
                    // Missing parity defaults to `false`, matching the previous behavior.
                    signature.y_parity.map_or(false, |v| v.0),
                )
                // Was `.expect("Invalid signature")`: surface malformed stored signatures
                // as an RPC error rather than panicking.
                .map_err(|_| EthApiError::from(SignatureError::InvalidParity))?,
            )
            .encoded_2718()
            .into();
            Ok(Some(bytes))
        } else {
            Ok(None)
        }
    }

    /// Returns the EIP-2718 encoded transactions of the block identified by `block_id`.
    ///
    /// Any transaction with a missing or malformed signature aborts the call with a
    /// [`SignatureError`] (previously a malformed signature panicked).
    async fn raw_transactions(&self, block_id: BlockId) -> EthApiResult<Vec<Bytes>> {
        let transactions = self.eth_provider.block_transactions(Some(block_id)).await?.unwrap_or_default();
        let mut raw_transactions = Vec::with_capacity(transactions.len());
        for t in transactions {
            let signature = t.signature.ok_or_else(|| EthApiError::from(SignatureError::MissingSignature))?;
            let bytes = TransactionSigned::from_transaction_and_signature(
                t.try_into()?,
                alloy_primitives::Signature::from_rs_and_parity(
                    signature.r,
                    signature.s,
                    signature.y_parity.map_or(false, |v| v.0),
                )
                // Same policy as `raw_transaction`: error out instead of panicking.
                .map_err(|_| EthApiError::from(SignatureError::InvalidParity))?,
            )
            .encoded_2718()
            .into();
            raw_transactions.push(bytes);
        }
        Ok(raw_transactions)
    }

    /// Returns the EIP-2718 encoded receipts of the block identified by `block_id`.
    async fn raw_receipts(&self, block_id: BlockId) -> EthApiResult<Vec<Bytes>> {
        let receipts = self.eth_provider.block_receipts(Some(block_id)).await?.unwrap_or_default();
        let mut raw_receipts = Vec::with_capacity(receipts.len());
        for receipt in receipts {
            // Convert the transaction type through u8 into the typed TxType.
            let tx_type = Into::<u8>::into(receipt.transaction_type()).try_into()?;
            // Cumulative gas used is narrowed to u64; overflow surfaces as a conversion error.
            let cumulative_gas_used = receipt.inner.inner.cumulative_gas_used().try_into()?;
            // Rebuild a consensus receipt (with bloom) from the RPC receipt and 2718-encode it.
            raw_receipts.push(
                ReceiptWithBloom {
                    receipt: Receipt {
                        tx_type,
                        success: receipt.inner.status(),
                        cumulative_gas_used,
                        logs: receipt
                            .inner
                            .inner
                            .logs()
                            .iter()
                            .filter_map(|log| Log::new(log.address(), log.topics().to_vec(), log.data().data.clone()))
                            .collect(),
                    },
                    bloom: *receipt.inner.inner.logs_bloom(),
                }
                .encoded_2718()
                .into(),
            );
        }
        Ok(raw_receipts)
    }

    /// Traces every transaction of the block with the given number using Geth-style `opts`.
    async fn trace_block_by_number(
        &self,
        block_number: BlockNumberOrTag,
        opts: Option<GethDebugTracingOptions>,
    ) -> EthApiResult<Vec<TraceResult>> {
        // Inline the provider wrapping, consistent with the other trace_* methods.
        let tracer = TracerBuilder::new(Arc::new(&self.eth_provider))
            .await?
            .with_block_id(block_number.into())
            .await?
            .with_tracing_options(opts.unwrap_or_default().into())
            .build()?;
        Ok(tracer.debug_block()?)
    }

    /// Traces every transaction of the block with the given hash using Geth-style `opts`.
    async fn trace_block_by_hash(
        &self,
        block_hash: B256,
        opts: Option<GethDebugTracingOptions>,
    ) -> EthApiResult<Vec<TraceResult>> {
        let tracer = TracerBuilder::new(Arc::new(&self.eth_provider))
            .await?
            .with_block_id(block_hash.into())
            .await?
            .with_tracing_options(opts.unwrap_or_default().into())
            .build()?;
        Ok(tracer.debug_block()?)
    }

    /// Traces a single transaction identified by `transaction_hash` using Geth-style `opts`.
    async fn trace_transaction(
        &self,
        transaction_hash: B256,
        opts: Option<GethDebugTracingOptions>,
    ) -> EthApiResult<GethTrace> {
        let tracer = TracerBuilder::new(Arc::new(&self.eth_provider))
            .await?
            .with_transaction_hash(transaction_hash)
            .await?
            .with_tracing_options(opts.unwrap_or_default().into())
            .build()?;
        Ok(tracer.debug_transaction(transaction_hash)?)
    }

    /// Traces a call described by `request` at the given block (latest when `None`).
    async fn trace_call(
        &self,
        request: TransactionRequest,
        block_number: Option<BlockId>,
        opts: Option<GethDebugTracingCallOptions>,
    ) -> EthApiResult<GethTrace> {
        let tracer = TracerBuilder::new(Arc::new(&self.eth_provider))
            .await?
            .with_block_id(block_number.unwrap_or_default())
            .await?
            .with_tracing_options(opts.unwrap_or_default().into())
            .build()?;
        Ok(tracer.debug_transaction_request(&request)?)
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/pool_provider.rs | src/providers/pool_provider.rs | use super::eth_provider::TxPoolProvider;
use crate::providers::eth_provider::{database::types::transaction::ExtendedTransaction, provider::EthApiResult};
use alloy_primitives::Address;
use alloy_rpc_types_txpool::{TxpoolContent, TxpoolContentFrom, TxpoolInspect, TxpoolInspectSummary, TxpoolStatus};
use async_trait::async_trait;
use auto_impl::auto_impl;
#[async_trait]
#[auto_impl(Arc, &)]
/// Provider exposing the `txpool_*` JSON-RPC namespace over the transaction pool.
pub trait PoolProvider {
/// Returns the number of pending and queued transactions in the pool.
async fn txpool_status(&self) -> EthApiResult<TxpoolStatus>;
/// Returns a condensed per-sender, per-nonce summary of every pooled transaction.
async fn txpool_inspect(&self) -> EthApiResult<TxpoolInspect>;
/// Returns the pool content restricted to transactions sent by `from`.
async fn txpool_content_from(&self, from: Address) -> EthApiResult<TxpoolContentFrom<ExtendedTransaction>>;
/// Returns the full content of the transaction pool.
async fn txpool_content(&self) -> EthApiResult<TxpoolContent<ExtendedTransaction>>;
}
/// Implementation of [`PoolProvider`] backed by a transaction-pool-capable provider.
#[derive(Debug, Clone)]
pub struct PoolDataProvider<P: TxPoolProvider> {
/// Underlying provider used to read the transaction pool content.
eth_provider: P,
}
impl<P: TxPoolProvider> PoolDataProvider<P> {
/// Creates a new [`PoolDataProvider`] wrapping the given pool-capable provider.
pub const fn new(eth_provider: P) -> Self {
Self { eth_provider }
}
}
#[async_trait]
impl<P: TxPoolProvider + Send + Sync + 'static> PoolProvider for PoolDataProvider<P> {
    /// Returns the number of pending and queued transactions in the pool.
    async fn txpool_status(&self) -> EthApiResult<TxpoolStatus> {
        let content = self.eth_provider.txpool_content().await?;
        let pending = content.pending.len() as u64;
        let queued = content.queued.len() as u64;
        Ok(TxpoolStatus { pending, queued })
    }

    /// Returns a condensed per-sender, per-nonce summary of every pooled transaction.
    async fn txpool_inspect(&self) -> EthApiResult<TxpoolInspect> {
        let mut summary = TxpoolInspect::default();
        let content = self.eth_provider.content();

        // Summarize pending transactions, grouped by sender and then by nonce.
        for (sender, txs_by_nonce) in content.pending {
            for (nonce, tx) in txs_by_nonce {
                summary.pending.entry((*sender).into()).or_default().insert(
                    nonce.clone(),
                    TxpoolInspectSummary {
                        to: tx.to,
                        value: tx.value,
                        gas: tx.gas.into(),
                        gas_price: tx.gas_price.unwrap_or_default(),
                    },
                );
            }
        }

        // Summarize queued transactions, grouped by sender and then by nonce.
        for (sender, txs_by_nonce) in content.queued {
            for (nonce, tx) in txs_by_nonce {
                summary.queued.entry((*sender).into()).or_default().insert(
                    nonce.clone(),
                    TxpoolInspectSummary {
                        to: tx.to,
                        value: tx.value,
                        gas: tx.gas.into(),
                        gas_price: tx.gas_price.unwrap_or_default(),
                    },
                );
            }
        }

        Ok(summary)
    }

    /// Returns the pool content restricted to transactions sent by `from`.
    async fn txpool_content_from(&self, from: Address) -> EthApiResult<TxpoolContentFrom<ExtendedTransaction>> {
        Ok(self.eth_provider.txpool_content().await?.remove_from(&from))
    }

    /// Returns the full content of the transaction pool.
    async fn txpool_content(&self) -> EthApiResult<TxpoolContent<ExtendedTransaction>> {
        Ok(self.eth_provider.txpool_content().await?)
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/transactions.rs | src/providers/eth_provider/transactions.rs | use super::{
database::{
filter::EthDatabaseFilterBuilder,
types::transaction::{ExtendedTransaction, StoredTransaction},
},
error::ExecutionError,
starknet::kakarot_core::{account_contract::AccountContractReader, starknet_address},
utils::{contract_not_found, entrypoint_not_found},
};
use crate::{
into_via_wrapper,
providers::eth_provider::{
database::filter::{self},
provider::{EthApiResult, EthDataProvider},
ChainProvider,
},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{Address, B256, U256};
use alloy_rpc_types::Index;
use async_trait::async_trait;
use auto_impl::auto_impl;
use mongodb::bson::doc;
use tracing::Instrument;
#[async_trait]
#[auto_impl(Arc, &)]
/// Provider for transaction lookups and account nonces.
pub trait TransactionProvider: ChainProvider {
/// Returns the transaction by hash, or `None` if it is not known.
async fn transaction_by_hash(&self, hash: B256) -> EthApiResult<Option<ExtendedTransaction>>;
/// Returns the transaction by block hash and index, or `None` if it is not known.
async fn transaction_by_block_hash_and_index(
&self,
hash: B256,
index: Index,
) -> EthApiResult<Option<ExtendedTransaction>>;
/// Returns the transaction by block number and index, or `None` if it is not known.
async fn transaction_by_block_number_and_index(
&self,
number_or_tag: BlockNumberOrTag,
index: Index,
) -> EthApiResult<Option<ExtendedTransaction>>;
/// Returns the nonce for the address at the given block (latest if `None`).
async fn transaction_count(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<U256>;
}
#[async_trait]
impl<SP> TransactionProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    /// Looks up a transaction in the database by its hash.
    async fn transaction_by_hash(&self, hash: B256) -> EthApiResult<Option<ExtendedTransaction>> {
        let query = EthDatabaseFilterBuilder::<filter::Transaction>::default().with_tx_hash(&hash).build();
        let stored = self.database().get_one::<StoredTransaction>(query, None).await?;
        Ok(stored.map(Into::into))
    }

    /// Looks up a transaction in the database by block hash and position in the block.
    async fn transaction_by_block_hash_and_index(
        &self,
        hash: B256,
        index: Index,
    ) -> EthApiResult<Option<ExtendedTransaction>> {
        let query = EthDatabaseFilterBuilder::<filter::Transaction>::default()
            .with_block_hash(&hash)
            .with_tx_index(&index)
            .build();
        let stored = self.database().get_one::<StoredTransaction>(query, None).await?;
        Ok(stored.map(Into::into))
    }

    /// Looks up a transaction in the database by block number/tag and position in the block.
    async fn transaction_by_block_number_and_index(
        &self,
        number_or_tag: BlockNumberOrTag,
        index: Index,
    ) -> EthApiResult<Option<ExtendedTransaction>> {
        // Resolve tags such as "latest" to a concrete block number first.
        let block_number = self.tag_into_block_number(number_or_tag).await?;
        let query = EthDatabaseFilterBuilder::<filter::Transaction>::default()
            .with_block_number(block_number)
            .with_tx_index(&index)
            .build();
        let stored = self.database().get_one::<StoredTransaction>(query, None).await?;
        Ok(stored.map(Into::into))
    }

    /// Returns the nonce of `address` at `block_id`; zero for undeployed accounts.
    async fn transaction_count(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<U256> {
        let starknet_block_id = self.to_starknet_block_id(block_id).await?;
        // Resolve the Starknet account mapped to this EVM address and query its nonce.
        let contract = AccountContractReader::new(starknet_address(address), self.starknet_provider_inner());
        let span = tracing::span!(tracing::Level::INFO, "sn::kkrt_nonce");
        let nonce_result = contract.get_nonce().block_id(starknet_block_id).call().instrument(span).await;
        // An account that is not deployed (or lacks the entrypoint) has a zero nonce.
        if contract_not_found(&nonce_result) || entrypoint_not_found(&nonce_result) {
            return Ok(U256::ZERO);
        }
        let nonce = nonce_result.map_err(ExecutionError::from)?.nonce;
        Ok(into_via_wrapper!(nonce))
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/state.rs | src/providers/eth_provider/state.rs | use super::{
database::state::{EthCacheDatabase, EthDatabase},
error::{EthApiError, ExecutionError, TransactionError},
starknet::kakarot_core::{account_contract::AccountContractReader, starknet_address},
utils::{contract_not_found, entrypoint_not_found, split_u256},
};
use crate::{
into_via_wrapper,
providers::eth_provider::{
provider::{EthApiResult, EthDataProvider},
BlockProvider, ChainProvider,
},
};
use alloy_eips::BlockId;
use alloy_primitives::{Address, Bytes, B256, U256};
use alloy_rpc_types::{
serde_helpers::JsonStorageKey,
state::{EvmOverrides, StateOverride},
BlockOverrides, TransactionRequest,
};
use async_trait::async_trait;
use auto_impl::auto_impl;
use mongodb::bson::doc;
use num_traits::cast::ToPrimitive;
use reth_evm_ethereum::EthEvmConfig;
use reth_node_api::ConfigureEvm;
use reth_revm::db::CacheDB;
use reth_rpc_eth_types::error::ensure_success;
use starknet::core::utils::get_storage_var_address;
use std::sync::Arc;
use tracing::Instrument;
#[async_trait]
#[auto_impl(Arc, &)]
/// Provider for account state: balances, storage, code, and `eth_call`.
pub trait StateProvider: ChainProvider + BlockProvider {
/// Returns the balance of an address in native eth (latest block if `block_id` is `None`).
async fn balance(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<U256>;
/// Returns the storage of an address at a certain index (latest block if `block_id` is `None`).
async fn storage_at(
&self,
address: Address,
index: JsonStorageKey,
block_id: Option<BlockId>,
) -> EthApiResult<B256>;
/// Returns the code for the address at the given block; empty bytes for undeployed accounts.
async fn get_code(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<Bytes>;
/// Returns the result of a call, optionally applying state and block overrides.
async fn call(
&self,
request: TransactionRequest,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> EthApiResult<Bytes>;
}
#[async_trait]
impl<SP> StateProvider for EthDataProvider<SP>
where
SP: starknet::providers::Provider + Send + Sync,
{
/// Returns the native balance of `address`, read from the Starknet side at the mapped block.
async fn balance(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<U256> {
// Convert the optional Ethereum block ID to a Starknet block ID.
let starknet_block_id = self.to_starknet_block_id(block_id).await?;
// Get the balance of the address at the given block ID.
self.starknet_provider().balance_at(starknet_address(address), starknet_block_id).await.map_err(Into::into)
}
/// Returns the 32-byte storage word of `address` at `index`.
///
/// The 256-bit EVM slot is split into two 128-bit felts for the Starknet storage read,
/// then recombined as `low + (high << 128)`.
async fn storage_at(
&self,
address: Address,
index: JsonStorageKey,
block_id: Option<BlockId>,
) -> EthApiResult<B256> {
let starknet_block_id = self.to_starknet_block_id(block_id).await?;
let address = starknet_address(address);
let contract = AccountContractReader::new(address, self.starknet_provider_inner());
// Split the 256-bit slot index into two 128-bit storage keys.
let keys = split_u256(index.0);
let storage_address = get_storage_var_address("Account_storage", &keys).expect("Storage var name is not ASCII");
let span = tracing::span!(tracing::Level::INFO, "sn::storage");
let maybe_storage =
contract.storage(&storage_address).block_id(starknet_block_id).call().instrument(span).await;
// Undeployed accounts (or accounts without the entrypoint) read as zero.
if contract_not_found(&maybe_storage) || entrypoint_not_found(&maybe_storage) {
return Ok(U256::ZERO.into());
}
let storage = maybe_storage.map_err(ExecutionError::from)?.value;
let low: U256 = into_via_wrapper!(storage.low);
let high: U256 = into_via_wrapper!(storage.high);
// Recombine the two halves into the original 256-bit word.
let storage: U256 = low + (high << 128);
Ok(storage.into())
}
/// Returns the EVM bytecode of `address`; empty bytes for undeployed accounts.
async fn get_code(&self, address: Address, block_id: Option<BlockId>) -> EthApiResult<Bytes> {
let starknet_block_id = self.to_starknet_block_id(block_id).await?;
let address = starknet_address(address);
let account_contract = AccountContractReader::new(address, self.starknet_provider_inner());
let span = tracing::span!(tracing::Level::INFO, "sn::code");
let bytecode = account_contract.bytecode().block_id(starknet_block_id).call().instrument(span).await;
if contract_not_found(&bytecode) || entrypoint_not_found(&bytecode) {
return Ok(Bytes::default());
}
// Each felt is expected to hold one byte; felts that do not fit in a u8 are dropped.
let bytecode = bytecode.map_err(ExecutionError::from)?.bytecode.0;
Ok(Bytes::from(bytecode.into_iter().filter_map(|x| x.to_u8()).collect::<Vec<_>>()))
}
/// Executes an `eth_call`.
///
/// With state/block overrides the call is replayed locally through revm against a cached
/// view of the chain state; otherwise it is delegated to `call_inner` (executed on the
/// Starknet side). NOTE(review): the override path currently runs with a default EVM
/// environment (see TODO below) — the commented-out code is the intended env setup.
async fn call(
&self,
request: TransactionRequest,
block_id: Option<BlockId>,
state_overrides: Option<StateOverride>,
block_overrides: Option<Box<BlockOverrides>>,
) -> EthApiResult<Bytes> {
// Create the EVM overrides from the state and block overrides.
let evm_overrides = EvmOverrides::new(state_overrides, block_overrides);
// Check if either state_overrides or block_overrides is present.
if evm_overrides.has_state() || evm_overrides.has_block() {
// // Create the configuration environment with the chain ID.
// let cfg_env = CfgEnv::default().with_chain_id(self.chain_id().await?.unwrap_or_default().to());
// // Retrieve the block header details.
// let Header { number, timestamp, miner, base_fee_per_gas, difficulty, .. } =
// self.header(&block_id.unwrap_or_default()).await?.unwrap_or_default();
// // Create the block environment with the retrieved header details and transaction request.
// let block_env = BlockEnv {
// number: U256::from(number),
// timestamp: U256::from(timestamp),
// gas_limit: U256::from(request.gas.unwrap_or_default()),
// coinbase: miner,
// basefee: U256::from(base_fee_per_gas.unwrap_or_default()),
// prevrandao: Some(B256::from_slice(&difficulty.to_be_bytes::<32>()[..])),
// ..Default::default()
// };
// // Combine the configuration environment with the handler configuration.
// let cfg_env_with_handler_cfg =
// CfgEnvWithHandlerCfg { cfg_env, handler_cfg: HandlerCfg::new(SpecId::CANCUN) };
// Create a snapshot of the Ethereum database using the block ID.
let db = EthCacheDatabase(CacheDB::new(EthDatabase::new(self, block_id.unwrap_or_default())));
// TODO: no more prepare_call_env function (was put in trait, to be discussed)
// // Prepare the call environment with the transaction request, gas limit, and overrides.
// let env = prepare_call_env(
// cfg_env_with_handler_cfg,
// block_env,
// request.clone(),
// request.gas.unwrap_or_default().try_into().expect("Gas limit is too large"),
// &mut db.0,
// evm_overrides,
// )?;
// Execute the transaction using the configured EVM asynchronously.
let res = EthEvmConfig::new(Arc::new(Default::default()))
// TODO: We should not use default here, to be modified to activate the correct EVM config
.evm_with_env(db.0, Default::default())
.transact()
.map_err(|err| <TransactionError as Into<EthApiError>>::into(TransactionError::Call(err.into())))?;
// Ensure the transaction was successful and return the result.
return Ok(ensure_success(res.result)?);
}
// If no state or block overrides are present, call the helper function to execute the call.
let output = self.call_inner(request, block_id).await?;
// Filter the returned felts down to bytes (felts that do not fit in a u8 are dropped).
Ok(Bytes::from(output.0.into_iter().filter_map(|x| x.to_u8()).collect::<Vec<_>>()))
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/chain.rs | src/providers/eth_provider/chain.rs | use crate::providers::eth_provider::{
error::KakarotError,
provider::{EthApiResult, EthDataProvider},
};
use alloy_primitives::{U256, U64};
use alloy_rpc_types::{SyncInfo, SyncStatus};
use async_trait::async_trait;
use auto_impl::auto_impl;
use starknet::core::types::SyncStatusType;
use tracing::Instrument;
#[async_trait]
#[auto_impl(Arc, &)]
/// Provider for chain-level information: sync status and chain id.
pub trait ChainProvider {
/// Returns the syncing status of the underlying node.
async fn syncing(&self) -> EthApiResult<SyncStatus>;
/// Returns the chain id (`None` is allowed by the trait, though implementations may always return `Some`).
async fn chain_id(&self) -> EthApiResult<Option<U64>>;
}
#[async_trait]
impl<SP> ChainProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    /// Maps the Starknet node's sync status onto the Ethereum `SyncStatus` shape.
    async fn syncing(&self) -> EthApiResult<SyncStatus> {
        let span = tracing::span!(tracing::Level::INFO, "sn::syncing");
        let status = self.starknet_provider_inner().syncing().instrument(span).await.map_err(KakarotError::from)?;
        match status {
            SyncStatusType::NotSyncing => Ok(SyncStatus::None),
            SyncStatusType::Syncing(data) => {
                // Only the block-range fields are available from Starknet; the rest stay default.
                let info = SyncInfo {
                    starting_block: U256::from(data.starting_block_num),
                    current_block: U256::from(data.current_block_num),
                    highest_block: U256::from(data.highest_block_num),
                    ..Default::default()
                };
                Ok(SyncStatus::Info(Box::new(info)))
            }
        }
    }

    /// Returns the configured chain id (always `Some`).
    async fn chain_id(&self) -> EthApiResult<Option<U64>> {
        Ok(Some(U64::from(self.chain_id)))
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/error.rs | src/providers/eth_provider/error.rs | use alloy_primitives::{Bytes, B256};
use alloy_rpc_types::BlockHashOrNumber;
use alloy_sol_types::decode_revert_reason;
use jsonrpsee::types::ErrorObject;
use num_traits::cast::ToPrimitive;
use reth_rpc_eth_types::{error::ToRpcError, EthApiError as RethEthApiError};
use reth_transaction_pool::error::PoolError;
use starknet::core::types::Felt;
use thiserror::Error;
/// List of JSON-RPC error codes from ETH rpc spec.
/// <https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1474.md>
///
/// Variants without an explicit discriminant follow Rust's default numbering,
/// so `Unknown` is `0`; the remaining values match the EIP-1474 table.
#[derive(Debug, Copy, PartialEq, Eq, Clone)]
pub enum EthRpcErrorCode {
/// Custom geth error code, <https://github.com/vapory-legacy/wiki/blob/master/JSON-RPC-Error-Codes-Improvement-Proposal.md>
Unknown,
/// Geth convention: execution reverted (revert data is attached separately).
ExecutionError = 3,
ParseError = -32700,
InvalidRequest = -32600,
MethodNotFound = -32601,
InvalidParams = -32602,
InternalError = -32603,
InvalidInput = -32000,
ResourceNotFound = -32001,
ResourceUnavailable = -32002,
TransactionRejected = -32003,
MethodNotSupported = -32004,
RequestLimitExceeded = -32005,
JsonRpcVersionUnsupported = -32006,
}
impl From<&EthApiError> for EthRpcErrorCode {
/// Maps each [`EthApiError`] variant to the JSON-RPC error code closest to its meaning.
fn from(error: &EthApiError) -> Self {
match error {
// Missing chain data maps to "resource not found".
EthApiError::UnknownBlock(_) | EthApiError::UnknownBlockNumber(_) | EthApiError::TransactionNotFound(_) => {
Self::ResourceNotFound
}
// Malformed or unconvertible input maps to "invalid params".
EthApiError::Signature(_)
| EthApiError::EthereumDataFormat(_)
| EthApiError::CalldataExceededLimit(_, _)
| EthApiError::RethEthApi(_) => Self::InvalidParams,
// Transaction errors carry their own finer-grained mapping.
EthApiError::Transaction(err) => err.into(),
// TODO improve the error
EthApiError::Unsupported(_) | EthApiError::Kakarot(_) | EthApiError::Pool(_) => Self::InternalError,
EthApiError::Execution(_) => Self::ExecutionError,
}
}
}
impl From<EthApiError> for RethEthApiError {
/// Wraps the Kakarot error as an opaque "other" error on the reth side.
fn from(value: EthApiError) -> Self {
Self::other(value)
}
}
/// Error that can occur when interacting with the ETH Api.
///
/// `Display` is implemented manually below (no `#[error]` attributes here), and the
/// JSON-RPC code mapping lives in `From<&EthApiError> for EthRpcErrorCode`.
#[derive(Debug, Error)]
pub enum EthApiError {
/// When a block is not found
UnknownBlock(BlockHashOrNumber),
/// When an unknown block number is encountered
UnknownBlockNumber(Option<u64>),
/// When a transaction is not found
TransactionNotFound(B256),
/// Error related to transaction
Transaction(#[from] TransactionError),
/// Error related to transaction pool
Pool(#[from] PoolError),
/// Error related to signing
Signature(#[from] SignatureError),
/// Unsupported feature
Unsupported(&'static str),
/// Ethereum data format error
EthereumDataFormat(#[from] EthereumDataFormatError),
/// Execution error
Execution(#[from] ExecutionError),
/// Kakarot related error (database, ...)
Kakarot(KakarotError),
/// Error related to transaction calldata being too large.
CalldataExceededLimit(usize, usize),
/// Reth Eth API error
RethEthApi(#[from] RethEthApiError),
}
impl std::fmt::Display for EthApiError {
/// Renders the user-facing message attached to JSON-RPC error responses.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::UnknownBlock(block) => write!(f, "unknown block {block}"),
Self::UnknownBlockNumber(block) => write!(f, "unknown block number {block:?}"),
Self::TransactionNotFound(tx) => write!(f, "transaction not found {tx}"),
Self::Transaction(err) => write!(f, "{err}"),
Self::Pool(err) => write!(f, "{err}"),
Self::Signature(err) => write!(f, "{err}"),
Self::RethEthApi(err) => write!(f, "{err}"),
Self::Unsupported(feature) => write!(f, "unsupported: {feature}"),
Self::EthereumDataFormat(err) => write!(f, "ethereum data format error: {err}"),
Self::Execution(err) => write!(f, "{err}"),
Self::Kakarot(KakarotError::Provider(err)) => {
// We use Debug here otherwise we risk losing some information on contract error
write!(f, "starknet provider error: {err:?}")
}
Self::Kakarot(err) => write!(f, "kakarot error: {err}"),
Self::CalldataExceededLimit(limit, actual) => {
write!(f, "calldata exceeded limit of {limit}: {actual}")
}
}
}
}
/// Constructs a JSON-RPC error object, consisting of `code` and `message`.
impl From<EthApiError> for ErrorObject<'static> {
/// Delegates to the `From<&EthApiError>` implementation below.
fn from(value: EthApiError) -> Self {
(&value).into()
}
}
/// Constructs a JSON-RPC error object, consisting of `code` and `message`.
impl From<&EthApiError> for ErrorObject<'static> {
    fn from(value: &EthApiError) -> Self {
        // Raw EVM revert bytes are attached as error `data` so clients can decode them.
        let data = if let EthApiError::Execution(ExecutionError::Evm(EvmError::Other(ref b))) = value {
            Some(b.clone())
        } else {
            None
        };
        ErrorObject::owned(EthRpcErrorCode::from(value) as i32, value.to_string(), data)
    }
}
impl ToRpcError for EthApiError {
/// Converts the error into a JSON-RPC error object via the `From` impl above.
fn to_rpc_error(&self) -> ErrorObject<'static> {
self.into()
}
}
/// Error related to the Kakarot eth provider
/// which utilizes the starknet provider and
/// a database internally.
#[derive(Debug, Error)]
pub enum KakarotError {
/// Error related to the starknet provider.
#[error(transparent)]
Provider(#[from] starknet::providers::ProviderError),
/// Error related to the database.
#[error(transparent)]
Database(#[from] mongodb::error::Error),
/// Error related to the database deserialization.
#[error(transparent)]
DatabaseDeserialization(#[from] mongodb::bson::de::Error),
}
impl From<KakarotError> for EthApiError {
/// Wraps internal provider/database failures in the API-level error type.
fn from(value: KakarotError) -> Self {
Self::Kakarot(value)
}
}
/// Error related to execution errors, by the EVM or Cairo vm.
///
/// `Display` is implemented manually below; every rendering is prefixed with
/// "execution reverted".
#[derive(Debug, Error)]
pub enum ExecutionError {
/// Error related to the EVM execution failures.
Evm(#[from] EvmError),
/// Error related to the Cairo vm execution failures.
CairoVm(#[from] CairoError),
/// Other execution error.
Other(String),
}
impl From<cainome::cairo_serde::Error> for ExecutionError {
    /// Converts a Cairo (de)serialization error, special-casing VM step exhaustion.
    fn from(error: cainome::cairo_serde::Error) -> Self {
        let message = error.to_string();
        // The Cairo VM reports step exhaustion through this exact message fragment.
        if message.contains("RunResources has no remaining steps.") {
            Self::CairoVm(CairoError::VmOutOfResources)
        } else {
            Self::Other(message)
        }
    }
}
impl std::fmt::Display for ExecutionError {
/// Renders "execution reverted", optionally followed by a decoded or raw reason.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("execution reverted")?;
match self {
Self::Evm(err) => match err {
EvmError::Other(b) => {
// Try to decode the raw bytes as a Solidity `Error(string)`/`Panic` revert reason;
// if that fails, the bare "execution reverted" message is kept.
if let Some(reason) = decode_revert_reason(b.as_ref()) {
write!(f, ": {reason}")?;
}
Ok(())
}
_ => write!(f, ": {err}"),
},
Self::CairoVm(err) => write!(f, ": {err}"),
Self::Other(err) => write!(f, ": {err}"),
}
}
}
/// Error related to the Cairo vm execution failures.
#[derive(Debug, Error)]
pub enum CairoError {
/// The Cairo VM ran out of execution steps (see `From<cainome::cairo_serde::Error>`).
#[error("cairo vm out of resources")]
VmOutOfResources,
}
/// Error related to EVM execution.
///
/// Variants mirror the revert messages emitted by the Kakarot Cairo contracts
/// (see `From<Vec<Felt>>` below for the mapping); [`EvmError::Other`] carries the
/// raw revert bytes when no known message matches.
#[derive(Debug, Error)]
pub enum EvmError {
#[error("validation failed")]
Validation,
#[error("state modification error")]
StateModification,
#[error("unknown opcode")]
UnknownOpcode,
#[error("invalid jump dest")]
InvalidJumpDest,
#[error("caller is not a Kakarot EOA")]
NotKakarotEoaCaller,
#[error("function limited to view call")]
ViewFunction,
#[error("stack overflow")]
StackOverflow,
#[error("stack underflow")]
StackUnderflow,
#[error("out of bounds read")]
OutOfBoundsRead,
#[error("unknown precompile {0}")]
UnknownPrecompile(String),
#[error("unauthorized precompile")]
UnauthorizedPrecompile,
#[error("not implemented precompile {0}")]
NotImplementedPrecompile(String),
#[error("invalid cairo selector")]
InvalidCairoSelector,
#[error("precompile wrong input length")]
PrecompileInputLength,
#[error("precompile flag error")]
PrecompileFlag,
#[error("transfer amount exceeds balance")]
Balance,
#[error("address collision")]
AddressCollision,
#[error("out of gas")]
OutOfGas,
/// Catch-all: raw revert bytes that did not match any known Kakarot message.
#[error("{0}")]
Other(Bytes),
}
impl From<Vec<Felt>> for EvmError {
    /// Decodes a Kakarot revert payload (one byte per felt) into a typed [`EvmError`].
    ///
    /// Felts that do not fit in a `u8` are dropped. If the resulting bytes are not
    /// valid UTF-8, or do not match a known `Kakarot: `/`Precompile: ` message, the
    /// raw bytes are returned via [`EvmError::Other`].
    fn from(value: Vec<Felt>) -> Self {
        // Each felt is expected to carry a single byte of the revert message.
        let bytes = value.into_iter().filter_map(|x| x.to_u8()).collect::<Vec<_>>();
        // Replaces the previous `is_err()` + `unwrap()` pattern; on invalid UTF-8 the
        // original buffer is recovered from the error instead of cloning it upfront.
        let revert_reason = match String::from_utf8(bytes) {
            Ok(reason) => reason,
            Err(err) => return Self::Other(err.into_bytes().into()),
        };
        let trimmed = revert_reason.trim_start_matches("Kakarot: ").trim_start_matches("Precompile: ");
        match trimmed {
            "eth validation failed" => Self::Validation,
            "StateModificationError" => Self::StateModification,
            "UnknownOpcode" => Self::UnknownOpcode,
            "invalidJumpDestError" => Self::InvalidJumpDest,
            "caller contract is not a Kakarot account" => Self::NotKakarotEoaCaller,
            "entrypoint should only be called in view mode" => Self::ViewFunction,
            "StackOverflow" => Self::StackOverflow,
            "StackUnderflow" => Self::StackUnderflow,
            "OutOfBoundsRead" => Self::OutOfBoundsRead,
            s if s.contains("UnknownPrecompile") => {
                Self::UnknownPrecompile(s.trim_start_matches("UnknownPrecompile ").to_string())
            }
            "unauthorizedPrecompile" => Self::UnauthorizedPrecompile,
            s if s.contains("NotImplementedPrecompile") => {
                Self::NotImplementedPrecompile(s.trim_start_matches("NotImplementedPrecompile ").to_string())
            }
            "invalidCairoSelector" => Self::InvalidCairoSelector,
            "wrong input_length" => Self::PrecompileInputLength,
            "flag error" => Self::PrecompileFlag,
            "transfer amount exceeds balance" => Self::Balance,
            "addressCollision" => Self::AddressCollision,
            s if s.contains("outOfGas") => Self::OutOfGas,
            // Unknown message: return the full, untrimmed bytes (identical to the input).
            _ => Self::Other(revert_reason.clone().into_bytes().into()),
        }
    }
}
/// Error related to a transaction.
#[derive(Debug, Error)]
pub enum TransactionError {
/// Thrown when the chain id is invalid.
#[error("invalid chain id")]
InvalidChainId,
/// Thrown when the transaction type is invalid.
#[error("invalid transaction type")]
InvalidTransactionType,
/// Thrown when the gas used overflows u128.
#[error("gas overflow")]
GasOverflow,
/// Thrown when the max fee per gas is lower than the base fee.
#[error("max fee per gas {0} lower than base fee {1}")]
FeeCapTooLow(u128, u128),
/// Thrown when the max fee per gas is lower than the max priority fee per gas.
#[error("max fee per gas {0} lower than max priority fee per gas {1}")]
TipAboveFeeCap(u128, u128),
/// Thrown when the gas limit exceeds the block's gas limit.
#[error("transaction gas limit {0} exceeds block gas limit {1}")]
ExceedsBlockGasLimit(u128, u128),
/// Thrown when the transaction isn't the
/// [`BlockTransactions::FullTransactions`] variant.
#[error("expected full transactions")]
ExpectedFullTransactions,
/// Thrown if the broadcasting of the Starknet transaction fails
#[error("broadcasting error: {0}")]
Broadcast(Box<dyn std::error::Error + Send + Sync>),
/// Thrown if the tracing fails
#[error("tracing error: {0}")]
Tracing(Box<dyn std::error::Error + Send + Sync>),
/// Thrown if the call with state or block overrides fails
#[error("tracing error: {0}")]
Call(Box<dyn std::error::Error + Send + Sync>),
}
impl From<&TransactionError> for EthRpcErrorCode {
/// Maps each [`TransactionError`] variant to the closest JSON-RPC error code.
fn from(error: &TransactionError) -> Self {
match error {
// Malformed transaction fields are caller errors.
TransactionError::InvalidChainId | TransactionError::InvalidTransactionType => Self::InvalidInput,
// Fee/gas violations mean the node refuses the transaction.
TransactionError::GasOverflow
| TransactionError::FeeCapTooLow(_, _)
| TransactionError::TipAboveFeeCap(_, _) => Self::TransactionRejected,
// Remaining variants are server-side failures.
TransactionError::ExpectedFullTransactions
| TransactionError::Tracing(_)
| TransactionError::Call(_)
| TransactionError::Broadcast(_)
| TransactionError::ExceedsBlockGasLimit(_, _) => Self::InternalError,
}
}
}
/// Error related to signature.
#[derive(Debug, Error)]
pub enum SignatureError {
    /// Thrown when signer recovery from the signature fails.
    #[error("could not recover signer")]
    Recovery,
    /// Thrown when signing a transaction fails.
    #[error("failed to sign transaction")]
    SigningFailure,
    /// Thrown when the expected signature is missing.
    #[error("missing signature")]
    MissingSignature,
    /// Thrown when the signature's parity component is invalid.
    #[error("invalid parity")]
    InvalidParity,
}
/// Error related to Ethereum data format.
#[derive(Debug, Error)]
pub enum EthereumDataFormatError {
/// Error related to conversion in header.
#[error("header conversion error")]
HeaderConversion,
/// Error related to conversion in receipt.
#[error("header conversion error")]
ReceiptConversion,
/// Error related to conversion in transaction.
#[error("transaction conversion error")]
TransactionConversion,
/// Error related to starknet to eth conversion or vice versa.
#[error("primitive conversion error")]
Primitive,
/// Error related to rlp conversion.
#[error(transparent)]
Rlp(#[from] alloy_rlp::Error),
/// Error related to alloy rpc/primitive conversion.
#[error(transparent)]
AlloyTransactionConversion(#[from] alloy_rpc_types::ConversionError),
/// Error related to integer conversion.
#[error(transparent)]
IntConversions(#[from] std::num::TryFromIntError),
/// Custom error with a static string.
#[error("{0}")]
CustomError(&'static str),
}
// Conversions from foreign error types into `EthApiError`, all routed through the
// `EthereumDataFormat` wrapper variant.
impl From<alloy_rlp::Error> for EthApiError {
    fn from(value: alloy_rlp::Error) -> Self {
        // `#[from]` on `EthereumDataFormatError::Rlp` supplies the inner conversion.
        Self::EthereumDataFormat(value.into())
    }
}
impl From<alloy_rpc_types::ConversionError> for EthApiError {
    fn from(value: alloy_rpc_types::ConversionError) -> Self {
        // `#[from]` on `AlloyTransactionConversion` supplies the inner conversion.
        Self::EthereumDataFormat(value.into())
    }
}
impl From<&'static str> for EthApiError {
    fn from(value: &'static str) -> Self {
        // `CustomError` has no `#[from]`, so wrap explicitly.
        Self::EthereumDataFormat(EthereumDataFormatError::CustomError(value))
    }
}
impl From<std::num::TryFromIntError> for EthApiError {
    fn from(value: std::num::TryFromIntError) -> Self {
        // `#[from]` on `IntConversions` supplies the inner conversion.
        Self::EthereumDataFormat(value.into())
    }
}
#[cfg(test)]
mod tests {
use starknet::core::types::ContractErrorData;
use super::*;
#[test]
fn test_assure_source_error_visible_in_kakarot_error() {
// Given
let err = KakarotError::Provider(starknet::providers::ProviderError::StarknetError(
starknet::core::types::StarknetError::UnexpectedError("test".to_string()),
));
// When
let eth_err: EthApiError = err.into();
let json_err: ErrorObject<'static> = eth_err.into();
// Then
assert_eq!(json_err.message(), "starknet provider error: StarknetError(UnexpectedError(\"test\"))");
}
#[test]
fn test_decode_revert_message() {
// Given
let b: Vec<_> = vec![
0x08u8, 0xc3, 0x79, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x46, 0x61, 0x75,
0x63, 0x65, 0x74, 0x3a, 0x20, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x20, 0x74, 0x6f, 0x6f, 0x20, 0x73, 0x6f, 0x6f,
0x6e, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
let bytes = b.clone().into_iter().map(Felt::from).collect::<Vec<_>>();
// When
let evm_err: EvmError = bytes.into();
let json_rpsee_error: ErrorObject<'static> = EthApiError::Execution(ExecutionError::Evm(evm_err)).into();
// Then
assert_eq!(json_rpsee_error.message(), "execution reverted: revert: Faucet: Claim too soon.");
assert_eq!(json_rpsee_error.code(), 3);
assert_eq!(format!("{}", json_rpsee_error.data().unwrap()), format!("\"{}\"", Bytes::from(b)));
}
#[test]
fn test_decode_undecodable_message() {
// Given
let b = vec![
0x6cu8, 0xa7, 0xb8, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71,
0x52, 0xe0, 0x85, 0x5b, 0xab, 0x82, 0xb8, 0xe1, 0x0b, 0x86, 0x92, 0xe5, 0x84, 0xad, 0x03, 0x4b, 0xd2, 0x29,
0x12,
];
let bytes = b.clone().into_iter().map(Felt::from).collect::<Vec<_>>();
// When
let evm_err: EvmError = bytes.into();
let json_rpsee_error: ErrorObject<'static> = EthApiError::Execution(ExecutionError::Evm(evm_err)).into();
// Then
assert_eq!(json_rpsee_error.message(), "execution reverted");
assert_eq!(json_rpsee_error.code(), 3);
assert_eq!(format!("{}", json_rpsee_error.data().unwrap()), format!("\"{}\"", Bytes::from(b)));
}
#[test]
fn test_decode_kakarot_evm_error() {
// Given
let bytes = vec![
0x4bu8, 0x61, 0x6b, 0x61, 0x72, 0x6f, 0x74, 0x3a, 0x20, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x62, 0x65, 0x20,
0x63, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x76, 0x69, 0x65, 0x77, 0x20, 0x6d, 0x6f, 0x64,
0x65,
]
.into_iter()
.map(Felt::from)
.collect::<Vec<_>>();
// When
let evm_err: EvmError = bytes.into();
let json_rpsee_error: ErrorObject<'static> = EthApiError::Execution(ExecutionError::Evm(evm_err)).into();
// Then
assert_eq!(json_rpsee_error.message(), "execution reverted: function limited to view call");
assert_eq!(json_rpsee_error.code(), 3);
assert!(json_rpsee_error.data().is_none());
}
#[test]
fn test_display_execution_error() {
// Given
let err = EthApiError::Execution(ExecutionError::Evm(EvmError::Balance));
// When
let display = format!("{err}");
// Then
assert_eq!(display, "execution reverted: transfer amount exceeds balance");
}
#[test]
fn test_from_run_resources_error() {
let err = cainome::cairo_serde::Error::Provider(starknet::providers::ProviderError::StarknetError(
starknet::core::types::StarknetError::ContractError(ContractErrorData {
revert_error:
"Error in the called contract (0x007fbaddebb5e88696fac9fc5aaf8bdff4bbca1eaf06a0cb5ae94df8ea93f882):
Error at pc=0:31:
Got an exception while executing a hint.
Cairo traceback (most recent call last):
Unknown location (pc=0:4836)
Unknown location (pc=0:4775)
Unknown location (pc=0:3860)
Unknown location (pc=0:663)
Error in the called contract (0x040e005e7acea50434c537ba62e72e8a8e960d679c87609029d4639e2bdb9cb2):
Error at pc=0:24:
Could not reach the end of the program. RunResources has no remaining steps.
Cairo traceback (most recent call last):
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23327)
Unknown location (pc=0:23325)
Unknown location (pc=0:22158)"
.to_string(),
}),
));
// When
let eth_err: ExecutionError = err.into();
let display = format!("{eth_err}");
// Then
assert_eq!(display, "execution reverted: cairo vm out of resources");
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/utils.rs | src/providers/eth_provider/utils.rs | use alloy_primitives::{U128, U256};
use cainome::cairo_serde::Error;
use starknet::{
core::types::{ContractErrorData, StarknetError},
providers::ProviderError,
};
/// Splits a U256 value into two generic values implementing the From<u128> trait:
/// `[low 128 bits, high 128 bits]`.
#[inline]
pub fn split_u256<T: From<u128>>(value: impl Into<U256>) -> [T; 2] {
    let value: U256 = value.into();
    // Mask to keep only the low 128 bits; the result is guaranteed to fit in u128.
    let low = u128::try_from(value & U256::from(U128::MAX)).unwrap();
    // Shift away the low half; what remains is guaranteed to fit in u128.
    let high = u128::try_from(value >> 128).unwrap();
    [low.into(), high.into()]
}
/// Checks if the error is a contract not found error.
/// Some providers return a contract not found error when the contract is not deployed.
/// Katana returns a contract error with a revert message containing "is not deployed".
#[inline]
pub(crate) fn contract_not_found<T>(err: &Result<T, Error>) -> bool {
    match err {
        // A successful result is, by definition, not a "contract not found".
        Ok(_) => false,
        Err(err) => {
            // Case 1: the provider reports `ContractNotFound` directly.
            matches!(err, Error::Provider(ProviderError::StarknetError(StarknetError::ContractNotFound)))
                // Case 2 (Katana): a generic contract error whose revert reason
                // mentions that the contract "is not deployed".
                || matches!(
                    err,
                    Error::Provider(ProviderError::StarknetError(StarknetError::ContractError(ContractErrorData {
                        revert_error: reason
                    }))) if reason.contains("is not deployed")
                )
        }
    }
}
/// Checks if the error indicates that a class hash is not declared: a contract
/// error whose revert reason contains "is not declared.".
#[inline]
pub(crate) fn class_hash_not_declared<T>(err: &Result<T, Error>) -> bool {
    match err {
        Ok(_) => false,
        Err(err) => {
            matches!(
                err,
                Error::Provider(ProviderError::StarknetError(StarknetError::ContractError(ContractErrorData {
                    revert_error
                }))) if revert_error.contains("is not declared.")
            )
        }
    }
}
/// Checks if the error is an entrypoint not found error: a contract error whose
/// revert reason contains both "Entry point" and "not found in contract".
#[inline]
pub(crate) fn entrypoint_not_found<T>(err: &Result<T, Error>) -> bool {
    match err {
        Ok(_) => false,
        Err(err) => matches!(
            err,
            Error::Provider(ProviderError::StarknetError(StarknetError::ContractError(ContractErrorData {
                revert_error: reason
            }))) if reason.contains("Entry point") && reason.contains("not found in contract")
        ),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::*;
    use std::str::FromStr;
    #[test]
    fn test_split_u256() {
        // Define a property-based test using Proptest
        proptest!(|(value in any::<U256>())| {
            // Call the split_u256 function to split the U256 value into two u128 values
            let result = split_u256::<u128>(value);
            // Combine the two u128 values into a hexadecimal string
            // (high limb first, low limb zero-padded to 32 hex chars = 128 bits).
            let combined_hex = format!("{:#x}{:0width$x}", result[1], result[0], width = 32);
            // Assertion to check the equality with the original U256 value
            assert_eq!(U256::from_str(&combined_hex).unwrap(), value);
        });
    }
    #[test]
    fn test_class_hash_not_declared() {
        // A real-world revert string containing "is not declared." must be detected.
        let err = Error::Provider(ProviderError::StarknetError(StarknetError::ContractError(ContractErrorData {
            revert_error: "\"Error in the called contract (0x049d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7):
\\nError at pc=0:104:\\nGot an exception while executing a hint: Class with ClassHash(\\n StarkFelt(\\n
\\\"0x0000000000000000000000000000000000000000000000000000000000000000\\\",\\n ),\\n) is not declared.\\nCairo traceback (most recent call last):\\n
Unknown location (pc=0:1678)\\nUnknown location (pc=0:1664)\\n\"".to_string(),
        })));
        assert!(class_hash_not_declared::<()>(&Err(err)));
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/mod.rs | src/providers/eth_provider/mod.rs | pub mod blocks;
pub mod chain;
pub mod constant;
pub mod contracts;
pub mod database;
pub mod error;
pub mod gas;
pub mod logs;
pub mod provider;
pub mod receipts;
pub mod starknet;
pub mod state;
pub mod transactions;
pub mod tx_pool;
pub mod utils;
pub use blocks::*;
pub use chain::*;
pub use gas::*;
pub use logs::*;
pub use receipts::*;
pub use state::*;
pub use transactions::*;
pub use tx_pool::*;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/constant.rs | src/providers/eth_provider/constant.rs | use alloy_primitives::{B256, U256};
use serde::{Deserialize, Serialize};
use starknet::core::types::Felt;
use std::{str::FromStr, sync::LazyLock};
/// Maximum priority fee per gas
// NOTE(review): always 0; kept as a `LazyLock` (callers deref it) rather than a
// plain const — confirm whether this is intentional for future configurability.
pub static MAX_PRIORITY_FEE_PER_GAS: LazyLock<u64> = LazyLock::new(|| 0);
/// Maximum number of logs that can be fetched in a single request.
/// Read from the `MAX_LOGS` env var; unset or unparsable means no limit.
pub static MAX_LOGS: LazyLock<Option<u64>> =
    LazyLock::new(|| std::env::var("MAX_LOGS").ok().and_then(|val| u64::from_str(&val).ok()));
/// Gas limit for estimate gas and call
pub const CALL_REQUEST_GAS_LIMIT: u64 = 50_000_000;
/// Number of characters for representing a U256 in a hex string form. Used for padding hashes
pub const HASH_HEX_STRING_LEN: usize = 64;
/// Number of characters for representing logs topics in a hex string form. Used for padding logs topics
pub const LOGS_TOPICS_HEX_STRING_LEN: usize = HASH_HEX_STRING_LEN;
/// Number of characters for representing a u64 in a hex string form. Used for padding numbers
pub const U64_HEX_STRING_LEN: usize = 16;
/// Number of characters for representing a block number in a hex string form. Used for padding block numbers
pub const BLOCK_NUMBER_HEX_STRING_LEN: usize = U64_HEX_STRING_LEN;
/// Number of characters for representing an address in a hex string form. Used for padding addresses
pub const ADDRESS_HEX_STRING_LEN: usize = 40;
/// Starknet Modulus: 0x800000000000011000000000000000000000000000000000000000000000001
// Little-endian limbs: limb 0 holds the low 64 bits (0x1), limb 3 the high 64 bits.
pub const STARKNET_MODULUS: U256 = U256::from_limbs([0x1, 0, 0, 0x0800_0000_0000_0011]);
/// Struct used to return the constant values from the `kakarot_getConfig` endpoint
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct Constant {
    /// Maximum number of logs to output for `eth_getLogs` RPC Method
    pub max_logs: Option<u64>,
    /// Name of the `StarkNet` network.
    pub starknet_network: String,
    /// Maximum number of Felts in calldata.
    pub max_felts_in_calldata: usize,
    /// List of whitelisted hashes allow to submit pre EIP-155 transactions.
    pub white_listed_eip_155_transaction_hashes: Vec<B256>,
    /// Kakarot address the RPC points to.
    pub kakarot_address: Felt,
}
#[cfg(feature = "hive")]
pub mod hive {
    //! Hive end-to-end testing support: a lazily-initialized deployer account
    //! and a shared, mutex-guarded nonce for it.
    use std::{
        env::var,
        str::FromStr,
        sync::{Arc, LazyLock},
    };
    use starknet::{
        accounts::{ConnectedAccount, ExecutionEncoding, SingleOwnerAccount},
        core::types::Felt,
        providers::{jsonrpc::HttpTransport, JsonRpcClient},
        signers::{LocalWallet, SigningKey},
    };
    use tokio::sync::Mutex;
    use crate::constants::{KAKAROT_RPC_CONFIG, STARKNET_CHAIN_ID};
    /// Deployer account built from the `KATANA_PRIVATE_KEY` / `KATANA_ACCOUNT_ADDRESS`
    /// env vars. Panics on first access if either is missing or unparsable.
    pub static DEPLOY_WALLET: LazyLock<SingleOwnerAccount<JsonRpcClient<HttpTransport>, LocalWallet>> =
        LazyLock::new(|| {
            SingleOwnerAccount::new(
                JsonRpcClient::new(HttpTransport::new(KAKAROT_RPC_CONFIG.network_url.clone())),
                LocalWallet::from_signing_key(SigningKey::from_secret_scalar(
                    Felt::from_str(&var("KATANA_PRIVATE_KEY").expect("Missing deployer private key"))
                        .expect("Failed to parse deployer private key"),
                )),
                Felt::from_str(&var("KATANA_ACCOUNT_ADDRESS").expect("Missing deployer address"))
                    .expect("Failed to parse deployer address"),
                *STARKNET_CHAIN_ID,
                ExecutionEncoding::New,
            )
        });
    /// Current nonce of [`DEPLOY_WALLET`], fetched once on first access.
    // NOTE(review): `block_in_place` + `block_on` requires being called from a
    // multi-thread tokio runtime — confirm all first-access sites satisfy this.
    pub static DEPLOY_WALLET_NONCE: LazyLock<Arc<Mutex<Felt>>> = LazyLock::new(|| {
        tokio::task::block_in_place(|| {
            tokio::runtime::Handle::current().block_on(async {
                Arc::new(Mutex::new(DEPLOY_WALLET.get_nonce().await.expect("failed to fetch deploy wallet nonce")))
            })
        })
    });
}
#[cfg(feature = "forwarding")]
pub mod forwarding {
    //! Request-forwarding support: URL of the upstream RPC to forward to.
    use super::LazyLock;
    use url::Url;
    /// Upstream RPC endpoint, read from the `MAIN_RPC_URL` env var.
    /// Panics on first access if the variable is missing or not a valid URL.
    pub static MAIN_RPC_URL: LazyLock<Url> = LazyLock::new(|| {
        Url::parse(&std::env::var("MAIN_RPC_URL").expect("Missing MAIN_RPC_URL environment variable"))
            .expect("Invalid MAIN_RPC_URL environment variable")
    });
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/gas.rs | src/providers/eth_provider/gas.rs | use super::{
constant::BLOCK_NUMBER_HEX_STRING_LEN,
error::{ExecutionError, KakarotError},
starknet::kakarot_core::{core::KakarotCoreReader, KAKAROT_ADDRESS},
};
use crate::{
into_via_wrapper,
providers::eth_provider::{
database::{filter::format_hex, types::header::StoredHeader},
provider::{EthApiResult, EthDataProvider},
},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{U256, U64};
use alloy_rpc_types::{FeeHistory, TransactionRequest};
use async_trait::async_trait;
use auto_impl::auto_impl;
use eyre::eyre;
use mongodb::bson::doc;
use tracing::Instrument;
/// Gas-related Ethereum RPC surface (`eth_estimateGas`, `eth_feeHistory`, `eth_gasPrice`).
#[async_trait]
#[auto_impl(Arc, &)]
pub trait GasProvider {
    /// Returns the result of an estimate gas call.
    async fn estimate_gas(&self, call: TransactionRequest, block_id: Option<BlockId>) -> EthApiResult<U256>;
    /// Returns the fee history given a block count and a newest block number.
    async fn fee_history(
        &self,
        block_count: U64,
        newest_block: BlockNumberOrTag,
        reward_percentiles: Option<Vec<f64>>,
    ) -> EthApiResult<FeeHistory>;
    /// Returns the current gas price.
    async fn gas_price(&self) -> EthApiResult<U256>;
}
#[async_trait]
impl<SP> GasProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    async fn estimate_gas(&self, request: TransactionRequest, block_id: Option<BlockId>) -> EthApiResult<U256> {
        // Set a high gas limit to make sure the transaction will not fail due to gas.
        let request = TransactionRequest { gas: Some(u64::MAX), ..request };
        let gas_used = self.estimate_gas_inner(request, block_id).await?;
        // Increase the gas used by 40% to make sure the transaction will not fail due to gas.
        // This is a temporary solution until we have a proper gas estimation.
        // Does not apply to Hive feature otherwise end2end tests will fail.
        let gas_used = if cfg!(feature = "hive") { gas_used } else { gas_used * 140 / 100 };
        Ok(U256::from(gas_used))
    }
    async fn fee_history(
        &self,
        block_count: U64,
        newest_block: BlockNumberOrTag,
        _reward_percentiles: Option<Vec<f64>>,
    ) -> EthApiResult<FeeHistory> {
        // An empty window yields an empty (default) history.
        if block_count == U64::ZERO {
            return Ok(FeeHistory::default());
        }
        let end_block = self.tag_into_block_number(newest_block).await?;
        let end_block_plus_one = end_block.saturating_add(1);
        // 0 <= start_block <= end_block
        let start_block = end_block_plus_one.saturating_sub(block_count.to());
        // Fetch all headers in [start_block, end_block]; numbers are stored as padded hex.
        let header_filter = doc! {"$and": [ { "header.number": { "$gte": format_hex(start_block, BLOCK_NUMBER_HEX_STRING_LEN) } }, { "header.number": { "$lte": format_hex(end_block, BLOCK_NUMBER_HEX_STRING_LEN) } } ] };
        let blocks: Vec<StoredHeader> = self.database().get(header_filter, None).await?;
        if blocks.is_empty() {
            return Err(
                KakarotError::from(mongodb::error::Error::custom(eyre!("No blocks found in the database"))).into()
            );
        }
        let gas_used_ratio = blocks
            .iter()
            .map(|header| {
                let gas_used = header.gas_used as f64;
                let mut gas_limit = header.gas_limit as f64;
                // Guard against a division by zero on headers with a zero gas limit.
                if gas_limit == 0. {
                    gas_limit = 1.;
                };
                gas_used / gas_limit
            })
            .collect();
        let mut base_fee_per_gas =
            blocks.iter().map(|header| header.base_fee_per_gas.unwrap_or_default()).collect::<Vec<_>>();
        // TODO(EIP1559): Remove this when proper base fee computation: if gas_ratio > 50%, increase base_fee_per_gas
        // Duplicates the last base fee (safe: `blocks` is non-empty, checked above),
        // presumably to fill the extra "next block" slot of `FeeHistory` — TODO confirm.
        base_fee_per_gas.extend_from_within((base_fee_per_gas.len() - 1)..);
        Ok(FeeHistory {
            base_fee_per_gas: base_fee_per_gas.into_iter().map(Into::into).collect(),
            gas_used_ratio,
            oldest_block: start_block,
            reward: Some(vec![]),
            ..Default::default()
        })
    }
    async fn gas_price(&self) -> EthApiResult<U256> {
        // The gas price is Kakarot's base fee, read from the core contract on-chain.
        let kakarot_contract = KakarotCoreReader::new(*KAKAROT_ADDRESS, self.starknet_provider_inner());
        let span = tracing::span!(tracing::Level::INFO, "sn::base_fee");
        let gas_price =
            kakarot_contract.get_base_fee().call().instrument(span).await.map_err(ExecutionError::from)?.base_fee;
        Ok(into_via_wrapper!(gas_price))
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/receipts.rs | src/providers/eth_provider/receipts.rs | use super::database::{filter::EthDatabaseFilterBuilder, types::receipt::StoredTransactionReceipt};
use crate::providers::eth_provider::{
database::{
ethereum::EthereumBlockStore,
filter::{self},
types::receipt::ExtendedTxReceipt,
},
provider::{EthApiResult, EthDataProvider},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::B256;
use async_trait::async_trait;
use auto_impl::auto_impl;
use mongodb::bson::doc;
/// Receipt-related Ethereum RPC surface, backed by the database.
#[async_trait]
#[auto_impl(Arc, &)]
pub trait ReceiptProvider {
    /// Returns the transaction receipt by hash of the transaction.
    async fn transaction_receipt(&self, hash: B256) -> EthApiResult<Option<ExtendedTxReceipt>>;
    /// Returns the block receipts for a block.
    async fn block_receipts(&self, block_id: Option<BlockId>) -> EthApiResult<Option<Vec<ExtendedTxReceipt>>>;
}
#[async_trait]
impl<SP> ReceiptProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    async fn transaction_receipt(&self, hash: B256) -> EthApiResult<Option<ExtendedTxReceipt>> {
        // Look the receipt up by transaction hash.
        let filter = EthDatabaseFilterBuilder::<filter::Receipt>::default().with_tx_hash(&hash).build();
        Ok(self.database().get_one::<StoredTransactionReceipt>(filter, None).await?.map(Into::into))
    }
    async fn block_receipts(&self, block_id: Option<BlockId>) -> EthApiResult<Option<Vec<ExtendedTxReceipt>>> {
        // Default to the latest block when no id is provided.
        match block_id.unwrap_or_else(|| BlockNumberOrTag::Latest.into()) {
            BlockId::Number(number_or_tag) => {
                let block_number = self.tag_into_block_number(number_or_tag).await?;
                // Distinguish "unknown block" (None) from "block with no receipts" (Some([])).
                if !self.database().block_exists(block_number.into()).await? {
                    return Ok(None);
                }
                let filter =
                    EthDatabaseFilterBuilder::<filter::Receipt>::default().with_block_number(block_number).build();
                let tx: Vec<StoredTransactionReceipt> = self.database().get(filter, None).await?;
                Ok(Some(tx.into_iter().map(Into::into).collect()))
            }
            BlockId::Hash(hash) => {
                if !self.database().block_exists(hash.block_hash.into()).await? {
                    return Ok(None);
                }
                let filter =
                    EthDatabaseFilterBuilder::<filter::Receipt>::default().with_block_hash(&hash.block_hash).build();
                Ok(Some(self.database().get_and_map_to::<_, StoredTransactionReceipt>(filter, None).await?))
            }
        }
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/blocks.rs | src/providers/eth_provider/blocks.rs | use super::{
database::{
ethereum::EthereumBlockStore,
types::{header::ExtendedBlock, transaction::ExtendedTransaction},
},
error::KakarotError,
};
use crate::providers::eth_provider::{
database::ethereum::EthereumTransactionStore,
provider::{EthApiResult, EthDataProvider},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{B256, U256, U64};
use alloy_rpc_types::Header;
use async_trait::async_trait;
use auto_impl::auto_impl;
use mongodb::bson::doc;
use tracing::Instrument;
/// Ethereum block provider trait.
#[async_trait]
#[auto_impl(Arc, &)]
pub trait BlockProvider {
    /// Get header by block id
    async fn header(&self, block_id: &BlockId) -> EthApiResult<Option<Header>>;
    /// Returns the latest block number.
    async fn block_number(&self) -> EthApiResult<U64>;
    /// Returns a block by hash. Block can be full or just the hashes of the transactions.
    async fn block_by_hash(&self, hash: B256, full: bool) -> EthApiResult<Option<ExtendedBlock>>;
    /// Returns a block by number. Block can be full or just the hashes of the transactions.
    async fn block_by_number(&self, number_or_tag: BlockNumberOrTag, full: bool)
        -> EthApiResult<Option<ExtendedBlock>>;
    /// Returns the transaction count for a block by hash.
    async fn block_transaction_count_by_hash(&self, hash: B256) -> EthApiResult<Option<U256>>;
    /// Returns the transaction count for a block by number.
    async fn block_transaction_count_by_number(&self, number_or_tag: BlockNumberOrTag) -> EthApiResult<Option<U256>>;
    /// Returns the transactions for a block.
    async fn block_transactions(&self, block_id: Option<BlockId>) -> EthApiResult<Option<Vec<ExtendedTransaction>>>;
}
#[async_trait]
impl<SP> BlockProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    async fn header(&self, block_id: &BlockId) -> EthApiResult<Option<Header>> {
        // Normalize the id (hash or number/tag) before querying the database.
        let block_hash_or_number = self.block_id_into_block_number_or_hash(*block_id).await?;
        Ok(self.database().header(block_hash_or_number).await?)
    }
    async fn block_number(&self) -> EthApiResult<U64> {
        let block_number = match self.database().latest_header().await? {
            // In case the database is empty, use the starknet provider
            None => {
                let span = tracing::span!(tracing::Level::INFO, "sn::block_number");
                U64::from(
                    self.starknet_provider_inner().block_number().instrument(span).await.map_err(KakarotError::from)?,
                )
            }
            Some(header) => {
                // A zero hash marks the not-yet-sealed pending block: the latest sealed
                // block is one behind it. Fix: use `saturating_sub` so a (degenerate)
                // pending header at height 0 cannot underflow/panic.
                let is_pending_block = header.hash.is_zero();
                U64::from(if is_pending_block { header.number.saturating_sub(1) } else { header.number })
            }
        };
        Ok(block_number)
    }
    async fn block_by_hash(&self, hash: B256, full: bool) -> EthApiResult<Option<ExtendedBlock>> {
        Ok(self.database().block(hash.into(), full).await?)
    }
    async fn block_by_number(
        &self,
        number_or_tag: BlockNumberOrTag,
        full: bool,
    ) -> EthApiResult<Option<ExtendedBlock>> {
        let block_number = self.tag_into_block_number(number_or_tag).await?;
        Ok(self.database().block(block_number.into(), full).await?)
    }
    async fn block_transaction_count_by_hash(&self, hash: B256) -> EthApiResult<Option<U256>> {
        self.database().transaction_count(hash.into()).await
    }
    async fn block_transaction_count_by_number(&self, number_or_tag: BlockNumberOrTag) -> EthApiResult<Option<U256>> {
        let block_number = self.tag_into_block_number(number_or_tag).await?;
        self.database().transaction_count(block_number.into()).await
    }
    async fn block_transactions(&self, block_id: Option<BlockId>) -> EthApiResult<Option<Vec<ExtendedTransaction>>> {
        let block_hash_or_number = self
            .block_id_into_block_number_or_hash(block_id.unwrap_or_else(|| BlockNumberOrTag::Latest.into()))
            .await?;
        // None means "block unknown"; an existing block with no txs yields Some([]).
        if !self.database().block_exists(block_hash_or_number).await? {
            return Ok(None);
        }
        Ok(Some(self.database().transactions(block_hash_or_number).await?))
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/logs.rs | src/providers/eth_provider/logs.rs | use super::{
constant::MAX_LOGS,
database::{filter::EthDatabaseFilterBuilder, types::log::StoredLog},
error::EthApiError,
};
use crate::providers::eth_provider::{
database::{
filter::{self},
FindOpts,
},
provider::{EthApiResult, EthDataProvider},
BlockProvider,
};
use alloy_rpc_types::{Filter, FilterChanges};
use async_trait::async_trait;
use auto_impl::auto_impl;
/// Log-related Ethereum RPC surface (`eth_getLogs`).
#[async_trait]
#[auto_impl(Arc, &)]
pub trait LogProvider: BlockProvider {
    /// Returns the logs matching the given filter (by block hash or block range,
    /// topics and addresses).
    async fn get_logs(&self, filter: Filter) -> EthApiResult<FilterChanges>;
}
#[async_trait]
impl<SP> LogProvider for EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    async fn get_logs(&self, filter: Filter) -> EthApiResult<FilterChanges> {
        // Create the database filter.
        let mut builder = EthDatabaseFilterBuilder::<filter::Log>::default();
        // Fix: replaced the `is_some()` + `unwrap()` pair with `if let`, removing the
        // redundant unwrap while keeping behavior identical.
        builder = if let Some(block_hash) = filter.get_block_hash() {
            // We filter by block hash on matching the exact block hash.
            builder.with_block_hash(&block_hash)
        } else {
            let current_block = self.block_number().await?;
            let current_block =
                current_block.try_into().map_err(|_| EthApiError::UnknownBlockNumber(Some(current_block.to())))?;
            let from = filter.get_from_block().unwrap_or_default();
            let to = filter.get_to_block().unwrap_or(current_block);
            // Clamp the range to the chain head; an empty/inverted range short-circuits.
            let (from, to) = match (from, to) {
                (from, to) if from > current_block || to < from => return Ok(FilterChanges::Empty),
                (from, to) if to > current_block => (from, current_block),
                other => other,
            };
            // We filter by block number using $gte and $lte.
            builder.with_block_number_range(from, to)
        };
        // TODO: this will work for now but isn't very efficient. Would need to:
        // 1. Create the bloom filter from the topics
        // 2. Query the database for logs within block range with the bloom filter
        // 3. Filter this reduced set of logs by the topics
        // 4. Limit the number of logs returned
        // Convert the topics to a MongoDB filter and add it to the database filter
        builder = builder.with_topics(&filter.topics);
        // Add the addresses
        builder = builder.with_addresses(&filter.address.into_iter().collect::<Vec<_>>());
        Ok(FilterChanges::Logs(
            self.database()
                .get_and_map_to::<_, StoredLog>(
                    builder.build(),
                    // Cap the result size when MAX_LOGS is configured.
                    (*MAX_LOGS).map(|limit| FindOpts::default().with_limit(limit)),
                )
                .await?,
        ))
    }
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/provider.rs | src/providers/eth_provider/provider.rs | use super::{
constant::CALL_REQUEST_GAS_LIMIT,
database::{ethereum::EthereumBlockStore, Database},
error::{EthApiError, EvmError, ExecutionError, TransactionError},
starknet::kakarot_core::{
self,
core::{CallInput, KakarotCoreReader, Uint256},
KAKAROT_ADDRESS,
},
};
use crate::{
constants::ETH_CHAIN_ID,
into_via_try_wrapper, into_via_wrapper,
models::block::{EthBlockId, EthBlockNumberOrTag},
providers::{
eth_provider::{BlockProvider, GasProvider, LogProvider, ReceiptProvider, StateProvider, TransactionProvider},
sn_provider::StarknetProvider,
},
};
use alloy_eips::{BlockId, BlockNumberOrTag};
use alloy_primitives::{TxKind, U256};
use alloy_rpc_types::{BlockHashOrNumber, TransactionRequest};
use cainome::cairo_serde::CairoArrayLegacy;
use eyre::Result;
use itertools::Itertools;
use mongodb::bson::doc;
use num_traits::cast::ToPrimitive;
use starknet::core::types::Felt;
use tracing::{instrument, Instrument};
#[cfg(feature = "hive")]
use {
crate::providers::eth_provider::error::SignatureError,
crate::providers::eth_provider::starknet::kakarot_core::{
account_contract::AccountContractReader, starknet_address,
},
crate::providers::eth_provider::utils::contract_not_found,
alloy_primitives::Address,
};
/// A type alias representing a result type for Ethereum API operations.
///
/// This alias is used to simplify function signatures that return a `Result`
/// with an [`EthApiError`] as the error type.
pub type EthApiResult<T> = Result<T, EthApiError>;
/// A trait that defines the interface for an Ethereum Provider.
///
/// Blanket-implemented below for any type that provides all of the sub-provider traits.
pub trait EthereumProvider:
    GasProvider + StateProvider + TransactionProvider + ReceiptProvider + LogProvider + BlockProvider
{
}
impl<T> EthereumProvider for T where
    T: GasProvider + StateProvider + TransactionProvider + ReceiptProvider + LogProvider + BlockProvider
{
}
/// Structure that implements the `EthereumProvider` trait.
/// Uses access to a database for certain data, while
/// the rest is fetched from the Starknet Provider.
#[derive(Debug, Clone)]
pub struct EthDataProvider<SP: starknet::providers::Provider + Send + Sync> {
    // Indexed Ethereum-shaped data (blocks, transactions, receipts, logs).
    database: Database,
    // Live access to the underlying Starknet network.
    starknet_provider: StarknetProvider<SP>,
    /// Ethereum chain id served by this provider.
    pub chain_id: u64,
}
impl<SP> EthDataProvider<SP>
where
    SP: starknet::providers::Provider + Send + Sync,
{
    /// Returns a reference to the database.
    pub const fn database(&self) -> &Database {
        &self.database
    }
    /// Returns a reference to the Starknet provider.
    pub const fn starknet_provider(&self) -> &StarknetProvider<SP> {
        &self.starknet_provider
    }
    /// Returns a reference to the underlying SP provider.
    // NOTE(review): coerces `&StarknetProvider<SP>` to `&SP`, which presumably relies
    // on `StarknetProvider<SP>: Deref<Target = SP>` — confirm in sn_provider.
    pub fn starknet_provider_inner(&self) -> &SP {
        &self.starknet_provider
    }
}
impl<SP> EthDataProvider<SP>
where
SP: starknet::providers::Provider + Send + Sync,
{
/// Creates a new [`EthDataProvider`] from a database handle and a Starknet provider,
/// using the globally configured `ETH_CHAIN_ID`.
pub fn new(database: Database, starknet_provider: StarknetProvider<SP>) -> Self {
    Self { database, starknet_provider, chain_id: *ETH_CHAIN_ID }
}
/// Prepare the call input for an estimate gas or call from a transaction request.
///
/// Converts every field of the Ethereum [`TransactionRequest`] into the Cairo-side
/// representation expected by Kakarot's `eth_call` entrypoint.
#[instrument(skip(self, request), name = "prepare_call")]
async fn prepare_call_input(
    &self,
    request: TransactionRequest,
    block_id: Option<BlockId>,
) -> EthApiResult<CallInput> {
    // unwrap option
    // A `Call` target maps to Some(address); anything else (e.g. create) maps to None.
    let to: kakarot_core::core::Option = {
        match request.to {
            Some(TxKind::Call(to)) => {
                kakarot_core::core::Option { is_some: Felt::ONE, value: into_via_wrapper!(to) }
            }
            _ => kakarot_core::core::Option { is_some: Felt::ZERO, value: Felt::ZERO },
        }
    };
    // Here we check if CallRequest.origin is None, if so, we insert origin = address(0)
    let from = into_via_wrapper!(request.from.unwrap_or_default());
    // Calldata is passed to Cairo as one felt per byte.
    let data = request.input.into_input().unwrap_or_default();
    let calldata: Vec<Felt> = data.into_iter().map_into().collect();
    let gas_limit = into_via_try_wrapper!(request.gas.unwrap_or(CALL_REQUEST_GAS_LIMIT))?;
    // We cannot unwrap_or_default() here because Kakarot.eth_call will
    // Reject transactions with gas_price < Kakarot.base_fee
    let gas_price = {
        let gas_price = match request.gas_price {
            Some(gas_price) => U256::from(gas_price),
            None => self.gas_price().await?,
        };
        into_via_try_wrapper!(gas_price)?
    };
    // Value is a 256-bit quantity; the high 128 bits are zeroed here.
    let value = Uint256 { low: into_via_try_wrapper!(request.value.unwrap_or_default())?, high: Felt::ZERO };
    // TODO: replace this by into_via_wrapper!(request.nonce.unwrap_or_default())
    // when we can simulate the transaction instead of calling `eth_call`
    let nonce = {
        match request.nonce {
            Some(nonce) => into_via_wrapper!(nonce),
            None => match request.from {
                // No sender: nonce is irrelevant, default to zero.
                None => Felt::ZERO,
                Some(address) => into_via_try_wrapper!(self.transaction_count(address, block_id).await?)?,
            },
        }
    };
    Ok(CallInput { nonce, from, to, gas_limit, gas_price, value, calldata })
}
/// Call the Kakarot contract with the given request.
pub(crate) async fn call_inner(
&self,
request: TransactionRequest,
block_id: Option<BlockId>,
) -> EthApiResult<CairoArrayLegacy<Felt>> {
tracing::trace!(?request);
let starknet_block_id = self.to_starknet_block_id(block_id).await?;
let call_input = self.prepare_call_input(request, block_id).await?;
let kakarot_contract = KakarotCoreReader::new(*KAKAROT_ADDRESS, self.starknet_provider_inner());
let span = tracing::span!(tracing::Level::INFO, "sn::eth_call");
let call_output = kakarot_contract
.eth_call(
&call_input.nonce,
&call_input.from,
&call_input.to,
&call_input.gas_limit,
&call_input.gas_price,
&call_input.value,
&call_input.calldata.len().into(),
&CairoArrayLegacy(call_input.calldata),
&Felt::ZERO,
&CairoArrayLegacy(vec![]),
)
.block_id(starknet_block_id)
.call()
.instrument(span)
.await
.map_err(ExecutionError::from)?;
let return_data = call_output.return_data;
if call_output.success == Felt::ZERO {
return Err(ExecutionError::from(EvmError::from(return_data.0)).into());
}
Ok(return_data)
}
/// Estimate the gas used in Kakarot for the given request.
pub(crate) async fn estimate_gas_inner(
&self,
request: TransactionRequest,
block_id: Option<BlockId>,
) -> EthApiResult<u128> {
let starknet_block_id = self.to_starknet_block_id(block_id).await?;
let call_input = self.prepare_call_input(request, block_id).await?;
let kakarot_contract = KakarotCoreReader::new(*KAKAROT_ADDRESS, self.starknet_provider_inner());
let span = tracing::span!(tracing::Level::INFO, "sn::eth_estimate_gas");
let estimate_gas_output = kakarot_contract
.eth_estimate_gas(
&call_input.nonce,
&call_input.from,
&call_input.to,
&call_input.gas_limit,
&call_input.gas_price,
&call_input.value,
&call_input.calldata.len().into(),
&CairoArrayLegacy(call_input.calldata),
&Felt::ZERO,
&CairoArrayLegacy(vec![]),
)
.block_id(starknet_block_id)
.call()
.instrument(span)
.await
.map_err(ExecutionError::from)?;
let return_data = estimate_gas_output.return_data;
if estimate_gas_output.success == Felt::ZERO {
return Err(ExecutionError::from(EvmError::from(return_data.0)).into());
}
let required_gas = estimate_gas_output.required_gas.to_u128().ok_or(TransactionError::GasOverflow)?;
Ok(required_gas)
}
/// Convert the given block id into a Starknet block id
#[instrument(skip_all, ret)]
pub async fn to_starknet_block_id(
&self,
block_id: Option<BlockId>,
) -> EthApiResult<starknet::core::types::BlockId> {
match block_id {
Some(BlockId::Hash(hash)) => Ok(EthBlockId::new(BlockId::Hash(hash)).try_into()?),
Some(BlockId::Number(number_or_tag)) => {
// There is a need to separate the BlockNumberOrTag case into three subcases
// because pending Starknet blocks don't have a number.
// 1. The block number corresponds to a Starknet pending block, then we return the pending tag
// 2. The block number corresponds to a Starknet sealed block, then we return the block number
// 3. The block number is not found, then we return an error
match number_or_tag {
BlockNumberOrTag::Number(number) => {
let header = self
.database
.header(number.into())
.await?
.ok_or(EthApiError::UnknownBlockNumber(Some(number)))?;
// If the block hash is zero, then the block corresponds to a Starknet pending block
if header.hash.is_zero() {
Ok(starknet::core::types::BlockId::Tag(starknet::core::types::BlockTag::Pending))
} else {
Ok(starknet::core::types::BlockId::Number(number))
}
}
_ => Ok(EthBlockNumberOrTag::from(number_or_tag).into()),
}
}
None => Ok(starknet::core::types::BlockId::Tag(starknet::core::types::BlockTag::Pending)),
}
}
/// Converts the given [`BlockNumberOrTag`] into a block number.
#[instrument(skip(self))]
pub(crate) async fn tag_into_block_number(&self, tag: BlockNumberOrTag) -> EthApiResult<u64> {
match tag {
// Converts the tag representing the earliest block into block number 0.
BlockNumberOrTag::Earliest => Ok(0),
// Converts the tag containing a specific block number into a `U64`.
BlockNumberOrTag::Number(number) => Ok(number),
// Returns `self.block_number()` which is the block number of the latest finalized block.
BlockNumberOrTag::Latest | BlockNumberOrTag::Finalized | BlockNumberOrTag::Safe => {
self.block_number().await.map(|x| x.to())
}
// Adds 1 to the block number of the latest finalized block.
BlockNumberOrTag::Pending => Ok(self.block_number().await?.to::<u64>().saturating_add(1)),
}
}
/// Converts the given [`BlockId`] into a [`BlockHashOrNumber`].
#[instrument(skip_all, ret)]
pub(crate) async fn block_id_into_block_number_or_hash(
&self,
block_id: BlockId,
) -> EthApiResult<BlockHashOrNumber> {
match block_id {
BlockId::Hash(hash) => Ok(hash.into()),
BlockId::Number(number_or_tag) => Ok(self.tag_into_block_number(number_or_tag).await?.into()),
}
}
}
#[cfg(feature = "hive")]
impl<SP> EthDataProvider<SP>
where
SP: starknet::providers::Provider + Send + Sync,
{
/// Deploy the EVM transaction signer if a corresponding contract is not found on
/// Starknet.
pub(crate) async fn deploy_evm_transaction_signer(&self, signer: Address) -> EthApiResult<()> {
use crate::providers::eth_provider::{
constant::hive::{DEPLOY_WALLET, DEPLOY_WALLET_NONCE},
error::EthereumDataFormatError,
};
use starknet::{
accounts::ExecutionV1,
core::{
types::{BlockTag, Call},
utils::get_selector_from_name,
},
};
let signer_starknet_address = starknet_address(signer);
let account_contract = AccountContractReader::new(signer_starknet_address, self.starknet_provider_inner());
let maybe_is_initialized = account_contract
.is_initialized()
.block_id(starknet::core::types::BlockId::Tag(BlockTag::Latest))
.call()
.await;
if contract_not_found(&maybe_is_initialized) {
let execution = ExecutionV1::new(
vec![Call {
to: *KAKAROT_ADDRESS,
selector: get_selector_from_name("deploy_externally_owned_account").unwrap(),
calldata: vec![into_via_wrapper!(signer)],
}],
&*DEPLOY_WALLET,
);
let mut nonce = DEPLOY_WALLET_NONCE.lock().await;
let current_nonce = *nonce;
let prepared_execution = execution
.nonce(current_nonce)
.max_fee(u64::MAX.into())
.prepared()
.map_err(|_| EthApiError::EthereumDataFormat(EthereumDataFormatError::TransactionConversion))?;
let _ = prepared_execution.send().await.map_err(|_| SignatureError::SigningFailure)?;
*nonce += Felt::ONE;
drop(nonce);
};
Ok(())
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/tx_pool.rs | src/providers/eth_provider/tx_pool.rs | use super::database::types::transaction::ExtendedTransaction;
use crate::providers::eth_provider::provider::EthApiResult;
use alloy_rpc_types_txpool::TxpoolContent;
use async_trait::async_trait;
use auto_impl::auto_impl;
use mongodb::bson::doc;
/// Ethereum provider trait. Used to abstract away the database and the network.
#[async_trait]
#[auto_impl(Arc, &)]
pub trait TxPoolProvider {
/// Returns a vec of pending pool transactions.
fn content(&self) -> TxpoolContent<ExtendedTransaction>;
/// Returns the content of the pending pool.
async fn txpool_content(&self) -> EthApiResult<TxpoolContent<ExtendedTransaction>>;
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/starknet/relayer.rs | src/providers/eth_provider/starknet/relayer.rs | use crate::{
constants::STARKNET_CHAIN_ID,
models::transaction::transaction_data_to_starknet_calldata,
providers::eth_provider::{
database::{ethereum::EthereumTransactionStore, types::transaction::EthStarknetHashes, Database},
error::{SignatureError, TransactionError},
provider::EthApiResult,
starknet::kakarot_core::{starknet_address, EXECUTE_FROM_OUTSIDE},
},
};
use reth_primitives::TransactionSigned;
use starknet::{
accounts::{Account, ConnectedAccount, ExecutionEncoding, ExecutionV1, SingleOwnerAccount},
core::types::{BlockTag, Felt, NonZeroFelt},
providers::Provider,
signers::{LocalWallet, SigningKey},
};
use std::{
env::var,
ops::Deref,
str::FromStr,
sync::{Arc, LazyLock},
};
/// Signer for all relayers
static RELAYER_SIGNER: LazyLock<LocalWallet> = LazyLock::new(|| {
LocalWallet::from_signing_key(SigningKey::from_secret_scalar(
Felt::from_str(&var("RELAYER_PRIVATE_KEY").expect("missing relayer private key"))
.expect("failed to parse relayer private key"),
))
});
/// A relayer holding an account and a balance.
///
/// The relayer is used to sign transactions and broadcast them on the network.
#[derive(Debug)]
pub struct Relayer<SP: Provider + Send + Sync> {
/// The account used to sign and broadcast the transaction
account: SingleOwnerAccount<SP, LocalWallet>,
/// The balance of the relayer
balance: Felt,
/// The database used to store the relayer's transaction hashes map (Ethereum -> Starknet)
database: Option<Arc<Database>>,
}
impl<SP> Relayer<SP>
where
SP: Provider + Send + Sync,
{
/// Create a new relayer with the provided Starknet provider, address, balance.
pub fn new(address: Felt, balance: Felt, provider: SP, database: Option<Arc<Database>>) -> Self {
let relayer = SingleOwnerAccount::new(
provider,
RELAYER_SIGNER.clone(),
address,
*STARKNET_CHAIN_ID,
ExecutionEncoding::New,
);
Self { account: relayer, balance, database }
}
/// Relay the provided Ethereum transaction on the Starknet network.
/// The relayer nonce is directly fetched from the chain to have the most up-to-date value.
/// This is a way to avoid nonce issues.
///
/// Returns the corresponding Starknet transaction hash.
pub async fn relay_transaction(&self, transaction: &TransactionSigned) -> EthApiResult<Felt> {
// Transform the transaction's data to Starknet calldata
let relayer_address = self.account.address();
let calldata = transaction_data_to_starknet_calldata(transaction, relayer_address)?;
// Recover the signer
let eoa_address = transaction.recover_signer().ok_or(SignatureError::Recovery)?;
let eoa_address = starknet_address(eoa_address);
// Construct the call
let call = starknet::core::types::Call { to: eoa_address, selector: *EXECUTE_FROM_OUTSIDE, calldata };
let mut execution = ExecutionV1::new(vec![call], &self.account);
// Fetch the relayer nonce from the Starknet provider
let relayer_nonce = self
.account
.provider()
.get_nonce(starknet::core::types::BlockId::Tag(BlockTag::Pending), relayer_address)
.await
.unwrap_or_default();
execution = execution.nonce(relayer_nonce);
// We set the max fee to the balance of the account / 5. This means that the account could
// send up to 5 transactions before hitting a feeder gateway error.
execution = execution.max_fee(self.balance.floor_div(&NonZeroFelt::from_felt_unchecked(5.into())));
let prepared = execution.prepared().map_err(|_| SignatureError::SigningFailure)?;
let res = prepared.send().await.map_err(|err| TransactionError::Broadcast(err.into()))?;
// Store a transaction hash mapping from Ethereum to Starknet in the database
if let Some(database) = &self.database {
database
.upsert_transaction_hashes(EthStarknetHashes {
eth_hash: transaction.hash,
starknet_hash: res.transaction_hash,
})
.await?;
}
Ok(res.transaction_hash)
}
pub fn address(&self) -> Felt {
self.account.address()
}
}
impl<SP> Deref for Relayer<SP>
where
SP: Provider + Send + Sync,
{
type Target = SingleOwnerAccount<SP, LocalWallet>;
fn deref(&self) -> &Self::Target {
&self.account
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/starknet/mod.rs | src/providers/eth_provider/starknet/mod.rs | #![allow(non_snake_case, clippy::derive_partial_eq_without_eq)]
pub mod kakarot_core;
pub mod relayer;
use cainome::rs::abigen_legacy;
use starknet::core::types::Felt;
use std::sync::LazyLock;
abigen_legacy!(ERC20, "./.kakarot/artifacts/ERC20.json");
/// Starknet native token address
pub static STARKNET_NATIVE_TOKEN: LazyLock<Felt> =
LazyLock::new(|| Felt::from_hex("0x49d36570d4e46f48e99674bd3fcc84644ddd6b96f7c741b1562b82f9e004dc7").unwrap());
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/starknet/kakarot_core.rs | src/providers/eth_provider/starknet/kakarot_core.rs | #![allow(clippy::too_many_arguments)]
use crate::into_via_wrapper;
use alloy_primitives::{Address, B256};
use cainome::rs::abigen_legacy;
use dotenvy::dotenv;
use starknet::{
core::{types::Felt, utils::get_contract_address},
macros::selector,
};
use std::{str::FromStr, sync::LazyLock};
// Contract ABIs
pub mod account_contract {
use super::abigen_legacy;
abigen_legacy!(AccountContract, "./.kakarot/artifacts/account_contract.json");
}
#[allow(clippy::too_many_arguments)]
pub mod core {
use super::{abigen_legacy, Felt};
abigen_legacy!(KakarotCore, "./.kakarot/artifacts/kakarot.json");
#[allow(missing_debug_implementations)]
pub struct CallInput {
pub(crate) nonce: Felt,
pub(crate) from: Felt,
pub(crate) to: self::Option,
pub(crate) gas_limit: Felt,
pub(crate) gas_price: Felt,
pub(crate) value: Uint256,
pub(crate) calldata: Vec<Felt>,
}
}
fn env_var_to_field_element(var_name: &str) -> Felt {
dotenv().ok();
let env_var = std::env::var(var_name).unwrap_or_else(|_| panic!("Missing environment variable {var_name}"));
Felt::from_str(&env_var).unwrap_or_else(|_| panic!("Invalid hex string for {var_name}"))
}
/// Kakarot address
pub static KAKAROT_ADDRESS: LazyLock<Felt> = LazyLock::new(|| env_var_to_field_element("KAKAROT_ADDRESS"));
/// Uninitialized account class hash
pub static UNINITIALIZED_ACCOUNT_CLASS_HASH: LazyLock<Felt> =
LazyLock::new(|| env_var_to_field_element("UNINITIALIZED_ACCOUNT_CLASS_HASH"));
/// Ethereum send transaction selector
pub static ETH_SEND_TRANSACTION: LazyLock<Felt> = LazyLock::new(|| selector!("eth_send_transaction"));
/// Execute from outside selector
pub static EXECUTE_FROM_OUTSIDE: LazyLock<Felt> = LazyLock::new(|| selector!("execute_from_outside"));
/// Maximum number of felts in calldata
pub static MAX_FELTS_IN_CALLDATA: LazyLock<usize> = LazyLock::new(|| {
usize::from_str(
&std::env::var("MAX_FELTS_IN_CALLDATA")
.unwrap_or_else(|_| panic!("Missing environment variable MAX_FELTS_IN_CALLDATA")),
)
.expect("Failed to parse MAX_FELTS_IN_CALLDATA")
});
pub fn get_white_listed_eip_155_transaction_hashes() -> Vec<B256> {
std::env::var("WHITE_LISTED_EIP_155_TRANSACTION_HASHES")
.unwrap_or_else(|_| panic!("Missing environment variable WHITE_LISTED_EIP_155_TRANSACTION_HASHES"))
.replace(' ', "")
.split(',')
.map(|hash| B256::from_str(hash).unwrap())
.collect()
}
// Kakarot utils
/// Compute the starknet address given a eth address
#[inline]
pub fn starknet_address(address: Address) -> Felt {
let evm_address = into_via_wrapper!(address);
get_contract_address(evm_address, *UNINITIALIZED_ACCOUNT_CLASS_HASH, &[Felt::ONE, evm_address], *KAKAROT_ADDRESS)
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/contracts/mod.rs | src/providers/eth_provider/contracts/mod.rs | pub mod erc20;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/contracts/erc20.rs | src/providers/eth_provider/contracts/erc20.rs | #![allow(clippy::pub_underscore_fields)]
use crate::providers::eth_provider::{
error::ExecutionError,
provider::{EthApiResult, EthereumProvider},
};
use alloy_dyn_abi::DynSolType;
use alloy_eips::BlockId;
use alloy_primitives::{Address, Bytes, TxKind, U256};
use alloy_rpc_types::{request::TransactionInput, TransactionRequest};
use alloy_sol_types::{sol, SolCall};
sol! {
#[sol(rpc)]
contract ERC20Contract {
function balanceOf(address account) external view returns (uint256);
function allowance(address owner, address spender) external view returns (uint256);
function decimals() external view returns (uint8);
function name() external view returns (string);
function symbol() external view returns (string);
}
}
/// Abstraction for a Kakarot ERC20 contract.
#[derive(Debug)]
pub struct EthereumErc20<P: EthereumProvider> {
/// The address of the ERC20 contract.
pub address: Address,
/// The provider for interacting with the Ethereum network.
pub provider: P,
}
impl<P: EthereumProvider> EthereumErc20<P> {
/// Creates a new instance of [`EthereumErc20`].
pub const fn new(address: Address, provider: P) -> Self {
Self { address, provider }
}
/// Gets the balance of the specified address.
pub async fn balance_of(&self, evm_address: Address, block_id: BlockId) -> EthApiResult<U256> {
// Encode the calldata for the balanceOf function call
let calldata = ERC20Contract::balanceOfCall { account: evm_address }.abi_encode();
// Call the contract with the encoded calldata
let ret = self.call_contract(calldata, block_id).await?;
// Deserialize the returned bytes into a U256 balance
let balance = U256::try_from_be_slice(&ret)
.ok_or_else(|| ExecutionError::Other("failed to deserialize balance".to_string()))?;
Ok(balance)
}
/// Gets the number of decimals the token uses.
pub async fn decimals(&self, block_id: BlockId) -> EthApiResult<U256> {
// Encode the calldata for the decimals function call
let calldata = ERC20Contract::decimalsCall {}.abi_encode();
// Call the contract with the encoded calldata
let ret = self.call_contract(calldata, block_id).await?;
// Deserialize the returned bytes into a U256 representing decimals
let decimals = U256::try_from_be_slice(&ret)
.ok_or_else(|| ExecutionError::Other("failed to deserialize decimals".to_string()))?;
Ok(decimals)
}
/// Gets the name of the token.
pub async fn name(&self, block_id: BlockId) -> EthApiResult<String> {
// Encode the calldata for the name function call
let calldata = ERC20Contract::nameCall {}.abi_encode();
// Call the contract with the encoded calldata
let ret = self.call_contract(calldata, block_id).await?;
// Deserialize the returned bytes into a String representing the name
let name = DynSolType::String
.abi_decode(&ret)
.map_err(|_| ExecutionError::Other("failed to deserialize name".to_string()))?;
Ok(name.as_str().unwrap_or_default().to_string())
}
/// Gets the symbol of the token.
pub async fn symbol(&self, block_id: BlockId) -> EthApiResult<String> {
// Encode the calldata for the symbol function call
let calldata = ERC20Contract::symbolCall {}.abi_encode();
// Call the contract with the encoded calldata
let ret = self.call_contract(calldata, block_id).await?;
// Deserialize the returned bytes into a String representing the symbol
let symbol = DynSolType::String
.abi_decode(&ret)
.map_err(|_| ExecutionError::Other("failed to deserialize symbol".to_string()))?;
Ok(symbol.as_str().unwrap_or_default().to_string())
}
/// Gets the allowance the owner has granted to the spender.
pub async fn allowance(&self, owner: Address, spender: Address, block_id: BlockId) -> EthApiResult<U256> {
// Encode the calldata for the allowance function call
let calldata = ERC20Contract::allowanceCall { owner, spender }.abi_encode();
// Call the contract with the encoded calldata
let ret = self.call_contract(calldata, block_id).await?;
// Deserialize the returned bytes into a U256 representing the allowance
let allowance = U256::try_from_be_slice(&ret)
.ok_or_else(|| ExecutionError::Other("failed to deserialize allowance".to_string()))?;
Ok(allowance)
}
/// Calls the contract with the given calldata.
async fn call_contract(&self, calldata: Vec<u8>, block_id: BlockId) -> EthApiResult<Bytes> {
self.provider
.call(
TransactionRequest {
from: Some(Address::default()),
to: Some(TxKind::Call(self.address)),
gas_price: Some(0),
gas: Some(1_000_000),
value: Some(U256::ZERO),
input: TransactionInput { input: Some(calldata.into()), data: None },
..Default::default()
},
Some(block_id),
None,
None,
)
.await
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/ethereum.rs | src/providers/eth_provider/database/ethereum.rs | use super::{
filter,
filter::EthDatabaseFilterBuilder,
types::{
header::{ExtendedBlock, StoredHeader},
transaction::{ExtendedTransaction, StoredTransaction},
},
Database,
};
use crate::providers::eth_provider::{
database::types::transaction::{EthStarknetHashes, StoredEthStarknetTransactionHash},
error::EthApiError,
};
use alloy_consensus::constants::EMPTY_ROOT_HASH;
use alloy_primitives::{B256, U256};
use alloy_rlp::Encodable;
use alloy_rpc_types::{Block, BlockHashOrNumber, BlockTransactions, Header};
use alloy_serde::WithOtherFields;
use async_trait::async_trait;
use mongodb::bson::doc;
use reth_primitives::BlockBody;
use tracing::instrument;
/// Trait for interacting with a database that stores Ethereum typed
/// transaction data.
#[async_trait]
pub trait EthereumTransactionStore {
/// Returns the transaction with the given hash. Returns None if the
/// transaction is not found.
async fn transaction(&self, hash: &B256) -> Result<Option<ExtendedTransaction>, EthApiError>;
/// Returns all transactions for the given block hash or number.
async fn transactions(
&self,
block_hash_or_number: BlockHashOrNumber,
) -> Result<Vec<ExtendedTransaction>, EthApiError>;
/// Upserts the given transaction.
async fn upsert_transaction(&self, transaction: ExtendedTransaction) -> Result<(), EthApiError>;
/// Upserts the given transaction hash mapping (Ethereum -> Starknet).
async fn upsert_transaction_hashes(&self, transaction_hashes: EthStarknetHashes) -> Result<(), EthApiError>;
}
#[async_trait]
impl EthereumTransactionStore for Database {
#[instrument(skip_all, name = "db::transaction", err)]
async fn transaction(&self, hash: &B256) -> Result<Option<ExtendedTransaction>, EthApiError> {
let filter = EthDatabaseFilterBuilder::<filter::Transaction>::default().with_tx_hash(hash).build();
Ok(self.get_one::<StoredTransaction>(filter, None).await?.map(Into::into))
}
#[instrument(skip_all, name = "db::transactions", err)]
async fn transactions(
&self,
block_hash_or_number: BlockHashOrNumber,
) -> Result<Vec<ExtendedTransaction>, EthApiError> {
let filter = EthDatabaseFilterBuilder::<filter::Transaction>::default()
.with_block_hash_or_number(block_hash_or_number)
.build();
Ok(self.get::<StoredTransaction>(filter, None).await?.into_iter().map(Into::into).collect())
}
#[instrument(skip_all, name = "db::upsert_transaction", err)]
async fn upsert_transaction(&self, transaction: ExtendedTransaction) -> Result<(), EthApiError> {
let filter = EthDatabaseFilterBuilder::<filter::Transaction>::default().with_tx_hash(&transaction.hash).build();
Ok(self.update_one(StoredTransaction::from(transaction), filter, true).await?)
}
#[instrument(skip_all, name = "db::upsert_transaction_hashes", err)]
async fn upsert_transaction_hashes(&self, transaction_hashes: EthStarknetHashes) -> Result<(), EthApiError> {
let filter = EthDatabaseFilterBuilder::<filter::EthStarknetTransactionHash>::default()
.with_tx_hash(&transaction_hashes.eth_hash)
.build();
Ok(self.update_one(StoredEthStarknetTransactionHash::from(transaction_hashes), filter, true).await?)
}
}
/// Trait for interacting with a database that stores Ethereum typed
/// blocks.
#[async_trait]
pub trait EthereumBlockStore {
/// Returns the latest block header.
async fn latest_header(&self) -> Result<Option<Header>, EthApiError>;
/// Returns the header for the given hash or number. Returns None if the
/// header is not found.
async fn header(&self, block_hash_or_number: BlockHashOrNumber) -> Result<Option<Header>, EthApiError>;
/// Returns the block for the given hash or number. Returns None if the
/// block is not found.
async fn block(
&self,
block_hash_or_number: BlockHashOrNumber,
full: bool,
) -> Result<Option<ExtendedBlock>, EthApiError>;
/// Returns true if the block with the given hash or number exists.
#[instrument(skip(self), name = "db::block_exists", err)]
async fn block_exists(&self, block_hash_or_number: BlockHashOrNumber) -> Result<bool, EthApiError> {
self.header(block_hash_or_number).await.map(|header| header.is_some())
}
/// Returns the transaction count for the given block hash or number. Returns None if the
/// block is not found.
async fn transaction_count(&self, block_hash_or_number: BlockHashOrNumber) -> Result<Option<U256>, EthApiError>;
}
#[async_trait]
impl EthereumBlockStore for Database {
#[instrument(skip_all, name = "db::latest_header", err)]
async fn latest_header(&self) -> Result<Option<Header>, EthApiError> {
Ok(self
.get_one::<StoredHeader>(None, doc! { "header.number": -1 })
.await
.map(|maybe_sh| maybe_sh.map(Into::into))?)
}
#[instrument(skip_all, name = "db::header", err)]
async fn header(&self, block_hash_or_number: BlockHashOrNumber) -> Result<Option<Header>, EthApiError> {
let filter = EthDatabaseFilterBuilder::<filter::Header>::default()
.with_block_hash_or_number(block_hash_or_number)
.build();
Ok(self
.get_one::<StoredHeader>(filter, None)
.await
.map_err(|_| EthApiError::UnknownBlock(block_hash_or_number))?
.map(Into::into))
}
#[instrument(skip_all, name = "db::block", err)]
async fn block(
&self,
block_hash_or_number: BlockHashOrNumber,
full: bool,
) -> Result<Option<ExtendedBlock>, EthApiError> {
let maybe_header = self.header(block_hash_or_number).await?;
if maybe_header.is_none() {
return Ok(None);
}
let header = maybe_header.unwrap();
// The withdrawals are not supported, hence the withdrawals_root should always be empty.
if let Some(withdrawals_root) = header.withdrawals_root {
if withdrawals_root != EMPTY_ROOT_HASH {
return Err(EthApiError::Unsupported("withdrawals"));
}
}
let transactions = self.transactions(block_hash_or_number).await?;
let block_transactions = if full {
BlockTransactions::Full(transactions.clone())
} else {
BlockTransactions::Hashes(transactions.iter().map(|tx| tx.hash).collect())
};
let block = reth_primitives::Block {
body: BlockBody {
transactions: transactions.into_iter().map(TryFrom::try_from).collect::<Result<_, _>>()?,
withdrawals: Some(Default::default()),
..Default::default()
},
header: header.clone().try_into()?,
};
// This is how Reth computes the block size.
// `https://github.com/paradigmxyz/reth/blob/v0.2.0-beta.5/crates/rpc/rpc-types-compat/src/block.rs#L66`
let size = block.length();
Ok(Some(WithOtherFields::new(Block {
header,
transactions: block_transactions,
size: Some(U256::from(size)),
withdrawals: Some(Default::default()),
..Default::default()
})))
}
#[instrument(skip_all, name = "db::transaction_count", err)]
async fn transaction_count(&self, block_hash_or_number: BlockHashOrNumber) -> Result<Option<U256>, EthApiError> {
if !self.block_exists(block_hash_or_number).await? {
return Ok(None);
}
let filter = EthDatabaseFilterBuilder::<filter::Transaction>::default()
.with_block_hash_or_number(block_hash_or_number)
.build();
let count = self.count::<StoredTransaction>(filter).await?;
Ok(Some(U256::from(count)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::mongo::{MongoFuzzer, RANDOM_BYTES_SIZE};
use arbitrary::Arbitrary;
use rand::{self, Rng};
use starknet::core::types::Felt;
#[tokio::test(flavor = "multi_thread")]
async fn test_ethereum_transaction_store() {
// Initialize MongoDB fuzzer
let mut mongo_fuzzer = MongoFuzzer::new(RANDOM_BYTES_SIZE).await;
// Mock a database with 100 transactions, receipts, and headers
let database = mongo_fuzzer.mock_database(100).await;
// Generate random bytes for test data
let bytes: Vec<u8> = (0..RANDOM_BYTES_SIZE).map(|_| rand::random()).collect();
let mut unstructured = arbitrary::Unstructured::new(&bytes);
// Test fetching existing and non existing transactions by their hash
test_get_transaction(&database, &mongo_fuzzer, &mut unstructured).await;
// Test fetching transactions by their block hash
test_get_transactions_by_block_hash(&database, &mongo_fuzzer).await;
// Test fetching transactions by their block number
test_get_transactions_by_block_number(&database, &mongo_fuzzer).await;
// Test upserting transactions into the database
test_upsert_transactions(&mut unstructured, &database).await;
}
async fn test_get_transaction(
database: &Database,
mongo_fuzzer: &MongoFuzzer,
unstructured: &mut arbitrary::Unstructured<'_>,
) {
// Fetch the first transaction from the mock database
let first_transaction = mongo_fuzzer.transactions.first().unwrap();
// Test retrieving an existing transaction by its hash
assert_eq!(database.transaction(&first_transaction.hash).await.unwrap(), Some(first_transaction.into()));
// Generate a transaction not present in the database
let unstored_transaction = StoredTransaction::arbitrary(unstructured).unwrap();
// Test retrieving a non-existent transaction by its hash
assert_eq!(database.transaction(&unstored_transaction.hash).await.unwrap(), None);
}
async fn test_get_transactions_by_block_hash(database: &Database, mongo_fuzzer: &MongoFuzzer) {
// Fetch the first block hash from the mock database
let first_block_hash = mongo_fuzzer.headers.first().unwrap().hash;
// Fetch transactions belonging to the first block hash
let transactions_first_block_hash = mongo_fuzzer
.transactions
.iter()
.filter(|tx| tx.block_hash.unwrap() == first_block_hash)
.map(Into::into)
.collect::<Vec<_>>();
// Test retrieving transactions by block hash
assert_eq!(database.transactions(first_block_hash.into()).await.unwrap(), transactions_first_block_hash);
}
async fn test_get_transactions_by_block_number(database: &Database, mongo_fuzzer: &MongoFuzzer) {
// Fetch the first block number from the mock database
let first_block_number = mongo_fuzzer.headers.first().unwrap().number;
// Fetch transactions belonging to the first block number
let transactions_first_block_number = mongo_fuzzer
.transactions
.iter()
.filter(|tx| tx.block_number.unwrap() == first_block_number)
.map(Into::into)
.collect::<Vec<_>>();
// Test retrieving transactions by block number
assert_eq!(database.transactions(first_block_number.into()).await.unwrap(), transactions_first_block_number);
}
async fn test_upsert_transactions(unstructured: &mut arbitrary::Unstructured<'_>, database: &Database) {
// Generate and upsert a mock transaction into the database
let mock_transaction = StoredTransaction::arbitrary(unstructured).unwrap();
database.upsert_transaction(mock_transaction.clone().tx).await.unwrap();
// Test retrieving an upserted transaction by its hash
assert_eq!(database.transaction(&mock_transaction.hash).await.unwrap(), Some(mock_transaction.into()));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_ethereum_block_store() {
// Initialize MongoDB fuzzer
let mut mongo_fuzzer = MongoFuzzer::new(RANDOM_BYTES_SIZE).await;
// Mock a database with 100 transactions, receipts, and headers
let database = mongo_fuzzer.mock_database(100).await;
// Generate random bytes for test data
let bytes: Vec<u8> = (0..RANDOM_BYTES_SIZE).map(|_| rand::random()).collect();
let mut unstructured = arbitrary::Unstructured::new(&bytes);
// Test fetching existing and none existing header via blockhash and blocknumber from database
test_get_header(&database, &mongo_fuzzer).await;
// Test fetching existing and none existing block via blockhash and blocknumber from database
test_get_blocks(&database, &mongo_fuzzer, &mut unstructured).await;
// Test fetching existing and none existing transaction counts via blockhash and blocknumber from database
test_get_transaction_count(&database, &mongo_fuzzer).await;
}
async fn test_get_header(database: &Database, mongo_fuzzer: &MongoFuzzer) {
let header_block_hash = &mongo_fuzzer.headers.first().unwrap().header;
// Test retrieving header by block hash
assert_eq!(database.header(header_block_hash.hash.into()).await.unwrap().unwrap(), *header_block_hash);
// Test retrieving header by block number
assert_eq!(database.header(header_block_hash.number.into()).await.unwrap().unwrap(), *header_block_hash);
let mut rng = rand::thread_rng();
// Test retrieving non-existing header by block hash
assert_eq!(database.header(rng.gen::<B256>().into()).await.unwrap(), None);
// Test retrieving non-existing header by block number
assert_eq!(database.header(rng.gen::<u64>().into()).await.unwrap(), None);
}
async fn test_get_blocks(database: &Database, mongo_fuzzer: &MongoFuzzer, u: &mut arbitrary::Unstructured<'_>) {
let header = &mongo_fuzzer.headers.first().unwrap().header;
let block_hash = header.hash;
let block: ExtendedBlock = {
let transactions: Vec<ExtendedTransaction> = mongo_fuzzer
.transactions
.iter()
.filter_map(|stored_transaction| {
if stored_transaction.block_hash.unwrap() == block_hash {
Some(stored_transaction.into())
} else {
None
}
})
.collect();
let block_transactions = BlockTransactions::Full(transactions.clone());
let signed_transactions =
transactions.into_iter().map(TryFrom::try_from).collect::<Result<_, _>>().unwrap();
let block = reth_primitives::Block {
body: BlockBody {
transactions: signed_transactions,
withdrawals: Some(Default::default()),
..Default::default()
},
header: reth_primitives::Header::try_from(header.clone()).unwrap(),
};
let size = block.length();
WithOtherFields::new(Block {
header: header.clone(),
transactions: block_transactions,
size: Some(U256::from(size)),
withdrawals: Some(Default::default()),
..Default::default()
})
};
// Test retrieving block by block hash
assert_eq!(database.block(block_hash.into(), true).await.unwrap().unwrap(), block);
// Test retrieving block by block number
assert_eq!(database.block(header.number.into(), true).await.unwrap().unwrap(), block);
let mut rng = rand::thread_rng();
// Test retrieving non-existing block by block hash
assert_eq!(database.block(rng.gen::<B256>().into(), false).await.unwrap(), None);
// Test retrieving non-existing block by block number
assert_eq!(database.block(rng.gen::<u64>().into(), false).await.unwrap(), None);
// test withdrawals_root raises an error
let mut faulty_header = StoredHeader::arbitrary(u).unwrap();
faulty_header.header.withdrawals_root = Some(rng.gen::<B256>());
let filter = EthDatabaseFilterBuilder::<filter::Header>::default().with_block_hash(&faulty_header.hash).build();
database.update_one(faulty_header.clone(), filter, true).await.expect("Failed to update header in database");
assert!(database.block(faulty_header.hash.into(), true).await.is_err());
}
async fn test_get_transaction_count(database: &Database, mongo_fuzzer: &MongoFuzzer) {
let header_block_hash = &mongo_fuzzer.headers.first().unwrap().header;
let first_block_hash = header_block_hash.hash;
let transaction_count: U256 = U256::from(
mongo_fuzzer
.transactions
.iter()
.filter(|transaction| transaction.tx.block_hash.unwrap() == first_block_hash)
.count(),
);
// Test retrieving header by block hash
assert_eq!(database.transaction_count(first_block_hash.into()).await.unwrap().unwrap(), transaction_count);
let mut rng = rand::thread_rng();
// Test retrieving non-existing transaction count by block hash
assert_eq!(database.transaction_count(rng.gen::<B256>().into()).await.unwrap(), None);
// Test retrieving non-existing transaction count by block number
assert_eq!(database.transaction_count(rng.gen::<u64>().into()).await.unwrap(), None);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_upsert_transaction_hashes() {
// Initialize MongoDB fuzzer
let mut mongo_fuzzer = MongoFuzzer::new(RANDOM_BYTES_SIZE).await;
// Mock a database with sample data
let database = mongo_fuzzer.mock_database(1).await;
// Generate random Ethereum and Starknet hashes
let eth_hash = B256::random();
let starknet_hash =
Felt::from_hex("0x03d937c035c878245caf64531a5756109c53068da139362728feb561405371cb").unwrap();
// Define an EthStarknetHashes instance for testing
let transaction_hashes = EthStarknetHashes { eth_hash, starknet_hash };
// First, upsert the transaction hash mapping (should insert as it doesn't exist initially)
database
.upsert_transaction_hashes(transaction_hashes.clone())
.await
.expect("Failed to upsert transaction hash mapping");
// Retrieve the inserted transaction hash mapping and verify it matches the inserted values
let filter =
EthDatabaseFilterBuilder::<filter::EthStarknetTransactionHash>::default().with_tx_hash(ð_hash).build();
let stored_mapping: Option<StoredEthStarknetTransactionHash> =
database.get_one(filter.clone(), None).await.expect("Failed to retrieve transaction hash mapping");
assert_eq!(
stored_mapping,
Some(StoredEthStarknetTransactionHash::from(transaction_hashes.clone())),
"The transaction hash mapping was not inserted correctly"
);
// Now, modify the Starknet hash and upsert the modified transaction hash mapping
let new_starknet_hash =
Felt::from_hex("0x0208a0a10250e382e1e4bbe2880906c2791bf6275695e02fbbc6aeff9cd8b31a").unwrap();
let updated_transaction_hashes = EthStarknetHashes { eth_hash, starknet_hash: new_starknet_hash };
database
.upsert_transaction_hashes(updated_transaction_hashes.clone())
.await
.expect("Failed to update transaction hash mapping");
// Retrieve the updated transaction hash mapping and verify it matches the updated values
let updated_mapping: Option<StoredEthStarknetTransactionHash> =
database.get_one(filter, None).await.expect("Failed to retrieve updated transaction hash mapping");
assert_eq!(
updated_mapping,
Some(StoredEthStarknetTransactionHash::from(updated_transaction_hashes)),
"The transaction hash mapping was not updated correctly"
);
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/filter.rs | src/providers/eth_provider/database/filter.rs | use crate::providers::eth_provider::constant::{
ADDRESS_HEX_STRING_LEN, BLOCK_NUMBER_HEX_STRING_LEN, HASH_HEX_STRING_LEN, LOGS_TOPICS_HEX_STRING_LEN,
U64_HEX_STRING_LEN,
};
use alloy_primitives::{Address, B256};
use alloy_rpc_types::{BlockHashOrNumber, Index, Topic};
use mongodb::bson::{doc, Document};
use std::fmt::{Display, LowerHex};
/// A trait that defines possible key filters for blocks in the
/// Ethereum database.
pub trait BlockFiltering {
/// Returns the key for the block hash.
fn block_hash(&self) -> &'static str;
/// Returns the key for the block number.
fn block_number(&self) -> &'static str;
}
/// A trait that defines possible key filters for transactions in the
/// Ethereum database.
pub trait TransactionFiltering {
/// Returns the key for the transaction hash.
fn transaction_hash(&self) -> &'static str;
/// Returns the key for the transaction index in the block.
fn transaction_index(&self) -> &'static str;
}
/// A trait that defines possible key filters for logs in the
/// Ethereum database.
pub trait LogFiltering {
/// Returns the key for the transaction hash.
fn address(&self) -> &'static str;
}
/// A type used for a mapping between:
/// - An Ethereum transaction hash
/// - A Starknet transaction hash.
#[derive(Debug, Default)]
pub struct EthStarknetTransactionHash;
impl Display for EthStarknetTransactionHash {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "hashes")
}
}
impl TransactionFiltering for EthStarknetTransactionHash {
fn transaction_hash(&self) -> &'static str {
"eth_hash"
}
fn transaction_index(&self) -> &'static str {
""
}
}
/// A transaction type used as a target for the filter.
#[derive(Debug, Default)]
pub struct Transaction;
impl Display for Transaction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "tx")
}
}
impl BlockFiltering for Transaction {
fn block_hash(&self) -> &'static str {
"blockHash"
}
fn block_number(&self) -> &'static str {
"blockNumber"
}
}
impl TransactionFiltering for Transaction {
fn transaction_hash(&self) -> &'static str {
"hash"
}
fn transaction_index(&self) -> &'static str {
"transactionIndex"
}
}
/// A receipt type used as a target for the filter.
#[derive(Debug, Default)]
pub struct Receipt;
impl Display for Receipt {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "receipt")
}
}
impl BlockFiltering for Receipt {
fn block_hash(&self) -> &'static str {
"blockHash"
}
fn block_number(&self) -> &'static str {
"blockNumber"
}
}
impl TransactionFiltering for Receipt {
fn transaction_hash(&self) -> &'static str {
"transactionHash"
}
fn transaction_index(&self) -> &'static str {
"transactionIndex"
}
}
/// A header type used as a target for the filter.
#[derive(Debug, Default)]
pub struct Header;
impl Display for Header {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "header")
}
}
impl BlockFiltering for Header {
fn block_hash(&self) -> &'static str {
"hash"
}
fn block_number(&self) -> &'static str {
"number"
}
}
/// A log type used as a target for the filter.
#[derive(Debug, Default)]
pub struct Log;
impl Display for Log {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "log")
}
}
impl BlockFiltering for Log {
fn block_hash(&self) -> &'static str {
"blockHash"
}
fn block_number(&self) -> &'static str {
"blockNumber"
}
}
impl LogFiltering for Log {
fn address(&self) -> &'static str {
"address"
}
}
/// Builder for creating a filter on the Ethereum database.
#[derive(Debug, Default)]
pub struct EthDatabaseFilterBuilder<T: Default> {
/// The filter to apply.
pub filter: Document,
/// The target type.
pub target: T,
}
impl<T: BlockFiltering + Display + Default> EthDatabaseFilterBuilder<T> {
/// Adds a filter on the block hash.
#[must_use]
pub fn with_block_hash(mut self, hash: &B256) -> Self {
let key = format!("{}.{}", self.target, self.target.block_hash());
self.filter.insert(key, format_hex(hash, HASH_HEX_STRING_LEN));
self
}
/// Adds a filter on the block number.
#[must_use]
pub fn with_block_number(mut self, number: u64) -> Self {
let key = format!("{}.{}", self.target, self.target.block_number());
self.filter.insert(key, format_hex(number, BLOCK_NUMBER_HEX_STRING_LEN));
self
}
/// Adds a filter on the block hash or number.
#[must_use]
pub fn with_block_hash_or_number(self, block_hash_or_number: BlockHashOrNumber) -> Self {
match block_hash_or_number {
BlockHashOrNumber::Hash(hash) => self.with_block_hash(&hash),
BlockHashOrNumber::Number(number) => self.with_block_number(number),
}
}
}
impl<T: TransactionFiltering + Display + Default> EthDatabaseFilterBuilder<T> {
/// Adds a filter on the transaction hash.
#[must_use]
pub fn with_tx_hash(mut self, hash: &B256) -> Self {
let key = format!("{}.{}", self.target, self.target.transaction_hash());
self.filter.insert(key, format_hex(hash, BLOCK_NUMBER_HEX_STRING_LEN));
self
}
/// Adds a filter on the transaction index in the block.
#[must_use]
pub fn with_tx_index(mut self, index: &Index) -> Self {
let index: usize = (*index).into();
let key = format!("{}.{}", self.target, self.target.transaction_index());
self.filter.insert(key, format_hex(index, U64_HEX_STRING_LEN));
self
}
}
impl<T: LogFiltering + BlockFiltering + Display + Default> EthDatabaseFilterBuilder<T> {
/// Adds a filter on the log address.
#[must_use]
pub fn with_addresses(mut self, addresses: &[Address]) -> Self {
if addresses.is_empty() {
return self;
}
let key = format!("{}.{}", self.target, self.target.address());
self.filter.insert(
key,
doc! {"$in": addresses.iter().map(|a| format_hex(a, ADDRESS_HEX_STRING_LEN)).collect::<Vec<_>>()},
);
self
}
/// Adds a filter on the block number range.
#[must_use]
pub fn with_block_number_range(mut self, from: u64, to: u64) -> Self {
let key = format!("{}.{}", self.target, self.target.block_number());
self.filter.insert(
key,
doc! {"$gte": format_hex(from, BLOCK_NUMBER_HEX_STRING_LEN), "$lte": format_hex(to, BLOCK_NUMBER_HEX_STRING_LEN)},
);
self
}
/// Adds a filter on the topics.
#[must_use]
pub fn with_topics(mut self, topics: &[Topic; 4]) -> Self {
let mut filter = vec![];
// If all topics are None, return a filter that checks if the log.topics field exists
if topics.iter().all(Topic::is_empty) {
self.filter.insert("log.topics", doc! {"$exists": true});
return self;
}
// Iterate over the topics and add the filter to the filter vector
for (index, topic_set) in topics.iter().enumerate() {
let key = format!("log.topics.{index}");
let topics: Vec<_> =
topic_set.clone().into_iter().map(|t| format_hex(t, LOGS_TOPICS_HEX_STRING_LEN)).collect();
if topics.len() == 1 {
// If the topic array has only one element, use an equality filter
filter.push(doc! {key: topics[0].clone()});
} else if !topics.is_empty() {
// If the topic array has more than one element, use an $in filter
filter.push(doc! {key: {"$in": topics}});
}
}
self.filter.extend(doc! {"$and": filter});
self
}
}
impl<T: Default> EthDatabaseFilterBuilder<T> {
/// Consumes the builder and returns the filter and sorting.
pub fn build(self) -> Document {
self.filter
}
}
pub fn format_hex(value: impl LowerHex, width: usize) -> String {
// Add 2 to the width to account for the 0x prefix.
let s = format!("{:#0width$x}", value, width = width + 2);
// `s.len() < width` can happen because of the LowerHex implementation
// for Uint, which just formats 0 into 0x0, ignoring the width.
if s.len() < width {
return format!("0x{:0>width$}", &s[2..], width = width);
}
s
}
/// Converts a key and value into a `MongoDB` filter.
pub fn into_filter<T>(key: &str, value: &T, width: usize) -> Document
where
T: LowerHex,
{
doc! {key: format_hex(value, width)}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::b256;
use alloy_rpc_types::FilterSet;
#[test]
fn test_into_filter_with_padding() {
assert_eq!(into_filter::<u64>("test_key", &0x1234, 10), doc! {"test_key": "0x0000001234"});
assert_eq!(
into_filter::<B256>(
"test_key",
&b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"),
64
),
doc! {"test_key": "0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"}
);
assert_eq!(
into_filter::<B256>("test_key", &B256::default(), 64),
doc! {"test_key": format!("0x{}", "0".repeat(64))}
);
}
#[test]
fn test_transaction_block_hash_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Transaction>::default();
// When
let filter = builder.with_block_hash(&B256::left_padding_from(&[1])).build();
// Then
assert_eq!(filter, doc! {"tx.blockHash": "0x0000000000000000000000000000000000000000000000000000000000000001"});
}
#[test]
fn test_transaction_block_number_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Transaction>::default();
// When
let filter = builder.with_block_number(1).build();
// Then
assert_eq!(filter, doc! {"tx.blockNumber": "0x0000000000000001"});
}
#[test]
fn test_transaction_block_hash_and_index_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Transaction>::default();
// When
let filter = builder.with_block_hash(&B256::left_padding_from(&[1])).with_tx_index(&10usize.into()).build();
// Then
assert_eq!(
filter,
doc! {
"tx.blockHash": "0x0000000000000000000000000000000000000000000000000000000000000001",
"tx.transactionIndex": "0x000000000000000a"
}
);
}
#[test]
fn test_transaction_block_number_and_index_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Transaction>::default();
// When
let filter = builder.with_block_number(1).with_tx_index(&10usize.into()).build();
// Then
assert_eq!(
filter,
doc! {
"tx.blockNumber": "0x0000000000000001",
"tx.transactionIndex": "0x000000000000000a"
}
);
}
#[test]
fn test_receipt_transaction_hash_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Receipt>::default();
// When
let filter = builder.with_tx_hash(&B256::left_padding_from(&[1])).build();
// Then
assert_eq!(
filter,
doc! {"receipt.transactionHash": "0x0000000000000000000000000000000000000000000000000000000000000001"}
);
}
#[test]
fn test_receipt_block_number_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Receipt>::default();
// When
let filter = builder.with_block_number(1).build();
// Then
assert_eq!(filter, doc! {"receipt.blockNumber": "0x0000000000000001"});
}
#[test]
fn test_receipt_block_hash_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Receipt>::default();
// When
let filter = builder.with_block_hash(&B256::left_padding_from(&[1])).build();
// Then
assert_eq!(
filter,
doc! {"receipt.blockHash": "0x0000000000000000000000000000000000000000000000000000000000000001"}
);
}
#[test]
fn test_header_block_hash_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Header>::default();
// When
let filter = builder.with_block_hash(&B256::left_padding_from(&[1])).build();
// Then
assert_eq!(filter, doc! {"header.hash": "0x0000000000000000000000000000000000000000000000000000000000000001"});
}
#[test]
fn test_header_block_number_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Header>::default();
// When
let filter = builder.with_block_number(1).build();
// Then
assert_eq!(filter, doc! {"header.number": "0x0000000000000001"});
}
#[test]
fn test_log_block_hash_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
// When
let filter = builder.with_block_hash(&B256::left_padding_from(&[1])).build();
// Then
assert_eq!(
filter,
doc! {"log.blockHash": "0x0000000000000000000000000000000000000000000000000000000000000001"}
);
}
#[test]
fn test_log_block_number_range_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
// When
let filter = builder.with_block_number_range(1, 10).build();
// Then
assert_eq!(filter, doc! {"log.blockNumber": {"$gte": "0x0000000000000001", "$lte": "0x000000000000000a"}});
}
#[test]
fn test_log_empty_addresses_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
// When
let filter = builder.with_addresses(&[]).build();
// Then
assert_eq!(filter, doc! {});
}
#[test]
fn test_log_addresses_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
// When
let filter =
builder.with_addresses(&[Address::left_padding_from(&[1]), Address::left_padding_from(&[2])]).build();
// Then
assert_eq!(
filter,
doc! {
"log.address": {
"$in": ["0x0000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000002"]
}
}
);
}
#[test]
fn test_log_topics_empty_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
let topics = [Topic::default(), Topic::default(), Topic::default(), Topic::default()];
// When
let filter = builder.with_topics(&topics).build();
// Then
assert_eq!(filter, doc! { "log.topics": {"$exists": true} });
}
#[test]
fn test_log_topics_filter() {
// Given
let builder = EthDatabaseFilterBuilder::<Log>::default();
let topics: [FilterSet<B256>; 4] = [
vec![B256::left_padding_from(&[1]), B256::left_padding_from(&[2])].into(),
B256::left_padding_from(&[3]).into(),
B256::left_padding_from(&[4]).into(),
vec![B256::left_padding_from(&[5]), B256::left_padding_from(&[6])].into(),
];
// When
let filter = builder.with_topics(&topics).build();
// Then
let and_filter = filter.get("$and").unwrap().as_array().unwrap();
let first_topic_filter = and_filter[0].as_document().unwrap().clone();
assert!(
first_topic_filter
== doc! { "log.topics.0": {"$in": ["0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000002"]} }
|| first_topic_filter
== doc! { "log.topics.0": {"$in": ["0x0000000000000000000000000000000000000000000000000000000000000002", "0x0000000000000000000000000000000000000000000000000000000000000001"]} }
);
assert_eq!(
and_filter[1].as_document().unwrap().clone(),
doc! { "log.topics.1": "0x0000000000000000000000000000000000000000000000000000000000000003" }
);
assert_eq!(
and_filter[2].as_document().unwrap().clone(),
doc! { "log.topics.2": "0x0000000000000000000000000000000000000000000000000000000000000004" }
);
let fourth_topic_filter = and_filter[3].as_document().unwrap().clone();
assert!(
fourth_topic_filter
== doc! { "log.topics.3": {"$in": ["0x0000000000000000000000000000000000000000000000000000000000000005", "0x0000000000000000000000000000000000000000000000000000000000000006"]} }
|| fourth_topic_filter
== doc! { "log.topics.3": {"$in": ["0x0000000000000000000000000000000000000000000000000000000000000006", "0x0000000000000000000000000000000000000000000000000000000000000005"]} }
);
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/state.rs | src/providers/eth_provider/database/state.rs | use crate::providers::eth_provider::{error::EthApiError, provider::EthereumProvider};
use alloy_primitives::{Address, B256, U256};
use alloy_rpc_types::{serde_helpers::JsonStorageKey, BlockId};
use reth_revm::{
db::CacheDB,
primitives::{AccountInfo, Bytecode},
DatabaseRef,
};
use tokio::runtime::Handle;
#[derive(Debug, Clone)]
pub struct EthCacheDatabase<P: EthereumProvider + Send + Sync>(pub CacheDB<EthDatabase<P>>);
/// Ethereum database type.
#[derive(Debug, Clone)]
pub struct EthDatabase<P: EthereumProvider + Send + Sync> {
/// The Ethereum provider.
provider: P,
/// The block ID.
block_id: BlockId,
}
impl<P: EthereumProvider + Send + Sync> EthDatabase<P> {
pub const fn new(provider: P, block_id: BlockId) -> Self {
Self { provider, block_id }
}
}
/// The [`DatabaseRef`] trait implementation for [`EthDatabase`].
///
/// This implementation is designed to handle database interactions in a manner that is compatible
/// with both synchronous and asynchronous Rust contexts. Given the constraints of the underlying
/// database operations, it's necessary to perform blocking calls in a controlled manner to avoid
/// blocking the asynchronous runtime.
///
/// ### Why Use `tokio::task::block_in_place`?
///
/// The `tokio::task::block_in_place` function is employed here to enter a blocking context safely
/// within an asynchronous environment. This allows the blocking database operations to be executed
/// without hindering the performance of other asynchronous tasks or blocking the runtime.
impl<P: EthereumProvider + Send + Sync> DatabaseRef for EthDatabase<P> {
type Error = EthApiError;
/// Returns the account information for the given address without caching.
fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
tokio::task::block_in_place(|| {
let account_info = Handle::current().block_on(async {
let bytecode = self.provider.get_code(address, Some(self.block_id));
let nonce = self.provider.transaction_count(address, Some(self.block_id));
let balance = self.provider.balance(address, Some(self.block_id));
let (bytecode, nonce, balance) = tokio::join!(bytecode, nonce, balance);
let bytecode = Bytecode::new_raw(bytecode?);
let code_hash = bytecode.hash_slow();
Result::<_, EthApiError>::Ok(AccountInfo {
nonce: nonce?.to(),
balance: balance?,
code: Some(bytecode),
code_hash,
})
})?;
Ok(Some(account_info))
})
}
/// Returns the code for the given code hash.
/// TODO: Implement this method in the provider
fn code_by_hash_ref(&self, _code_hash: B256) -> Result<Bytecode, Self::Error> {
Ok(Default::default())
}
/// Returns the storage value for the given address and index without caching.
fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
tokio::task::block_in_place(|| {
let storage = Handle::current().block_on(async {
let value = self
.provider
.storage_at(
address,
JsonStorageKey(B256::from_slice(&index.to_be_bytes::<32>())),
Some(self.block_id),
)
.await?;
Result::<_, EthApiError>::Ok(value)
})?;
let storage = U256::from_be_bytes(storage.0);
Ok(storage)
})
}
/// Returns the block hash for the given block number without caching.
fn block_hash_ref(&self, block_number: u64) -> Result<B256, Self::Error> {
tokio::task::block_in_place(|| {
let hash = Handle::current().block_on(async {
let hash = self
.provider
.block_by_number(block_number.into(), false)
.await?
.ok_or(EthApiError::UnknownBlock(block_number.into()))?
.header
.hash;
Result::<_, EthApiError>::Ok(hash)
})?;
Ok(hash)
})
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/mod.rs | src/providers/eth_provider/database/mod.rs | pub mod ethereum;
pub mod filter;
pub mod state;
pub mod types;
use super::error::KakarotError;
use crate::providers::eth_provider::database::types::{
header::StoredHeader,
log::StoredLog,
receipt::StoredTransactionReceipt,
transaction::{StoredEthStarknetTransactionHash, StoredTransaction},
};
use futures::TryStreamExt;
use itertools::Itertools;
use mongodb::{
bson::{doc, Document},
options::{FindOneOptions, FindOptions, UpdateModifications, UpdateOptions},
Collection, Database as MongoDatabase,
};
use serde::{de::DeserializeOwned, Serialize};
type DatabaseResult<T> = eyre::Result<T, KakarotError>;
/// Struct for encapsulating find options for `MongoDB` queries.
#[derive(Clone, Debug, Default)]
pub struct FindOpts(FindOptions);
impl FindOpts {
/// Sets the limit for the number of documents to retrieve.
#[must_use]
pub fn with_limit(mut self, limit: u64) -> Self {
self.0.limit = Some(i64::try_from(limit).unwrap_or(i64::MAX));
self
}
/// Sets the projection for the documents to retrieve.
#[must_use]
pub fn with_projection(mut self, projection: Document) -> Self {
self.0.projection = Some(projection);
self
}
/// Builds and returns the `FindOptions`.
pub fn build(self) -> FindOptions {
self.0
}
}
/// Wrapper around a `MongoDB` database
#[derive(Clone, Debug)]
pub struct Database(MongoDatabase);
impl Database {
pub const fn new(database: MongoDatabase) -> Self {
Self(database)
}
/// Get a reference to the inner `MongoDatabase`
pub const fn inner(&self) -> &MongoDatabase {
&self.0
}
/// Get a mutable reference to the inner `MongoDatabase`
pub fn inner_mut(&mut self) -> &mut MongoDatabase {
&mut self.0
}
/// Returns a collection from the database.
pub fn collection<T>(&self) -> Collection<T>
where
T: CollectionName + Sync + Send,
{
self.0.collection::<T>(T::collection_name())
}
/// Get a list of documents from a collection
pub async fn get<T>(
&self,
filter: impl Into<Option<Document>>,
find_options: impl Into<Option<FindOpts>>,
) -> DatabaseResult<Vec<T>>
where
T: DeserializeOwned + CollectionName + Sync + Send,
{
let find_options = find_options.into();
Ok(self
.collection::<T>()
.find(Into::<Option<Document>>::into(filter).unwrap_or_default())
.with_options(find_options.unwrap_or_default().build())
.await?
.try_collect()
.await?)
}
/// Get all documents from a collection
pub async fn get_all<T>(&self) -> DatabaseResult<Vec<T>>
where
T: DeserializeOwned + CollectionName + Sync + Send,
{
let find_options = FindOpts::default().build();
Ok(self.collection::<T>().find(Default::default()).with_options(find_options).await?.try_collect().await?)
}
/// Retrieves documents from a collection and converts them into another type.
///
/// Returns a vector of documents of type `D` if successful, or an error.
pub async fn get_and_map_to<D, T>(
&self,
filter: impl Into<Option<Document>>,
find_options: Option<FindOpts>,
) -> DatabaseResult<Vec<D>>
where
T: DeserializeOwned + CollectionName + Sync + Send,
D: From<T>,
{
let stored_data: Vec<T> = self.get(filter, find_options).await?;
Ok(stored_data.into_iter().map_into().collect())
}
/// Retrieves all documents from a collection and converts them into another type.
///
/// Returns a vector of documents of type `D` if successful, or an error.
pub async fn get_all_and_map_to<D, T>(&self) -> DatabaseResult<Vec<D>>
where
T: DeserializeOwned + CollectionName + Sync + Send,
D: From<T>,
{
let stored_data: Vec<T> = self.get_all().await?;
Ok(stored_data.into_iter().map_into().collect())
}
/// Get a single document from a collection
pub async fn get_one<T>(
&self,
filter: impl Into<Option<Document>>,
sort: impl Into<Option<Document>>,
) -> DatabaseResult<Option<T>>
where
T: DeserializeOwned + Unpin + Send + Sync + CollectionName,
{
let find_one_options = FindOneOptions::builder().sort(sort).build();
Ok(self
.collection::<T>()
.find_one(Into::<Option<Document>>::into(filter).unwrap_or_default())
.with_options(find_one_options)
.await?)
}
/// Get the first document from a collection
pub async fn get_first<T>(&self) -> DatabaseResult<Option<T>>
where
T: DeserializeOwned + Unpin + Send + Sync + CollectionName,
{
Ok(self.collection::<T>().find_one(Default::default()).await?)
}
/// Get a single document from aggregated collections
pub async fn get_one_aggregate<T>(&self, pipeline: impl IntoIterator<Item = Document>) -> DatabaseResult<Option<T>>
where
T: DeserializeOwned + CollectionName + Sync + Send,
{
let mut cursor = self.collection::<T>().aggregate(pipeline).await?;
Ok(cursor.try_next().await?.map(|doc| mongodb::bson::de::from_document(doc)).transpose()?)
}
/// Update a single document in a collection
pub async fn update_one<T>(&self, doc: T, filter: impl Into<Document>, upsert: bool) -> DatabaseResult<()>
where
T: Serialize + CollectionName + Sync + Send,
{
let doc = mongodb::bson::to_document(&doc).map_err(mongodb::error::Error::custom)?;
let update_options = UpdateOptions::builder().upsert(upsert).build();
self.collection::<T>()
.update_one(filter.into(), UpdateModifications::Document(doc! {"$set": doc}))
.with_options(update_options)
.await?;
Ok(())
}
/// Delete a single document from a collection
pub async fn delete_one<T>(&self, filter: impl Into<Document>) -> DatabaseResult<()>
where
T: CollectionName + Sync + Send,
{
self.collection::<T>().delete_one(filter.into()).await?;
Ok(())
}
/// Count the number of documents in a collection matching the filter
pub async fn count<T>(&self, filter: Document) -> DatabaseResult<u64>
where
T: CollectionName + Sync + Send,
{
Ok(self.collection::<T>().count_documents(filter).await?)
}
}
impl From<MongoDatabase> for Database {
fn from(database: MongoDatabase) -> Self {
Self(database)
}
}
/// Trait for associating a type with its collection name
pub trait CollectionName {
/// Returns the name of the collection associated with the type
fn collection_name() -> &'static str;
}
/// Implement [`CollectionName`] for [`StoredHeader`]
impl CollectionName for StoredHeader {
fn collection_name() -> &'static str {
"headers"
}
}
/// Implement [`CollectionName`] for [`StoredTransaction`]
impl CollectionName for StoredTransaction {
fn collection_name() -> &'static str {
"transactions"
}
}
/// Implement [`CollectionName`] for [`StoredTransactionReceipt`]
impl CollectionName for StoredTransactionReceipt {
fn collection_name() -> &'static str {
"receipts"
}
}
/// Implement [`CollectionName`] for [`StoredLog`]
impl CollectionName for StoredLog {
fn collection_name() -> &'static str {
"logs"
}
}
/// Implement [`CollectionName`] for [`StoredEthStarknetTransactionHash`]
impl CollectionName for StoredEthStarknetTransactionHash {
fn collection_name() -> &'static str {
"transaction_hashes"
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/types/log.rs | src/providers/eth_provider/database/types/log.rs | use super::receipt::StoredTransactionReceipt;
use alloy_rpc_types::Log;
use serde::{Deserialize, Serialize};
use std::ops::Deref;
/// A transaction receipt as stored in the database
#[derive(Debug, Deserialize, Clone, PartialEq, Eq, Serialize)]
pub struct StoredLog {
#[serde(deserialize_with = "crate::providers::eth_provider::database::types::serde::deserialize_intermediate")]
pub log: Log,
}
impl From<StoredLog> for Log {
fn from(log: StoredLog) -> Self {
log.log
}
}
impl From<&StoredLog> for Log {
fn from(log: &StoredLog) -> Self {
log.log.clone()
}
}
impl From<Log> for StoredLog {
fn from(log: Log) -> Self {
Self { log }
}
}
impl From<StoredTransactionReceipt> for Vec<StoredLog> {
fn from(value: StoredTransactionReceipt) -> Self {
value.receipt.inner.inner.logs().iter().cloned().map(Into::into).collect()
}
}
impl Deref for StoredLog {
type Target = Log;
fn deref(&self) -> &Self::Target {
&self.log
}
}
#[cfg(any(test, feature = "arbitrary", feature = "testing"))]
impl<'a> arbitrary::Arbitrary<'a> for StoredLog {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
Ok(Self {
log: Log {
block_hash: Some(alloy_primitives::B256::arbitrary(u)?),
block_number: Some(u64::arbitrary(u)?),
block_timestamp: Some(u64::arbitrary(u)?),
transaction_hash: Some(alloy_primitives::B256::arbitrary(u)?),
transaction_index: Some(u64::arbitrary(u)?),
log_index: Some(u64::arbitrary(u)?),
..Log::arbitrary(u)?
},
})
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/types/header.rs | src/providers/eth_provider/database/types/header.rs | use super::transaction::ExtendedTransaction;
use alloy_rpc_types::{Block, Header};
use alloy_serde::WithOtherFields;
use serde::{Deserialize, Serialize};
use std::ops::Deref;
#[cfg(any(test, feature = "arbitrary", feature = "testing"))]
use {
alloy_consensus::constants::EMPTY_ROOT_HASH,
alloy_primitives::{B256, B64, U256},
arbitrary::Arbitrary,
};
/// Type alias for a block that contains extended transactions and additional fields.
pub type ExtendedBlock = WithOtherFields<Block<ExtendedTransaction>>;
/// A header as stored in the database
#[derive(Debug, Serialize, Deserialize, Hash, Clone, PartialEq, Eq)]
pub struct StoredHeader {
#[serde(deserialize_with = "crate::providers::eth_provider::database::types::serde::deserialize_intermediate")]
pub header: Header,
}
impl From<StoredHeader> for Header {
fn from(header: StoredHeader) -> Self {
header.header
}
}
impl From<&StoredHeader> for Header {
fn from(header: &StoredHeader) -> Self {
header.header.clone()
}
}
impl Deref for StoredHeader {
type Target = Header;
fn deref(&self) -> &Self::Target {
&self.header
}
}
#[cfg(any(test, feature = "arbitrary", feature = "testing"))]
impl Arbitrary<'_> for StoredHeader {
fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
Ok(Self {
header: Header {
hash: B256::arbitrary(u)?,
total_difficulty: Some(U256::arbitrary(u).unwrap()),
mix_hash: Some(B256::arbitrary(u).unwrap()),
nonce: Some(B64::arbitrary(u).unwrap()),
withdrawals_root: Some(EMPTY_ROOT_HASH),
base_fee_per_gas: Some(u64::arbitrary(u).unwrap()),
blob_gas_used: Some(u64::arbitrary(u).unwrap()),
excess_blob_gas: Some(u64::arbitrary(u).unwrap()),
gas_limit: u64::arbitrary(u).unwrap(),
gas_used: u64::arbitrary(u).unwrap(),
number: u64::arbitrary(u).unwrap(),
..Header::arbitrary(u)?
},
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use arbitrary::Arbitrary;
use rand::Rng;
#[test]
fn test_stored_header_arbitrary() {
let mut bytes = [0u8; 1024];
rand::thread_rng().fill(bytes.as_mut_slice());
let _ = StoredHeader::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap();
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/types/receipt.rs | src/providers/eth_provider/database/types/receipt.rs | use alloy_rpc_types::TransactionReceipt;
use alloy_serde::WithOtherFields;
#[cfg(any(test, feature = "arbitrary", feature = "testing"))]
use reth_primitives::Receipt;
use serde::{Deserialize, Serialize};
/// Type alias for a transaction receipt with additional fields.
pub type ExtendedTxReceipt = WithOtherFields<TransactionReceipt>;
/// A transaction receipt as stored in the database
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone)]
pub struct StoredTransactionReceipt {
#[serde(deserialize_with = "crate::providers::eth_provider::database::types::serde::deserialize_intermediate")]
pub receipt: WithOtherFields<TransactionReceipt>,
}
impl From<StoredTransactionReceipt> for WithOtherFields<TransactionReceipt> {
fn from(receipt: StoredTransactionReceipt) -> Self {
receipt.receipt
}
}
#[cfg(any(test, feature = "arbitrary", feature = "testing"))]
impl<'a> arbitrary::Arbitrary<'a> for StoredTransactionReceipt {
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
use alloy_primitives::{Address, Bloom, B256};
let receipt = Receipt::arbitrary(u)?;
let mut logs = Vec::new();
for log in receipt.logs {
logs.push(alloy_rpc_types::Log {
transaction_index: Some(u64::arbitrary(u)?),
log_index: Some(u64::arbitrary(u)?),
removed: bool::arbitrary(u)?,
inner: log,
block_hash: Some(B256::arbitrary(u)?),
block_number: Some(u64::arbitrary(u)?),
block_timestamp: Some(u64::arbitrary(u)?),
transaction_hash: Some(B256::arbitrary(u)?),
});
}
let receipt = alloy_rpc_types::ReceiptWithBloom {
receipt: alloy_rpc_types::Receipt {
status: bool::arbitrary(u)?.into(),
cumulative_gas_used: u128::from(u64::arbitrary(u)?),
logs,
},
logs_bloom: Bloom::arbitrary(u)?,
};
Ok(Self {
receipt: WithOtherFields::new(TransactionReceipt {
transaction_hash: B256::arbitrary(u)?,
transaction_index: Some(u64::arbitrary(u)?),
block_hash: Some(B256::arbitrary(u)?),
block_number: Some(u64::arbitrary(u)?),
gas_used: u128::arbitrary(u)?,
effective_gas_price: u128::arbitrary(u)?,
blob_gas_used: Some(u128::arbitrary(u)?),
blob_gas_price: Some(u128::arbitrary(u)?),
from: Address::arbitrary(u)?,
to: Some(Address::arbitrary(u)?),
contract_address: Some(Address::arbitrary(u)?),
inner: match u.int_in_range(0..=3)? {
0 => alloy_consensus::ReceiptEnvelope::Legacy(receipt),
1 => alloy_consensus::ReceiptEnvelope::Eip2930(receipt),
2 => alloy_consensus::ReceiptEnvelope::Eip1559(receipt),
3 => alloy_consensus::ReceiptEnvelope::Eip4844(receipt),
_ => unreachable!(),
},
authorization_list: None,
}),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use arbitrary::Arbitrary;
use rand::Rng;
#[test]
fn test_stored_transaction_receipt_arbitrary() {
let mut bytes = [0u8; 1024];
rand::thread_rng().fill(bytes.as_mut_slice());
let _ = StoredTransactionReceipt::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap();
}
}
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
kkrt-labs/kakarot-rpc | https://github.com/kkrt-labs/kakarot-rpc/blob/6b48598a85bb2598123e1be7237da9832d0d5eaa/src/providers/eth_provider/database/types/mod.rs | src/providers/eth_provider/database/types/mod.rs | pub mod header;
pub mod log;
pub mod receipt;
pub mod serde;
pub mod transaction;
| rust | MIT | 6b48598a85bb2598123e1be7237da9832d0d5eaa | 2026-01-04T20:20:26.112976Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.