repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/p2p/src/protocol.rs | crates/p2p/src/protocol.rs | use libp2p::StreamProtocol;
use std::{collections::HashSet, hash::Hash};
/// Application-level sub-protocols multiplexed over the p2p transport.
///
/// Each variant maps to a distinct versioned libp2p stream protocol path
/// (see `as_stream_protocol`). The per-variant comments note which role
/// is expected to initiate requests on that protocol.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Protocol {
    // validator or orchestrator -> worker
    Authentication,
    // validator -> worker
    HardwareChallenge,
    // orchestrator -> worker
    Invite,
    // any -> worker
    GetTaskLogs,
    // any -> worker
    Restart,
    // any -> any
    General,
}
impl Protocol {
    /// Maps this variant to its versioned libp2p stream protocol identifier.
    pub(crate) fn as_stream_protocol(&self) -> StreamProtocol {
        // Resolve the path first so the constructor call appears once.
        let path = match self {
            Protocol::Authentication => "/prime/authentication/1.0.0",
            Protocol::HardwareChallenge => "/prime/hardware_challenge/1.0.0",
            Protocol::Invite => "/prime/invite/1.0.0",
            Protocol::GetTaskLogs => "/prime/get_task_logs/1.0.0",
            Protocol::Restart => "/prime/restart/1.0.0",
            Protocol::General => "/prime/general/1.0.0",
        };
        StreamProtocol::new(path)
    }
}
/// A set of [`Protocol`]s supported or requested by a peer.
///
/// Built via the `with_*` builder methods; duplicates are silently
/// ignored since this wraps a `HashSet`.
#[derive(Debug, Clone)]
pub struct Protocols(HashSet<Protocol>);
impl Default for Protocols {
    /// Equivalent to [`Protocols::new`]: an empty set.
    fn default() -> Self {
        Self::new()
    }
}
impl Protocols {
pub fn new() -> Self {
Self(HashSet::new())
}
pub fn has_authentication(&self) -> bool {
self.0.contains(&Protocol::Authentication)
}
pub fn has_hardware_challenge(&self) -> bool {
self.0.contains(&Protocol::HardwareChallenge)
}
pub fn has_invite(&self) -> bool {
self.0.contains(&Protocol::Invite)
}
pub fn has_get_task_logs(&self) -> bool {
self.0.contains(&Protocol::GetTaskLogs)
}
pub fn has_restart(&self) -> bool {
self.0.contains(&Protocol::Restart)
}
pub fn has_general(&self) -> bool {
self.0.contains(&Protocol::General)
}
pub fn with_authentication(mut self) -> Self {
self.0.insert(Protocol::Authentication);
self
}
pub fn with_hardware_challenge(mut self) -> Self {
self.0.insert(Protocol::HardwareChallenge);
self
}
pub fn with_invite(mut self) -> Self {
self.0.insert(Protocol::Invite);
self
}
pub fn with_get_task_logs(mut self) -> Self {
self.0.insert(Protocol::GetTaskLogs);
self
}
pub fn with_restart(mut self) -> Self {
self.0.insert(Protocol::Restart);
self
}
pub fn with_general(mut self) -> Self {
self.0.insert(Protocol::General);
self
}
pub(crate) fn join(&mut self, other: Protocols) {
self.0.extend(other.0);
}
}
// Consuming iteration over the contained protocols.
// Iteration order is unspecified (backed by a HashSet).
impl IntoIterator for Protocols {
    type Item = Protocol;
    type IntoIter = std::collections::hash_set::IntoIter<Protocol>;
    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/p2p/src/message/hardware_challenge.rs | crates/p2p/src/message/hardware_challenge.rs | use nalgebra::DMatrix;
use serde::{
de::{self, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
use std::fmt;
/// An `f64` wrapper that serializes as a fixed-precision decimal string
/// and compares by rounded textual representation (see the `Serialize`
/// and `PartialEq` impls below).
#[derive(Debug, Clone)]
pub struct FixedF64(pub f64);
impl Serialize for FixedF64 {
    /// Serializes the float as a string with exactly 12 decimal places,
    /// giving a platform-stable wire representation.
    /// NOTE(review): `PartialEq` below compares at 10 decimal places —
    /// confirm the precision mismatch is intentional.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // adjust precision as needed
        serializer.serialize_str(&format!("{:.12}", self.0))
    }
}
impl<'de> Deserialize<'de> for FixedF64 {
    /// Deserializes from the string form produced by `Serialize` by
    /// parsing it back into an `f64`.
    fn deserialize<D>(deserializer: D) -> Result<FixedF64, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor that accepts only string input (matching serialize_str).
        struct FixedF64Visitor;
        impl Visitor<'_> for FixedF64Visitor {
            type Value = FixedF64;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a string representing a fixed precision float")
            }
            fn visit_str<E>(self, value: &str) -> Result<FixedF64, E>
            where
                E: de::Error,
            {
                value
                    .parse::<f64>()
                    .map(FixedF64)
                    .map_err(|_| E::custom(format!("invalid f64: {value}")))
            }
        }
        deserializer.deserialize_str(FixedF64Visitor)
    }
}
impl PartialEq for FixedF64 {
    // Equality by formatting both values to 10 decimal places, i.e. a
    // fixed tolerance comparison.
    // NOTE(review): `Serialize` uses 12 decimal places — confirm the
    // looser tolerance here is intended. Side effects of this scheme:
    // NaN == NaN holds ("NaN" == "NaN"), while 0.0 != -0.0 (the sign
    // is preserved by formatting).
    fn eq(&self, other: &Self) -> bool {
        format!("{:.10}", self.0) == format!("{:.10}", other.0)
    }
}
/// A matrix-multiplication hardware challenge: compute the product of
/// matrix A (`rows_a` x `cols_a`) and matrix B (`rows_b` x `cols_b`).
/// The matrix data is sent as flat vectors; `calc_matrix` feeds them to
/// nalgebra's `DMatrix::from_vec` (column-major order).
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ChallengeRequest {
    pub rows_a: usize,
    pub cols_a: usize,
    pub data_a: Vec<FixedF64>,
    pub rows_b: usize,
    pub cols_b: usize,
    pub data_b: Vec<FixedF64>,
    // Optional client-supplied timestamp (seconds/millis not specified here).
    pub timestamp: Option<u64>,
}
/// The result matrix of a [`ChallengeRequest`], flattened the same way
/// the inputs were (see `calc_matrix`).
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ChallengeResponse {
    pub result: Vec<FixedF64>,
    pub rows: usize,
    pub cols: usize,
}
/// Computes the matrix product `A * B` for a hardware challenge.
///
/// # Panics
/// Panics inside nalgebra if `data_a`/`data_b` lengths do not equal
/// `rows * cols` for the respective matrix, or if `cols_a != rows_b`.
/// NOTE(review): requests arrive from remote peers, so dimensions should
/// be validated before calling this to avoid a remotely triggerable panic.
pub fn calc_matrix(req: &ChallengeRequest) -> ChallengeResponse {
    // convert FixedF64 to f64
    let data_a: Vec<f64> = req.data_a.iter().map(|x| x.0).collect();
    let data_b: Vec<f64> = req.data_b.iter().map(|x| x.0).collect();
    let a = DMatrix::from_vec(req.rows_a, req.cols_a, data_a);
    let b = DMatrix::from_vec(req.rows_b, req.cols_b, data_b);
    let c = a * b;
    // Flatten the product in nalgebra's native iteration order.
    let data_c: Vec<FixedF64> = c.iter().map(|x| FixedF64(*x)).collect();
    ChallengeResponse {
        rows: c.nrows(),
        cols: c.ncols(),
        result: data_c,
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/p2p/src/message/mod.rs | crates/p2p/src/message/mod.rs | use crate::Protocol;
use libp2p::PeerId;
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
mod hardware_challenge;
pub use hardware_challenge::*;
/// A request/response message received from a remote peer.
#[derive(Debug)]
pub struct IncomingMessage {
    pub peer: PeerId,
    pub message: libp2p::request_response::Message<Request, Response>,
}
/// A message queued for sending: either a fresh request to a peer
/// (with the multiaddrs to dial it on) or a response sent back down an
/// open response channel.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum OutgoingMessage {
    Request((PeerId, Vec<libp2p::Multiaddr>, Request)),
    Response(
        (
            libp2p::request_response::ResponseChannel<Response>,
            Response,
        ),
    ),
}
/// All request kinds carried over the p2p request/response protocol.
/// `GetTaskLogs` and `RestartTask` carry no payload.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Request {
    Authentication(AuthenticationRequest),
    HardwareChallenge(HardwareChallengeRequest),
    Invite(InviteRequest),
    GetTaskLogs,
    RestartTask,
    General(GeneralRequest),
}
impl Request {
    /// Wraps this request into an [`OutgoingMessage`] addressed to `peer`,
    /// dialable at `multiaddrs`.
    pub fn into_outgoing_message(
        self,
        peer: PeerId,
        multiaddrs: Vec<libp2p::Multiaddr>,
    ) -> OutgoingMessage {
        OutgoingMessage::Request((peer, multiaddrs, self))
    }
    /// Returns the [`Protocol`] this request travels on (1:1 mapping).
    pub fn protocol(&self) -> Protocol {
        match self {
            Request::Authentication(_) => Protocol::Authentication,
            Request::HardwareChallenge(_) => Protocol::HardwareChallenge,
            Request::Invite(_) => Protocol::Invite,
            Request::GetTaskLogs => Protocol::GetTaskLogs,
            Request::RestartTask => Protocol::Restart,
            Request::General(_) => Protocol::General,
        }
    }
}
/// All response kinds, mirroring the [`Request`] variants one-to-one.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Response {
    Authentication(AuthenticationResponse),
    HardwareChallenge(HardwareChallengeResponse),
    Invite(InviteResponse),
    GetTaskLogs(GetTaskLogsResponse),
    RestartTask(RestartTaskResponse),
    General(GeneralResponse),
}
impl Response {
    /// Wraps this response into an [`OutgoingMessage`] for the given
    /// still-open response channel.
    pub fn into_outgoing_message(
        self,
        channel: libp2p::request_response::ResponseChannel<Response>,
    ) -> OutgoingMessage {
        OutgoingMessage::Response((channel, self))
    }
}
/// Two-phase authentication request: an `Initiation` carrying a message
/// to be signed, followed by a `Solution` carrying a signature.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AuthenticationRequest {
    Initiation(AuthenticationInitiationRequest),
    Solution(AuthenticationSolutionRequest),
}
impl From<AuthenticationRequest> for Request {
    fn from(request: AuthenticationRequest) -> Self {
        Request::Authentication(request)
    }
}
/// Responses for the two authentication phases.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AuthenticationResponse {
    Initiation(AuthenticationInitiationResponse),
    Solution(AuthenticationSolutionResponse),
}
impl From<AuthenticationResponse> for Response {
    fn from(response: AuthenticationResponse) -> Self {
        Response::Authentication(response)
    }
}
/// Phase 1 request: a message for the remote side to sign.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthenticationInitiationRequest {
    pub message: String,
}
impl From<AuthenticationInitiationRequest> for Request {
    fn from(request: AuthenticationInitiationRequest) -> Self {
        Request::Authentication(AuthenticationRequest::Initiation(request))
    }
}
/// Phase 1 response: the responder's signature plus a counter-message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthenticationInitiationResponse {
    pub signature: String,
    pub message: String,
}
impl From<AuthenticationInitiationResponse> for Response {
    fn from(response: AuthenticationInitiationResponse) -> Self {
        Response::Authentication(AuthenticationResponse::Initiation(response))
    }
}
/// Phase 2 request: the initiator's signature over the counter-message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthenticationSolutionRequest {
    pub signature: String,
}
impl From<AuthenticationSolutionRequest> for Request {
    fn from(request: AuthenticationSolutionRequest) -> Self {
        Request::Authentication(AuthenticationRequest::Solution(request))
    }
}
/// Final verdict of the handshake.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum AuthenticationSolutionResponse {
    Granted,
    Rejected,
}
impl From<AuthenticationSolutionResponse> for Response {
    fn from(response: AuthenticationSolutionResponse) -> Self {
        Response::Authentication(AuthenticationResponse::Solution(response))
    }
}
/// A matrix-multiplication hardware challenge plus the sender's send time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HardwareChallengeRequest {
    pub challenge: ChallengeRequest,
    pub timestamp: SystemTime,
}
impl From<HardwareChallengeRequest> for Request {
    fn from(request: HardwareChallengeRequest) -> Self {
        Request::HardwareChallenge(request)
    }
}
/// The computed challenge result plus the responder's timestamp.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HardwareChallengeResponse {
    pub response: ChallengeResponse,
    pub timestamp: SystemTime,
}
impl From<HardwareChallengeResponse> for Response {
    fn from(response: HardwareChallengeResponse) -> Self {
        Response::HardwareChallenge(response)
    }
}
/// How the invited worker should reach the orchestrator ("master"):
/// either a full URL or an IP/port pair.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum InviteRequestUrl {
    MasterUrl(String),
    MasterIpPort(String, u16),
}
/// Invitation for a worker to join a compute pool.
/// NOTE(review): `expiration` and `nonce` are raw 32-byte values —
/// presumably on-chain words; confirm their encoding against the signer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InviteRequest {
    pub invite: String,
    pub pool_id: u32,
    pub url: InviteRequestUrl,
    pub timestamp: u64,
    pub expiration: [u8; 32],
    pub nonce: [u8; 32],
}
impl From<InviteRequest> for Request {
    fn from(request: InviteRequest) -> Self {
        Request::Invite(request)
    }
}
/// Accept/reject outcome of an invitation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum InviteResponse {
    Ok,
    Error(String),
}
impl From<InviteResponse> for Response {
    fn from(response: InviteResponse) -> Self {
        Response::Invite(response)
    }
}
/// Result of a log fetch: the logs as one string, or an error message.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum GetTaskLogsResponse {
    Ok(String),
    Error(String),
}
impl From<GetTaskLogsResponse> for Response {
    fn from(response: GetTaskLogsResponse) -> Self {
        Response::GetTaskLogs(response)
    }
}
/// Result of a restart request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RestartTaskResponse {
    Ok,
    Error(String),
}
impl From<RestartTaskResponse> for Response {
    fn from(response: RestartTaskResponse) -> Self {
        Response::RestartTask(response)
    }
}
/// Opaque application-defined request payload for the General protocol.
/// The `data` field is private; construction happens elsewhere in the crate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneralRequest {
    data: Vec<u8>,
}
impl From<GeneralRequest> for Request {
    fn from(request: GeneralRequest) -> Self {
        Request::General(request)
    }
}
/// Opaque application-defined response payload for the General protocol.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GeneralResponse {
    data: Vec<u8>,
}
impl From<GeneralResponse> for Response {
    fn from(response: GeneralResponse) -> Self {
        Response::General(response)
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/lib.rs | crates/shared/src/lib.rs | pub mod models;
pub mod p2p;
pub mod security;
pub mod utils;
pub mod web3;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/node.rs | crates/shared/src/models/node.rs | use alloy::primitives::U256;
use anyhow::anyhow;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::Deref;
use std::str::FromStr;
use utoipa::{openapi::Object, ToSchema};
/// A worker node record as registered in discovery.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct Node {
    pub id: String,
    // the node's on-chain address.
    pub provider_address: String,
    pub ip_address: String,
    pub port: u16,
    pub compute_pool_id: u32,
    // Hardware inventory; None when the node has not reported specs.
    pub compute_specs: Option<ComputeSpecs>,
    // Optional p2p identity/dial addresses — presumably the worker's
    // libp2p peer id and multiaddrs; confirm against the p2p crate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub worker_p2p_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub worker_p2p_addresses: Option<Vec<String>>,
}
/// Hardware inventory reported by a node; compared against
/// [`ComputeRequirements`] via `ComputeSpecs::meets`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, ToSchema)]
pub struct ComputeSpecs {
    // GPU specifications
    pub gpu: Option<GpuSpecs>,
    // CPU specifications
    pub cpu: Option<CpuSpecs>,
    // Memory and storage specifications
    pub ram_mb: Option<u32>,
    pub storage_gb: Option<u32>,
    pub storage_path: String,
}
impl Default for ComputeSpecs {
    // Manual impl (rather than derive) so `storage_path` gets a
    // non-empty default location.
    fn default() -> Self {
        Self {
            gpu: None,
            cpu: None,
            ram_mb: None,
            storage_gb: None,
            storage_path: "/var/lib/prime-worker".to_string(),
        }
    }
}
/// Requirements a node must satisfy; parsed from a string via `FromStr`.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct ComputeRequirements {
    // List of alternative GPU requirements (OR logic)
    pub gpu: Vec<GpuRequirements>,
    pub cpu: Option<CpuSpecs>,
    pub ram_mb: Option<u32>,
    pub storage_gb: Option<u32>,
}
/// One alternative GPU requirement; all `Some` fields must hold
/// (see `GpuSpecs::meets`).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct GpuRequirements {
    pub count: Option<u32>,
    pub model: Option<String>,
    // per Card
    pub memory_mb: Option<u32>,
    pub memory_mb_min: Option<u32>,
    pub memory_mb_max: Option<u32>,
    // System wide GPU memory per gpu type
    pub total_memory_min: Option<u32>,
    pub total_memory_max: Option<u32>,
    pub indices: Option<Vec<u32>>,
}
/// GPU hardware actually present on a node.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct GpuSpecs {
    pub count: Option<u32>,
    pub model: Option<String>,
    pub memory_mb: Option<u32>,
    pub indices: Option<Vec<u32>>,
}
// Human-readable rendering of a requirement set, one line per category.
impl fmt::Display for ComputeRequirements {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.gpu.is_empty() {
            writeln!(f, "GPU Requirements (any of the following):")?;
            for (i, gpu) in self.gpu.iter().enumerate() {
                writeln!(f, "  Option {}: {}", i + 1, gpu)?;
            }
        }
        if let Some(cpu) = &self.cpu {
            writeln!(f, "CPU: {cpu}")?;
        }
        if let Some(ram) = self.ram_mb {
            writeln!(f, "RAM: {ram} MB")?;
        }
        if let Some(storage) = self.storage_gb {
            writeln!(f, "Storage: {storage} GB")?;
        }
        Ok(())
    }
}
// Comma-joined summary of the set fields of one GPU requirement option.
// Note: min/max and total-memory fields are not rendered.
impl fmt::Display for GpuRequirements {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut parts = Vec::new();
        if let Some(count) = self.count {
            parts.push(format!("{count} GPU(s)"));
        }
        if let Some(model) = &self.model {
            parts.push(format!("Model: {model}"));
        }
        if let Some(memory) = self.memory_mb {
            parts.push(format!("Memory: {memory} MB"));
        }
        if parts.is_empty() {
            write!(f, "No specific GPU requirements")
        } else {
            write!(f, "{}", parts.join(", "))
        }
    }
}
// Same rendering for actual GPU specs.
// NOTE(review): the empty-case text "No specific GPU requirements" looks
// copy-pasted from GpuRequirements — for specs, "No GPU information"
// would read better; left unchanged to preserve output.
impl fmt::Display for GpuSpecs {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut parts = Vec::new();
        if let Some(count) = self.count {
            parts.push(format!("{count} GPU(s)"));
        }
        if let Some(model) = &self.model {
            parts.push(format!("Model: {model}"));
        }
        if let Some(memory) = self.memory_mb {
            parts.push(format!("Memory: {memory} MB"));
        }
        if parts.is_empty() {
            write!(f, "No specific GPU requirements")
        } else {
            write!(f, "{}", parts.join(", "))
        }
    }
}
/// CPU inventory / requirement (used for both roles).
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct CpuSpecs {
    pub cores: Option<u32>,
    pub model: Option<String>,
}
impl fmt::Display for CpuSpecs {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut parts = Vec::new();
        if let Some(cores) = self.cores {
            parts.push(format!("{cores} cores"));
        }
        if let Some(model) = &self.model {
            parts.push(format!("Model: {model}"));
        }
        if parts.is_empty() {
            write!(f, "No specific CPU requirements")
        } else {
            write!(f, "{}", parts.join(", "))
        }
    }
}
// Parser for compute requirements string
impl FromStr for ComputeRequirements {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut requirements = ComputeRequirements::default();
let mut current_gpu_spec = GpuRequirements::default();
let mut gpu_spec_started = false;
for part in s.split(';') {
let part = part.trim();
if part.is_empty() {
continue;
}
let parts: Vec<&str> = part.splitn(2, '=').collect();
if parts.len() != 2 {
return Err(anyhow!("Invalid key-value pair format: '{}'", part));
}
let key = parts[0].trim();
let value = parts[1].trim();
match key {
// --- GPU Specifications ---
"gpu:count" => {
// If we have a complete GPU spec, push it before starting a new one
if gpu_spec_started && current_gpu_spec.count.is_some() {
requirements.gpu.push(current_gpu_spec);
current_gpu_spec = GpuRequirements::default();
}
gpu_spec_started = true;
current_gpu_spec.count = Some(
value
.parse::<u32>()
.map_err(|e| anyhow!("Invalid gpu:count value '{}': {}", value, e))?,
);
}
"gpu:model" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
current_gpu_spec.model = Some(value.to_string());
}
"gpu:memory_mb" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
if current_gpu_spec.memory_mb_min.is_some()
|| current_gpu_spec.memory_mb_max.is_some()
{
return Err(anyhow!(
"Cannot specify both exact memory and min/max memory"
));
}
current_gpu_spec.memory_mb =
Some(value.parse::<u32>().map_err(|e| {
anyhow!("Invalid gpu:memory_mb value '{}': {}", value, e)
})?);
}
"gpu:memory_mb_min" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
if current_gpu_spec.memory_mb.is_some() {
return Err(anyhow!(
"Cannot specify both exact memory and min/max memory"
));
}
if current_gpu_spec.memory_mb_max.is_some()
&& current_gpu_spec.memory_mb_max.unwrap() < value.parse::<u32>().unwrap()
{
return Err(anyhow!(
"Invalid gpu:memory_mb_min value '{}': {}",
value,
"min value is greater than max value"
));
}
current_gpu_spec.memory_mb_min = Some(value.parse::<u32>().map_err(|e| {
anyhow!("Invalid gpu:memory_mb_min value '{}': {}", value, e)
})?);
}
"gpu:memory_mb_max" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
// Ensure we don't have exact memory already set
if current_gpu_spec.memory_mb.is_some() {
return Err(anyhow!(
"Cannot specify both exact memory and min/max memory"
));
}
if current_gpu_spec.memory_mb_min.is_some()
&& current_gpu_spec.memory_mb_min.unwrap() > value.parse::<u32>().unwrap()
{
return Err(anyhow!(
"Invalid gpu:memory_mb_max value '{}': {}",
value,
"max value is less than min value"
));
}
current_gpu_spec.memory_mb_max = Some(value.parse::<u32>().map_err(|e| {
anyhow!("Invalid gpu:memory_mb_max value '{}': {}", value, e)
})?);
}
// --- Total GPU Memory Specifications ---
"gpu:total_memory_min" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
if current_gpu_spec.total_memory_max.is_some()
&& current_gpu_spec.total_memory_max.unwrap()
< value.parse::<u32>().unwrap()
{
return Err(anyhow!(
"Invalid gpu:total_memory_min value '{}': {}",
value,
"min value is greater than max value"
));
}
current_gpu_spec.total_memory_min =
Some(value.parse::<u32>().map_err(|e| {
anyhow!("Invalid gpu:total_memory_min value '{}': {}", value, e)
})?);
}
"gpu:total_memory_max" => {
if !gpu_spec_started {
gpu_spec_started = true;
}
if current_gpu_spec.total_memory_min.is_some()
&& current_gpu_spec.total_memory_min.unwrap()
> value.parse::<u32>().unwrap()
{
return Err(anyhow!(
"Invalid gpu:total_memory_max value '{}': {}",
value,
"max value is less than min value"
));
}
current_gpu_spec.total_memory_max =
Some(value.parse::<u32>().map_err(|e| {
anyhow!("Invalid gpu:total_memory_max value '{}': {}", value, e)
})?);
}
// --- CPU Specifications ---
"cpu:cores" => {
let mut cpu = requirements.cpu.take().unwrap_or_default();
cpu.cores = Some(
value
.parse::<u32>()
.map_err(|e| anyhow!("Invalid cpu:cores value '{}': {}", value, e))?,
);
requirements.cpu = Some(cpu);
}
// --- Memory and Storage ---
"ram_mb" => {
requirements.ram_mb = Some(
value
.parse::<u32>()
.map_err(|e| anyhow!("Invalid ram_mb value '{}': {}", value, e))?,
);
}
"storage_gb" => {
requirements.storage_gb = Some(
value
.parse::<u32>()
.map_err(|e| anyhow!("Invalid storage_gb value '{}': {}", value, e))?,
);
}
_ => return Err(anyhow!("Unknown requirement key: '{}'", key)),
}
}
// Push the last GPU spec if it exists and has any properties set
if gpu_spec_started
&& (current_gpu_spec.count.is_some()
|| current_gpu_spec.model.is_some()
|| current_gpu_spec.memory_mb.is_some()
|| current_gpu_spec.memory_mb_min.is_some()
|| current_gpu_spec.memory_mb_max.is_some()
|| current_gpu_spec.total_memory_min.is_some()
|| current_gpu_spec.total_memory_max.is_some())
{
requirements.gpu.push(current_gpu_spec);
}
Ok(requirements)
}
}
use log::{debug, info};
impl ComputeSpecs {
    /// Checks if the current compute specs meet the given requirements.
    ///
    /// CPU, RAM, and storage are "at least" checks against any required
    /// value; the GPU requirement list uses OR logic — the node's GPU
    /// must satisfy at least one listed option. Mismatches are logged
    /// (info for CPU/RAM/storage, debug for GPU) before returning false.
    pub fn meets(&self, requirements: &ComputeRequirements) -> bool {
        // Check CPU (if required)
        if let Some(req_cpu) = &requirements.cpu {
            if !self
                .cpu
                .as_ref()
                .is_some_and(|spec_cpu| spec_cpu.meets(req_cpu))
            {
                info!(
                    "CPU requirements not met: required {:?}, have {:?}",
                    req_cpu, self.cpu
                );
                return false;
            }
        }
        // Check RAM (if required)
        if let Some(req_ram) = requirements.ram_mb {
            if self.ram_mb.is_none_or(|spec_ram| spec_ram < req_ram) {
                info!(
                    "RAM requirements not met: required {} MB, have {:?} MB",
                    req_ram, self.ram_mb
                );
                return false;
            }
        }
        // Check Storage (if required)
        if let Some(req_storage) = requirements.storage_gb {
            if self
                .storage_gb
                .is_none_or(|spec_storage| spec_storage < req_storage)
            {
                info!(
                    "Storage requirements not met: required {} GB, have {:?} GB",
                    req_storage, self.storage_gb
                );
                return false;
            }
        }
        // Check GPU (OR logic applied here)
        if !requirements.gpu.is_empty() {
            // Requirements specify GPUs, so the node must have a GPU spec...
            let Some(spec_gpu) = &self.gpu else {
                debug!("GPU requirements not met: GPU required but none available");
                return false;
            };
            // ...and that GPU spec must meet *at least one* of the requirement options.
            if !requirements
                .gpu
                .iter()
                .any(|req_gpu| spec_gpu.meets(req_gpu))
            {
                debug!("GPU requirements not met");
                return false;
            }
        }
        // If requirements.gpu is empty, no specific GPU is needed, so this part passes.
        // All checked requirements are met
        true
    }
}
impl GpuSpecs {
    /// Checks if the current GPU spec meets a single required GPU spec.
    ///
    /// Every `Some` field in `requirement` must be satisfied; absent
    /// requirement fields are treated as "don't care".
    fn meets(&self, requirement: &GpuRequirements) -> bool {
        // Check count (if required): must match exactly. A node with no
        // count only passes when the required count is 0.
        if let Some(req_count) = requirement.count {
            match self.count {
                None => {
                    if req_count > 0 {
                        return false;
                    }
                }
                Some(spec_count) => {
                    if spec_count != req_count {
                        return false;
                    }
                }
            }
        }
        // Check model (if required). The requirement may be a comma-separated
        // list of acceptable models; matching is case-insensitive, treats
        // spaces and underscores as interchangeable, and accepts a substring
        // match in either direction (so "a100" matches "nvidia_a100_80gb_pcie").
        if let Some(req_model) = &requirement.model {
            if !(self.model.as_ref().is_some_and(|spec_model| {
                let normalized_spec = spec_model.to_lowercase().replace(' ', "_");
                req_model
                    .split(',')
                    .map(|m| m.trim().to_lowercase().replace(' ', "_"))
                    .any(|normalized_req| {
                        // Try both with and without underscores for flexible matching
                        let spec_no_underscore = normalized_spec.replace('_', "");
                        let req_no_underscore = normalized_req.replace('_', "");
                        normalized_spec.contains(&normalized_req)
                            || normalized_req.contains(&normalized_spec)
                            || spec_no_underscore.contains(&req_no_underscore)
                            || req_no_underscore.contains(&spec_no_underscore)
                    })
            })) {
                return false;
            }
        }
        // Check memory per GPU (if required): exact floor first...
        if let Some(req_mem) = requirement.memory_mb {
            if self.memory_mb.is_none_or(|spec_mem| spec_mem < req_mem) {
                return false;
            }
        }
        // ...then an optional [min, max] range.
        if let Some(req_min) = requirement.memory_mb_min {
            if self.memory_mb.is_none_or(|spec_mem| spec_mem < req_min) {
                return false;
            }
        }
        if let Some(req_max) = requirement.memory_mb_max {
            if self.memory_mb.is_none_or(|spec_mem| spec_mem > req_max) {
                return false;
            }
        }
        // Check total memory (count * per-card memory); only applicable
        // when both values are known, mirroring the original behavior.
        // Widen to u64 before multiplying: the u32 product could overflow
        // (panic in debug builds, silently wrap in release).
        if let (Some(gpu_count), Some(gpu_memory)) = (self.count, self.memory_mb) {
            let total_memory = u64::from(gpu_count) * u64::from(gpu_memory);
            if let Some(req_total_min) = requirement.total_memory_min {
                if total_memory < u64::from(req_total_min) {
                    return false;
                }
            }
            if let Some(req_total_max) = requirement.total_memory_max {
                if total_memory > u64::from(req_total_max) {
                    return false;
                }
            }
        }
        // All checked fields meet the requirement
        true
    }
}
impl CpuSpecs {
    /// Returns true when this CPU satisfies `requirement`.
    /// Only the core count is compared ("at least" semantics); the
    /// `model` field plays no role in matching.
    fn meets(&self, requirement: &CpuSpecs) -> bool {
        match (requirement.cores, self.cores) {
            // Required cores known and ours known: need at least as many.
            (Some(required), Some(available)) => available >= required,
            // Cores required but we report none: fail.
            (Some(_), None) => false,
            // No core requirement: anything passes.
            (None, _) => true,
        }
    }
}
/// Geographic location metadata attached to a discovered node.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct NodeLocation {
    pub latitude: f64,
    pub longitude: f64,
    pub city: Option<String>,
    pub region: Option<String>,
    pub country: Option<String>,
}
/// A [`Node`] plus discovery-service bookkeeping. The inner node is
/// flattened into the same JSON object, and `Deref` exposes its fields.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Default, ToSchema)]
pub struct DiscoveryNode {
    #[serde(flatten)]
    pub node: Node,
    pub is_validated: bool,
    pub is_active: bool,
    #[serde(default)]
    pub is_provider_whitelisted: bool,
    #[serde(default)]
    pub is_blacklisted: bool,
    #[serde(default)]
    pub last_updated: Option<DateTime<Utc>>,
    #[serde(default)]
    pub created_at: Option<DateTime<Utc>>,
    #[serde(default)]
    pub location: Option<NodeLocation>,
    // NOTE(review): unlike the optional fields above, this one has no
    // #[serde(default)] — confirm all payloads include `latest_balance`.
    #[schema(schema_with = u256_schema)]
    pub latest_balance: Option<U256>,
}
/// OpenAPI schema for `U256` fields: documented to API consumers as a
/// decimal string rather than a JSON number.
fn u256_schema() -> Object {
    utoipa::openapi::ObjectBuilder::new()
        .schema_type(utoipa::openapi::schema::Type::String)
        .description(Some("A U256 value represented as a decimal string"))
        .examples(Some(serde_json::json!("1000000000000000000")))
        .build()
}
impl DiscoveryNode {
    /// Returns a copy of this record with the inner `node` replaced by
    /// `new_node` and `last_updated` stamped with the current time; all
    /// other discovery metadata is carried over unchanged.
    pub fn with_updated_node(&self, new_node: Node) -> Self {
        DiscoveryNode {
            node: new_node,
            last_updated: Some(Utc::now()),
            ..self.clone()
        }
    }
}
// Deref to the inner `Node` so callers can read node fields directly
// (e.g. `discovery_node.ip_address`).
impl Deref for DiscoveryNode {
    type Target = Node;
    fn deref(&self) -> &Self::Target {
        &self.node
    }
}
// Wraps a freshly seen node: all flags start false, `created_at` is
// stamped now, and everything else is unknown.
impl From<Node> for DiscoveryNode {
    fn from(node: Node) -> Self {
        DiscoveryNode {
            node,
            is_validated: false, // Default values for new discovery nodes
            is_active: false,
            is_provider_whitelisted: false,
            is_blacklisted: false,
            last_updated: None,
            created_at: Some(Utc::now()),
            location: None,
            latest_balance: None,
        }
    }
}
// --- Tests ---
#[cfg(test)]
mod tests {
use super::*;
// Helper to create ComputeSpecs for testing
fn create_compute_specs(
gpu_count: Option<u32>,
gpu_model: Option<&str>,
gpu_mem: Option<u32>,
cpu_cores: Option<u32>,
ram: Option<u32>,
storage: Option<u32>,
) -> ComputeSpecs {
ComputeSpecs {
gpu: if gpu_count.is_some() || gpu_model.is_some() || gpu_mem.is_some() {
Some(GpuSpecs {
count: gpu_count,
model: gpu_model.map(String::from),
memory_mb: gpu_mem,
..Default::default()
})
} else {
None
},
cpu: if cpu_cores.is_some() {
Some(CpuSpecs {
cores: cpu_cores,
model: None,
})
} else {
None
},
ram_mb: ram,
storage_gb: storage,
storage_path: "/var/lib/prime-worker-test".to_string(),
}
}
#[test]
fn test_requirements_parser_simple() {
let req_str = "gpu:count=1;gpu:model=A100;gpu:memory_mb=40000;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert_eq!(requirements.gpu.len(), 1);
let gpu_req = &requirements.gpu[0];
assert_eq!(gpu_req.count, Some(1));
assert_eq!(gpu_req.model, Some("A100".to_string()));
assert_eq!(gpu_req.memory_mb, Some(40000));
assert_eq!(requirements.ram_mb, Some(64000));
assert_eq!(requirements.storage_gb, Some(500));
assert!(requirements.cpu.is_none());
}
#[test]
fn test_requirements_parser_gpu_or_logic() {
let req_str = "gpu:count=8;gpu:model=H100;gpu:memory_mb=80000 ; gpu:count=4;gpu:model=H100;gpu:memory_mb=80000 ; ram_mb=128000; storage_gb=1000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert_eq!(requirements.gpu.len(), 2);
// First GPU option
assert_eq!(requirements.gpu[0].count, Some(8));
assert_eq!(requirements.gpu[0].model, Some("H100".to_string()));
assert_eq!(requirements.gpu[0].memory_mb, Some(80000));
// Second GPU option
assert_eq!(requirements.gpu[1].count, Some(4));
assert_eq!(requirements.gpu[1].model, Some("H100".to_string()));
// Memory wasn't repeated for the second option in the *string*, parser should pick it up if specified like gpu:count=4;gpu:model=H100;gpu:memory_mb=80000
// Let's re-run with memory specified for the second option
let req_str_mem_repeat = "gpu:count=8;gpu:model=H100;gpu:memory_mb=80000 ; gpu:count=4;gpu:model=H100;gpu:memory_mb=80000 ; ram_mb=128000; storage_gb=1000";
let requirements_mem_repeat = ComputeRequirements::from_str(req_str_mem_repeat).unwrap();
assert_eq!(requirements_mem_repeat.gpu.len(), 2);
assert_eq!(requirements_mem_repeat.gpu[1].memory_mb, Some(80000));
// Common requirements
assert_eq!(requirements.ram_mb, Some(128000));
assert_eq!(requirements.storage_gb, Some(1000));
}
#[test]
fn test_requirements_parser_gpu_minimal() {
// Only specify count for the second option
let req_str = "gpu:count=8;gpu:model=H100 ; gpu:count=16; ram_mb=128000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert_eq!(requirements.gpu.len(), 2);
assert_eq!(requirements.gpu[0].count, Some(8));
assert_eq!(requirements.gpu[0].model, Some("H100".to_string()));
assert!(requirements.gpu[0].memory_mb.is_none()); // No memory specified for first
assert_eq!(requirements.gpu[1].count, Some(16));
assert!(requirements.gpu[1].model.is_none()); // No model specified for second
assert!(requirements.gpu[1].memory_mb.is_none()); // No memory specified for second
assert_eq!(requirements.ram_mb, Some(128000));
}
#[test]
fn test_requirements_parser_no_gpu() {
let req_str = "ram_mb=32000;storage_gb=250;cpu:cores=8";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(requirements.gpu.is_empty());
assert_eq!(requirements.ram_mb, Some(32000));
assert_eq!(requirements.storage_gb, Some(250));
assert!(requirements.cpu.is_some());
assert_eq!(requirements.cpu.as_ref().unwrap().cores, Some(8));
}
#[test]
fn test_requirements_parser_invalid() {
assert!(ComputeRequirements::from_str("gpu:count=abc").is_err());
assert!(ComputeRequirements::from_str("ram_mb=100;gpu_model=xyz").is_err()); // Invalid key
assert!(ComputeRequirements::from_str("gpu:count=1=2").is_err()); // Invalid format
}
// --- Meeting Requirements Tests ---
#[test]
fn test_meets_exact_match() {
let specs = create_compute_specs(
Some(4),
Some("nvidia_a100_80gb_pcie"),
Some(40000),
Some(16),
Some(64000),
Some(500),
);
let req_str = "gpu:count=4;gpu:model=A100;gpu:memory_mb=40000;cpu:cores=16;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs.meets(&requirements));
}
#[test]
fn test_a100_range_case() {
let specs = create_compute_specs(
Some(1),
Some("nvidia_a100_80gb_pcie"),
Some(40000),
Some(16),
Some(64000),
Some(700),
);
let req_str = "gpu:count=4;gpu:model=a100,h100,h200;gpu:count=1;gpu:model=a100,h100,h200;storage_gb=700";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs.meets(&requirements));
}
#[test]
fn test_meets_more_than_required() {
let specs = create_compute_specs(
Some(8),
Some("NVIDIA A100 80GB"),
Some(80000),
Some(32),
Some(128000),
Some(1000),
);
// Requirements are lower
let req_str = "gpu:count=8;gpu:model=A100;gpu:memory_mb=40000;cpu:cores=16;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs.meets(&requirements));
}
#[test]
fn test_meets_fails_ram() {
let specs = create_compute_specs(
Some(4),
Some("A100"),
Some(40000),
Some(16),
Some(32000),
Some(500),
); // RAM too low
let req_str = "gpu:count=4;gpu:model=A100;gpu:memory_mb=40000;cpu:cores=16;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(!specs.meets(&requirements));
}
#[test]
fn test_meets_fails_gpu_count() {
let specs = create_compute_specs(
Some(2),
Some("A100"),
Some(40000),
Some(16),
Some(64000),
Some(500),
); // GPU count too low
let req_str = "gpu:count=4;gpu:model=A100;gpu:memory_mb=40000;cpu:cores=16;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(!specs.meets(&requirements));
}
#[test]
fn test_meets_fails_gpu_model() {
let specs = create_compute_specs(
Some(4),
Some("RTX 3090"),
Some(24000),
Some(16),
Some(64000),
Some(500),
); // Wrong GPU model
let req_str = "gpu:count=4;gpu:model=A100;gpu:memory_mb=40000;cpu:cores=16;ram_mb=64000;storage_gb=500";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(!specs.meets(&requirements));
}
#[test]
fn test_meets_gpu_or_option1() {
// Node has 8x H100
let specs = create_compute_specs(
Some(8),
Some("NVIDIA H100"),
Some(80000),
Some(64),
Some(256000),
Some(2000),
);
// Requirements allow 8x H100 OR 16x A100
let req_str = "gpu:count=8;gpu:model=H100;gpu:memory_mb=80000 ; gpu:count=16;gpu:model=A100;gpu:memory_mb=80000 ; ram_mb=128000; storage_gb=1000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs.meets(&requirements)); // Should meet the first GPU option
}
#[test]
fn test_meets_gpu_or_option2() {
// Node has 16x A100
let specs = create_compute_specs(
Some(16),
Some("NVIDIA A100"),
Some(80000),
Some(64),
Some(256000),
Some(2000),
);
// Requirements allow 8x H100 OR 16x A100
let req_str = "gpu:count=8;gpu:model=H100;gpu:memory_mb=80000 ; gpu:count=16;gpu:model=A100;gpu:memory_mb=80000 ; ram_mb=128000; storage_gb=1000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs.meets(&requirements)); // Should meet the second GPU option
}
#[test]
fn test_meets_gpu_or_fails_both() {
// Node has 4x A100
let specs = create_compute_specs(
Some(4),
Some("NVIDIA A100"),
Some(80000),
Some(64),
Some(256000),
Some(2000),
);
// Requirements allow 8x H100 OR 16x A100
let req_str = "gpu:count=8;gpu:model=H100;gpu:memory_mb=80000 ; gpu:count=16;gpu:model=A100;gpu:memory_mb=80000 ; ram_mb=128000; storage_gb=1000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(!specs.meets(&requirements)); // Fails both GPU options (count is too low)
}
#[test]
fn test_meets_no_gpu_required() {
// Node has a GPU
let specs_with_gpu = create_compute_specs(
Some(1),
Some("RTX 3060"),
Some(12000),
Some(8),
Some(32000),
Some(500),
);
// Node has no GPU
let specs_no_gpu = create_compute_specs(None, None, None, Some(8), Some(32000), Some(500));
// Requirement doesn't mention GPU
let req_str = "ram_mb=16000;storage_gb=200;cpu:cores=4";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(specs_with_gpu.meets(&requirements)); // Meets because GPU isn't required
assert!(specs_no_gpu.meets(&requirements)); // Meets because GPU isn't required
}
#[test]
fn test_meets_gpu_required_node_has_none() {
// Node has no GPU
let specs = create_compute_specs(None, None, None, Some(8), Some(32000), Some(500));
// Requirement needs a GPU
let req_str = "gpu:count=1;gpu:model=A100;ram_mb=16000";
let requirements = ComputeRequirements::from_str(req_str).unwrap();
assert!(!specs.meets(&requirements)); // Fails because node lacks GPU
}
#[test]
fn test_meets_optional_fields_in_req() {
// Node has specific specs
let specs = create_compute_specs(
Some(4),
Some("NVIDIA H100"),
Some(80000),
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/storage.rs | crates/shared/src/models/storage.rs | use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
#[derive(Deserialize, Serialize, Debug, ToSchema)]
pub struct RequestUploadRequest {
pub file_name: String,
pub file_size: u64,
pub file_type: String,
pub sha256: String,
pub task_id: String,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/api.rs | crates/shared/src/models/api.rs | use actix_web::HttpResponse;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
pub struct ApiResponse<T: Serialize> {
pub success: bool,
pub data: T,
}
impl<T: Serialize> ApiResponse<T> {
pub fn new(success: bool, data: T) -> Self {
ApiResponse { success, data }
}
}
impl<T: Serialize> From<ApiResponse<T>> for HttpResponse {
fn from(response: ApiResponse<T>) -> Self {
HttpResponse::Ok().json(response)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/mod.rs | crates/shared/src/models/mod.rs | pub mod api;
pub mod heartbeat;
pub mod metric;
pub mod node;
pub mod storage;
pub mod task;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/task.rs | crates/shared/src/models/task.rs | use std::collections::HashMap;
use chrono::Utc;
use redis::{ErrorKind, FromRedisValue, RedisError, RedisResult, RedisWrite, ToRedisArgs, Value};
use serde::{Deserialize, Serialize};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use utoipa::ToSchema;
use uuid::Uuid;
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, ToSchema)]
pub enum TaskState {
PENDING,
PULLING,
RUNNING,
COMPLETED,
FAILED,
PAUSED,
RESTARTING,
#[default]
UNKNOWN,
}
impl From<&str> for TaskState {
fn from(s: &str) -> Self {
match s {
"PENDING" => TaskState::PENDING,
"PULLING" => TaskState::PULLING,
"RUNNING" => TaskState::RUNNING,
"COMPLETED" => TaskState::COMPLETED,
"FAILED" => TaskState::FAILED,
"PAUSED" => TaskState::PAUSED,
"RESTARTING" => TaskState::RESTARTING,
"UNKNOWN" | &_ => TaskState::UNKNOWN,
}
}
}
impl std::fmt::Display for TaskState {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let state_str = match self {
TaskState::PENDING => "PENDING",
TaskState::PULLING => "PULLING",
TaskState::RUNNING => "RUNNING",
TaskState::COMPLETED => "COMPLETED",
TaskState::FAILED => "FAILED",
TaskState::PAUSED => "PAUSED",
TaskState::RESTARTING => "RESTARTING",
TaskState::UNKNOWN => "UNKNOWN",
};
write!(f, "{state_str}")
}
}
// Scheduling config
// Proper typing and validation currently missing
// Issue: https://github.com/PrimeIntellect-ai/protocol/issues/338
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, ToSchema)]
pub struct SchedulingConfig {
pub plugins: Option<HashMap<String, HashMap<String, Vec<String>>>>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, ToSchema)]
pub struct VolumeMount {
/// Name/path of the volume on the host (supports label replacements)
pub host_path: String,
/// Path where the volume should be mounted in the container
pub container_path: String,
}
impl VolumeMount {
/// Replace labels in the host_path with actual values
/// Note: GROUP_ID replacement is handled by the node groups plugin
/// (temporary until we have an expander trait)
pub fn replace_labels(&self, task_id: &str, node_address: Option<&str>) -> Self {
let mut host_path = self.host_path.clone();
let mut container_path = self.container_path.clone();
// Replace ${TASK_ID} with actual task ID
host_path = host_path.replace("${TASK_ID}", task_id);
container_path = container_path.replace("${TASK_ID}", task_id);
// Replace ${NODE_ADDRESS} with actual node address if provided
if let Some(addr) = node_address {
host_path = host_path.replace("${NODE_ADDRESS}", addr);
container_path = container_path.replace("${NODE_ADDRESS}", addr);
}
// Get current timestamp for ${TIMESTAMP}
let timestamp = chrono::Utc::now().timestamp().to_string();
host_path = host_path.replace("${TIMESTAMP}", ×tamp);
container_path = container_path.replace("${TIMESTAMP}", ×tamp);
Self {
host_path,
container_path,
}
}
/// Validate the volume mount configuration
pub fn validate(&self) -> Result<(), String> {
if self.host_path.is_empty() {
return Err("Host path cannot be empty".to_string());
}
if self.container_path.is_empty() {
return Err("Container path cannot be empty".to_string());
}
// Check for supported variables
let supported_vars = [
"${TASK_ID}",
"${GROUP_ID}",
"${TIMESTAMP}",
"${NODE_ADDRESS}",
];
let re = regex::Regex::new(r"\$\{[^}]+\}").unwrap();
// Check host_path
for cap in re.find_iter(&self.host_path) {
let var = cap.as_str();
if !supported_vars.contains(&var) {
return Err(format!(
"Volume mount host_path contains unsupported variable: {var}. Supported variables: {supported_vars:?}"
));
}
}
// Check container_path
for cap in re.find_iter(&self.container_path) {
let var = cap.as_str();
if !supported_vars.contains(&var) {
return Err(format!(
"Volume mount container_path contains unsupported variable: {var}. Supported variables: {supported_vars:?}"
));
}
}
Ok(())
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
pub struct TaskRequest {
pub image: String,
pub name: String,
pub env_vars: Option<std::collections::HashMap<String, String>>,
pub cmd: Option<Vec<String>>,
pub entrypoint: Option<Vec<String>>,
pub scheduling_config: Option<SchedulingConfig>,
pub storage_config: Option<StorageConfig>,
pub metadata: Option<TaskMetadata>,
pub volume_mounts: Option<Vec<VolumeMount>>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, ToSchema)]
pub struct TaskMetadata {
pub labels: Option<HashMap<String, String>>,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, ToSchema)]
pub struct Task {
pub name: String,
#[serde(default = "Uuid::new_v4")]
pub id: Uuid,
pub image: String,
pub env_vars: Option<std::collections::HashMap<String, String>>,
pub cmd: Option<Vec<String>>,
pub entrypoint: Option<Vec<String>>,
pub state: TaskState,
#[serde(default)]
pub created_at: i64,
#[serde(default)]
pub updated_at: Option<u64>,
#[serde(default)]
pub scheduling_config: Option<SchedulingConfig>,
#[serde(default)]
pub storage_config: Option<StorageConfig>,
#[serde(default)]
pub metadata: Option<TaskMetadata>,
#[serde(default)]
pub volume_mounts: Option<Vec<VolumeMount>>,
}
impl Task {
/// Generate a hash of the task configuration for comparison purposes
pub fn generate_config_hash(&self) -> u64 {
let mut hasher = DefaultHasher::new();
// Hash core configuration
self.image.hash(&mut hasher);
self.cmd.hash(&mut hasher);
self.entrypoint.hash(&mut hasher);
// Hash environment variables in sorted order for consistency
if let Some(env_vars) = &self.env_vars {
let mut sorted_env: Vec<_> = env_vars.iter().collect();
sorted_env.sort_by_key(|(k, _)| *k);
for (key, value) in sorted_env {
key.hash(&mut hasher);
value.hash(&mut hasher);
}
}
// Hash volume mounts in sorted order for consistency
if let Some(volume_mounts) = &self.volume_mounts {
let mut sorted_volumes: Vec<_> = volume_mounts.iter().collect();
sorted_volumes.sort_by(|a, b| {
a.host_path
.cmp(&b.host_path)
.then_with(|| a.container_path.cmp(&b.container_path))
});
for volume_mount in sorted_volumes {
volume_mount.host_path.hash(&mut hasher);
volume_mount.container_path.hash(&mut hasher);
}
}
hasher.finish()
}
}
impl Default for Task {
fn default() -> Self {
Self {
name: String::new(),
id: Uuid::new_v4(),
image: String::new(),
env_vars: None,
cmd: None,
entrypoint: None,
state: TaskState::default(),
created_at: 0,
updated_at: None,
scheduling_config: None,
storage_config: None,
metadata: None,
volume_mounts: None,
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, ToSchema)]
pub struct StorageConfig {
pub file_name_template: Option<String>,
}
impl StorageConfig {
pub fn validate(&self) -> Result<(), String> {
if let Some(template) = &self.file_name_template {
let valid_vars = [
"${ORIGINAL_NAME}",
"${NODE_GROUP_ID}",
"${NODE_GROUP_SIZE}",
"${NODE_GROUP_INDEX}",
"${TOTAL_UPLOAD_COUNT_AFTER}",
"${CURRENT_FILE_INDEX}",
];
let re = regex::Regex::new(r"\$\{[^}]+\}").unwrap();
for cap in re.find_iter(template) {
let var = cap.as_str();
if !valid_vars.contains(&var) {
return Err(format!(
"Storage config template contains invalid variable: {var}"
));
}
}
}
Ok(())
}
}
impl TryFrom<TaskRequest> for Task {
type Error = String;
fn try_from(request: TaskRequest) -> Result<Self, Self::Error> {
if let Some(storage_config) = &request.storage_config {
storage_config.validate()?;
}
if let Some(volume_mounts) = &request.volume_mounts {
for volume_mount in volume_mounts {
volume_mount.validate()?;
}
}
Ok(Task {
name: request.name,
id: Uuid::new_v4(),
image: request.image,
cmd: request.cmd,
entrypoint: request.entrypoint,
env_vars: request.env_vars,
state: TaskState::PENDING,
created_at: Utc::now().timestamp_millis(),
updated_at: None,
scheduling_config: request.scheduling_config,
storage_config: request.storage_config,
metadata: request.metadata,
volume_mounts: request.volume_mounts,
})
}
}
impl FromRedisValue for Task {
fn from_redis_value(v: &Value) -> RedisResult<Self> {
match v {
Value::BulkString(s) => {
let task: Task = serde_json::from_slice(s).map_err(|_| {
RedisError::from((
ErrorKind::TypeError,
"Failed to deserialize Task from string",
format!("Invalid JSON string: {s:?}"),
))
})?;
Ok(task)
}
_ => Err(RedisError::from((
ErrorKind::TypeError,
"Response type not compatible with Task",
format!("Received: {v:?}"),
))),
}
}
}
impl ToRedisArgs for Task {
fn write_redis_args<W>(&self, out: &mut W)
where
W: ?Sized + RedisWrite,
{
let task_json = serde_json::to_string(self).expect("Failed to serialize Task to JSON");
out.write_arg(task_json.as_bytes());
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_volume_mount_label_replacement() {
let volume_mount = VolumeMount {
host_path: "/host/data/${TASK_ID}".to_string(),
container_path: "/container/data/${TASK_ID}".to_string(),
};
let processed = volume_mount.replace_labels("task-123", Some("node-addr"));
assert_eq!(processed.host_path, "/host/data/task-123");
assert_eq!(processed.container_path, "/container/data/task-123");
}
#[test]
fn test_volume_mount_label_replacement_without_group() {
let volume_mount = VolumeMount {
host_path: "/host/data/${TASK_ID}".to_string(),
container_path: "/container/data/${TASK_ID}".to_string(),
};
let processed = volume_mount.replace_labels("task-789", None);
assert_eq!(processed.host_path, "/host/data/task-789");
assert_eq!(processed.container_path, "/container/data/task-789");
}
#[test]
fn test_volume_mount_with_timestamp() {
let volume_mount = VolumeMount {
host_path: "/host/logs/${TASK_ID}-${TIMESTAMP}".to_string(),
container_path: "/container/logs".to_string(),
};
let processed = volume_mount.replace_labels("task-123", None);
assert!(processed.host_path.starts_with("/host/logs/task-123-"));
assert!(processed.host_path.len() > "/host/logs/task-123-".len());
assert_eq!(processed.container_path, "/container/logs");
}
#[test]
fn test_volume_mount_validation_success() {
let volume_mount = VolumeMount {
host_path: "/host/data/${TASK_ID}".to_string(),
container_path: "/container/data".to_string(),
};
assert!(volume_mount.validate().is_ok());
}
#[test]
fn test_volume_mount_validation_with_node_address() {
let volume_mount = VolumeMount {
host_path: "/host/data/${NODE_ADDRESS}".to_string(),
container_path: "/container/data/${TASK_ID}".to_string(),
};
assert!(volume_mount.validate().is_ok());
}
#[test]
fn test_volume_mount_validation_empty_host_path() {
let volume_mount = VolumeMount {
host_path: "".to_string(),
container_path: "/container/data".to_string(),
};
assert!(volume_mount.validate().is_err());
assert_eq!(
volume_mount.validate().unwrap_err(),
"Host path cannot be empty"
);
}
#[test]
fn test_volume_mount_validation_empty_container_path() {
let volume_mount = VolumeMount {
host_path: "/host/data".to_string(),
container_path: "".to_string(),
};
assert!(volume_mount.validate().is_err());
assert_eq!(
volume_mount.validate().unwrap_err(),
"Container path cannot be empty"
);
}
#[test]
fn test_volume_mount_validation_unsupported_variable() {
let volume_mount = VolumeMount {
host_path: "/host/data/${UNSUPPORTED_VAR}".to_string(),
container_path: "/container/data".to_string(),
};
assert!(volume_mount.validate().is_err());
assert!(volume_mount
.validate()
.unwrap_err()
.contains("unsupported variable: ${UNSUPPORTED_VAR}"));
}
#[test]
fn test_task_with_volume_mounts() {
let task_request = TaskRequest {
image: "ubuntu:latest".to_string(),
name: "test-task".to_string(),
volume_mounts: Some(vec![
VolumeMount {
host_path: "/host/data/${TASK_ID}".to_string(),
container_path: "/data".to_string(),
},
VolumeMount {
host_path: "/host/logs/${TASK_ID}".to_string(),
container_path: "/logs".to_string(),
},
]),
..Default::default()
};
let task = Task::try_from(task_request).unwrap();
assert!(task.volume_mounts.is_some());
assert_eq!(task.volume_mounts.as_ref().unwrap().len(), 2);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/metric.rs | crates/shared/src/models/metric.rs | use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, ToSchema)]
pub struct MetricEntry {
pub key: MetricKey,
pub value: f64,
}
#[derive(Debug, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, ToSchema)]
pub struct MetricKey {
pub task_id: String,
pub label: String,
}
impl MetricEntry {
pub fn new(key: MetricKey, value: f64) -> Result<Self> {
let entry = Self { key, value };
entry.validate()?;
Ok(entry)
}
pub fn validate(&self) -> Result<()> {
if !self.value.is_finite() {
bail!("Value must be a finite number");
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_valid_metric() -> Result<()> {
let key = MetricKey {
task_id: "task1".to_string(),
label: "cpu".to_string(),
};
let valid_values = vec![1.0, 100.0, -5.0, 0.0];
for value in valid_values {
let entry = MetricEntry::new(key.clone(), value)?;
assert!(entry.validate().is_ok());
}
Ok(())
}
#[test]
fn test_invalid_metrics() {
let key = MetricKey {
task_id: "task1".to_string(),
label: "cpu".to_string(),
};
let invalid_values = vec![(f64::INFINITY, "infinite value"), (f64::NAN, "NaN value")];
for (value, case) in invalid_values {
let entry = MetricEntry::new(key.clone(), value);
assert!(entry.is_err(), "Should fail for {}", case);
}
}
#[test]
fn test_metric_key() {
let key1 = MetricKey {
task_id: "task1".to_string(),
label: "cpu".to_string(),
};
let key2 = MetricKey {
task_id: "task1".to_string(),
label: "cpu".to_string(),
};
let key3 = MetricKey {
task_id: "task2".to_string(),
label: "cpu".to_string(),
};
assert_eq!(key1, key2);
assert_ne!(key1, key3);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/models/heartbeat.rs | crates/shared/src/models/heartbeat.rs | use super::api::ApiResponse;
use super::metric::MetricEntry;
use super::task::Task;
use actix_web::HttpResponse;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
pub struct HeartbeatResponse {
pub current_task: Option<Task>,
}
impl From<HeartbeatResponse> for ApiResponse<HeartbeatResponse> {
fn from(response: HeartbeatResponse) -> Self {
ApiResponse::new(true, response)
}
}
impl From<HeartbeatResponse> for HttpResponse {
fn from(response: HeartbeatResponse) -> Self {
ApiResponse::new(true, response).into()
}
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, ToSchema)]
pub struct TaskDetails {
pub docker_image_id: Option<String>,
pub container_id: Option<String>,
pub container_status: Option<String>,
pub container_created_at: Option<i64>,
pub container_exit_code: Option<i64>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default, ToSchema)]
pub struct HeartbeatRequest {
pub address: String,
pub task_id: Option<String>,
pub task_state: Option<String>,
pub metrics: Option<Vec<MetricEntry>>,
#[serde(default)]
pub version: Option<String>,
pub timestamp: Option<u64>,
#[serde(default)]
pub p2p_id: Option<String>,
#[serde(default)]
pub task_details: Option<TaskDetails>,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/utils/mod.rs | crates/shared/src/utils/mod.rs | use std::collections::HashMap;
use async_trait::async_trait;
use std::sync::Arc;
use tokio::sync::Mutex;
pub mod google_cloud;
use anyhow::Result;
#[async_trait]
pub trait StorageProvider: Send + Sync {
/// Check if a file exists in storage
async fn file_exists(&self, object_path: &str) -> Result<bool>;
/// Generate a mapping file that maps SHA256 hash to original filename
async fn generate_mapping_file(&self, sha256: &str, file_name: &str) -> Result<String>;
/// Resolve mapping for a given SHA256 hash to get the original filename
async fn resolve_mapping_for_sha(&self, sha256: &str) -> Result<String>;
/// Generate a signed URL for uploading (optional for mock)
async fn generate_upload_signed_url(
&self,
object_path: &str,
content_type: Option<String>,
expiration: std::time::Duration,
max_bytes: Option<u64>,
) -> Result<String>;
}
pub struct MockStorageProvider {
mapping_files: Arc<Mutex<HashMap<String, String>>>, // Maps SHA256 to filename
files: Arc<Mutex<HashMap<String, String>>>, // Maps filepath to content
}
impl Default for MockStorageProvider {
fn default() -> Self {
Self::new()
}
}
impl MockStorageProvider {
pub fn new() -> Self {
Self {
mapping_files: Arc::new(Mutex::new(HashMap::new())),
files: Arc::new(Mutex::new(HashMap::new())),
}
}
pub fn with_data(
mapping_files: HashMap<String, String>,
files: HashMap<String, String>,
) -> Self {
Self {
mapping_files: Arc::new(Mutex::new(mapping_files)),
files: Arc::new(Mutex::new(files)),
}
}
pub async fn add_mapping_file(&self, sha256: &str, file_name: &str) {
let mut mappings = self.mapping_files.lock().await;
mappings.insert(sha256.to_string(), file_name.to_string());
}
pub async fn add_file(&self, path: &str, content: &str) {
let mut files = self.files.lock().await;
files.insert(path.to_string(), content.to_string());
}
}
#[async_trait]
impl StorageProvider for MockStorageProvider {
async fn file_exists(&self, object_path: &str) -> Result<bool> {
let files = self.files.lock().await;
Ok(files.contains_key(object_path))
}
async fn generate_mapping_file(&self, sha256: &str, file_name: &str) -> Result<String> {
// Store the mapping of SHA256 to filename
let mapping_path = format!("mapping/{sha256}");
self.add_mapping_file(sha256, file_name).await;
// Also store the mapping file content in our mock storage
let mapping_content = format!("{sha256}:{file_name}");
self.add_file(&mapping_path, &mapping_content).await;
Ok(mapping_path)
}
async fn resolve_mapping_for_sha(&self, sha256: &str) -> Result<String> {
// Retrieve the original filename from the mapping
let mappings = self.mapping_files.lock().await;
mappings
.get(sha256)
.cloned()
.ok_or_else(|| anyhow::anyhow!("No mapping found for SHA256: {}", sha256))
}
async fn generate_upload_signed_url(
&self,
object_path: &str,
_content_type: Option<String>,
_expiration: std::time::Duration,
_max_bytes: Option<u64>,
) -> Result<String> {
// For a mock, we can return a fake signed URL
Ok(format!(
"https://mock-storage.example.com/upload/{object_path}"
))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_mock_storage_provider() {
let provider = MockStorageProvider::new();
provider.add_mapping_file("sha256", "file.txt").await;
provider.add_file("file.txt", "content").await;
let map_file_link = provider.resolve_mapping_for_sha("sha256").await.unwrap();
println!("map_file_link: {}", map_file_link);
assert_eq!(map_file_link, "file.txt");
assert_eq!(
provider.resolve_mapping_for_sha("sha256").await.unwrap(),
"file.txt"
);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/utils/google_cloud.rs | crates/shared/src/utils/google_cloud.rs | use anyhow::Result;
use async_trait::async_trait;
use base64::{engine::general_purpose, Engine as _};
use google_cloud_storage::client::google_cloud_auth::credentials::CredentialsFile;
use google_cloud_storage::client::{Client, ClientConfig};
use google_cloud_storage::http::objects::download::Range;
use google_cloud_storage::http::objects::get::GetObjectRequest;
use google_cloud_storage::http::objects::upload::{Media, UploadObjectRequest, UploadType};
use google_cloud_storage::sign::{SignedURLMethod, SignedURLOptions};
use log::debug;
use std::time::Duration;
use super::StorageProvider;
#[derive(Clone)]
pub struct GcsStorageProvider {
bucket: String,
client: Client,
}
impl GcsStorageProvider {
pub async fn new(bucket: &str, credentials_base64: &str) -> Result<Self> {
let credentials_json = general_purpose::STANDARD
.decode(credentials_base64)
.map_err(|e| anyhow::anyhow!("Failed to decode base64 credentials: {}", e))?;
let credentials_str = String::from_utf8(credentials_json)
.map_err(|e| anyhow::anyhow!("Failed to convert credentials to UTF-8: {}", e))?;
// Create client config directly from the JSON string
let credentials = CredentialsFile::new_from_str(&credentials_str)
.await
.map_err(|e| anyhow::anyhow!("Failed to parse credentials: {}", e))?;
let config = ClientConfig::default()
.with_credentials(credentials)
.await
.map_err(|e| anyhow::anyhow!("Failed to configure client: {}", e))?;
Ok(Self {
bucket: bucket.to_string(),
client: Client::new(config),
})
}
fn get_bucket_name(bucket: &str) -> (String, String) {
if let Some(idx) = bucket.find('/') {
let (bucket_part, subpath_part) = bucket.split_at(idx);
(
bucket_part.to_string(),
subpath_part.trim_start_matches('/').to_string(),
)
} else {
(bucket.to_string(), "".to_string())
}
}
}
#[async_trait]
impl StorageProvider for GcsStorageProvider {
async fn file_exists(&self, object_path: &str) -> Result<bool> {
let client = self.client.clone();
let (bucket_name, subpath) = Self::get_bucket_name(&self.bucket);
let object_path = object_path.strip_prefix('/').unwrap_or(object_path);
let full_path = if !subpath.is_empty() {
format!("{subpath}/{object_path}")
} else {
object_path.to_string()
};
match client
.get_object(&GetObjectRequest {
bucket: bucket_name,
object: full_path,
..Default::default()
})
.await
{
Ok(_) => Ok(true),
Err(_) => Ok(false),
}
}
async fn generate_mapping_file(&self, sha256: &str, file_name: &str) -> Result<String> {
let client = self.client.clone();
let mapping_path = format!("mapping/{sha256}");
let file_name = file_name.strip_prefix('/').unwrap_or(file_name);
let content = file_name.to_string().into_bytes();
let (bucket_name, subpath) = Self::get_bucket_name(&self.bucket);
let object_path = if !subpath.is_empty() {
format!("{subpath}/{mapping_path}")
} else {
mapping_path.clone()
};
let upload_type = UploadType::Simple(Media::new(object_path.clone()));
let uploaded = client
.upload_object(
&UploadObjectRequest {
bucket: bucket_name,
..Default::default()
},
content,
&upload_type,
)
.await;
debug!("Uploaded mapping file: {:?}", uploaded);
Ok(mapping_path)
}
async fn resolve_mapping_for_sha(&self, sha256: &str) -> Result<String> {
let client = self.client.clone();
let (bucket_name, subpath) = Self::get_bucket_name(&self.bucket);
let mapping_path = format!("mapping/{sha256}");
let object_path = if !subpath.is_empty() {
format!("{subpath}/{mapping_path}")
} else {
mapping_path.clone()
};
// Download the mapping file content
let content = client
.download_object(
&GetObjectRequest {
bucket: bucket_name,
object: object_path.clone(),
..Default::default()
},
&Range::default(),
)
.await?;
// Convert bytes to string
let file_name = String::from_utf8(content)?;
Ok(file_name)
}
async fn generate_upload_signed_url(
&self,
object_path: &str,
content_type: Option<String>,
expiration: Duration,
max_bytes: Option<u64>,
) -> Result<String> {
let client = self.client.clone();
let (bucket_name, subpath) = Self::get_bucket_name(&self.bucket);
// Ensure object_path does not start with a /
let object_path = object_path.strip_prefix('/').unwrap_or(object_path);
let object_path = if !subpath.is_empty() {
format!("{subpath}/{object_path}")
} else {
object_path.to_string()
};
// Set options for the signed URL
let mut options = SignedURLOptions {
method: SignedURLMethod::PUT,
expires: expiration,
content_type,
..Default::default()
};
// Set max bytes if specified
if let Some(bytes) = max_bytes {
options.headers = vec![format!("content-length:{}", bytes)];
}
// Generate the signed URL
let signed_url = client
.signed_url(
bucket_name.as_str(),
object_path.as_str(),
None,
None,
options,
)
.await?;
Ok(signed_url)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::utils::StorageProvider;
use rand::Rng;
#[tokio::test]
async fn test_generate_mapping_file() {
// Check if required environment variables are set
let bucket_name = match std::env::var("S3_BUCKET_NAME") {
Ok(name) => name,
Err(_) => {
println!("Skipping test: BUCKET_NAME not set");
return;
}
};
let credentials_base64 = match std::env::var("S3_CREDENTIALS") {
Ok(credentials) => credentials,
Err(_) => {
println!("Skipping test: S3_CREDENTIALS not set");
return;
}
};
let storage = GcsStorageProvider::new(&bucket_name, &credentials_base64)
.await
.unwrap();
let random_sha256: String = rand::rng().random_range(0..=u64::MAX).to_string();
let mapping_content = storage
.generate_mapping_file(&random_sha256, "run_1/file.parquet")
.await
.unwrap();
println!("mapping_content: {}", mapping_content);
println!("bucket_name: {}", bucket_name);
let original_file_name = storage
.resolve_mapping_for_sha(&random_sha256)
.await
.unwrap();
println!("original_file_name: {}", original_file_name);
assert_eq!(original_file_name, "run_1/file.parquet");
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/security/auth_signature_middleware.rs | crates/shared/src/security/auth_signature_middleware.rs | use actix_web::dev::Payload;
use actix_web::dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform};
use actix_web::error::{ErrorBadRequest, PayloadError};
use actix_web::web::Bytes;
use actix_web::web::BytesMut;
use actix_web::HttpMessage;
use actix_web::{Error, Result};
use alloy::primitives::Address;
use alloy::signers::Signature;
use dashmap::DashMap;
use dashmap::DashSet;
use futures_util::future::LocalBoxFuture;
use futures_util::future::{self};
use futures_util::Stream;
use futures_util::StreamExt;
use log::{debug, error, warn};
use redis::AsyncCommands;
use serde_json::json;
use std::future::{ready, Ready};
use std::pin::Pin;
use std::rc::Rc;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tokio::time::timeout; // If you're using tokio
// Maximum request body size in bytes
const MAX_BODY_SIZE: usize = 1024 * 1024 * 10; // 10MB
const BODY_TIMEOUT_SECS: u64 = 20; // 20 seconds
const NONCE_EXPIRATION_SECS: u64 = 60; // 1 minute
const MAX_NONCE_LENGTH: usize = 64;
const MIN_NONCE_LENGTH: usize = 16;
const RATE_LIMIT_WINDOW_SECS: u64 = 60;
const MAX_REQUESTS_PER_WINDOW: usize = 100;
const REQUEST_EXPIRY_SECS: u64 = 300;
type SyncAddressValidator = Arc<dyn Fn(&Address) -> bool + Send + Sync>;
type AsyncAddressValidator = Arc<dyn Fn(&Address) -> LocalBoxFuture<'static, bool> + Send + Sync>;
#[derive(Clone)]
/// Shared authorization state used by the signature-validation middleware.
///
/// Cloning is cheap: every field is behind an `Arc`, so all clones observe
/// and mutate the same underlying data.
pub struct ValidatorState {
    // Addresses that are always allowed; mutable at runtime via add/remove.
    allowed_addresses: Arc<DashSet<Address>>,
    // Optional synchronous fallback check for addresses not in the set.
    external_validator: Option<SyncAddressValidator>,
    // Optional asynchronous fallback check (consulted last, async path only).
    async_validator: Option<AsyncAddressValidator>,
    // When present, nonces are stored in Redis for replay protection.
    redis_pool: Option<Arc<redis::aio::MultiplexedConnection>>,
    // Per-address fixed-window rate limiting: (window start, request count).
    rate_limiter: Arc<DashMap<Address, (Instant, usize)>>,
}
impl ValidatorState {
    /// Build a new state whose allow-list is seeded with `initial_addresses`.
    /// No external validators, Redis pool, or rate-limit entries exist yet.
    pub fn new(initial_addresses: Vec<Address>) -> Self {
        let set = DashSet::new();
        for address in initial_addresses {
            set.insert(address);
        }
        Self {
            allowed_addresses: Arc::new(set),
            external_validator: None,
            async_validator: None,
            redis_pool: None,
            rate_limiter: Arc::new(DashMap::new()),
        }
    }
    /// Attach a Redis connection used for nonce replay protection.
    ///
    /// # Errors
    /// Returns the underlying `redis::RedisError` if the multiplexed
    /// connection cannot be established.
    pub async fn with_redis(
        mut self,
        redis_client: redis::Client,
    ) -> Result<Self, redis::RedisError> {
        let conn = redis_client.get_multiplexed_async_connection().await?;
        self.redis_pool = Some(Arc::new(conn));
        Ok(self)
    }
    /// Install a synchronous fallback validator, consulted when an address is
    /// not present in the allow-list.
    pub fn with_validator<F>(mut self, validator: F) -> Self
    where
        F: Fn(&Address) -> bool + Send + Sync + 'static,
    {
        self.external_validator = Some(Arc::new(validator));
        self
    }
    /// Install an asynchronous fallback validator; consulted last and only by
    /// `is_address_allowed_async`.
    pub fn with_async_validator<F>(mut self, validator: F) -> Self
    where
        F: Fn(&Address) -> LocalBoxFuture<'static, bool> + Send + Sync + 'static,
    {
        self.async_validator = Some(Arc::new(validator));
        self
    }
    /// Add an address to the allow-list; immediately visible to all clones.
    pub fn add_address(&self, address: Address) {
        self.allowed_addresses.insert(address);
    }
    /// Remove an address from the allow-list.
    pub fn remove_address(&self, address: &Address) {
        self.allowed_addresses.remove(address);
    }
    /// Iterate over the allowed addresses (values copied out of the set).
    pub fn iter_addresses(&self) -> impl Iterator<Item = Address> + '_ {
        self.allowed_addresses.iter().map(|addr| *addr)
    }
    /// Collect the current allow-list into a `Vec`.
    pub fn get_allowed_addresses(&self) -> Vec<Address> {
        self.iter_addresses().collect()
    }
    /// Synchronous allow check: allow-list first, then the sync external
    /// validator. The async validator is NOT consulted on this path.
    pub fn is_address_allowed(&self, address: &Address) -> bool {
        if self.allowed_addresses.contains(address) {
            return true;
        }
        if let Some(validator) = &self.external_validator {
            return validator(address);
        }
        false
    }
    /// Async allow check: allow-list, then sync validator, then async
    /// validator — short-circuiting on the first success.
    pub async fn is_address_allowed_async(&self, address: &Address) -> bool {
        if self.allowed_addresses.contains(address) {
            return true;
        }
        if let Some(validator) = &self.external_validator {
            if validator(address) {
                return true;
            }
        }
        if let Some(async_validator) = &self.async_validator {
            return async_validator(address).await;
        }
        false
    }
    /// A nonce is well-formed when it is 16..=64 characters of ASCII
    /// alphanumerics or '-' (UUID strings satisfy this).
    pub fn validate_nonce_format(&self, nonce: &str) -> bool {
        if nonce.len() < MIN_NONCE_LENGTH || nonce.len() > MAX_NONCE_LENGTH {
            return false;
        }
        nonce.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
    }
    /// Fixed-window rate limiter: at most MAX_REQUESTS_PER_WINDOW requests per
    /// RATE_LIMIT_WINDOW_SECS per address. Returns `true` when the request is
    /// within the limit (and counts it).
    pub fn check_rate_limit(&self, address: &Address) -> bool {
        let now = Instant::now();
        let mut entry = self.rate_limiter.entry(*address).or_insert((now, 0));
        let (window_start, count) = entry.value_mut();
        if now.duration_since(*window_start).as_secs() >= RATE_LIMIT_WINDOW_SECS {
            // Window elapsed: start a fresh window counting this request.
            *window_start = now;
            *count = 1;
            true
        } else if *count >= MAX_REQUESTS_PER_WINDOW {
            false
        } else {
            *count += 1;
            true
        }
    }
    /// Returns Ok(true) when the nonce is well-formed and has not been seen
    /// before, storing it in Redis with a NONCE_EXPIRATION_SECS TTL.
    ///
    /// Uses an atomic SET NX GET: the returned previous value is `None` only
    /// when the key was freshly set, i.e. the nonce is unique.
    ///
    /// NOTE(review): when no Redis pool is configured this always returns
    /// Ok(true), i.e. replay protection is silently disabled — confirm this
    /// is the intended deployment behavior.
    pub async fn check_and_store_nonce(&self, nonce: &str) -> Result<bool, redis::RedisError> {
        if !self.validate_nonce_format(nonce) {
            return Ok(false);
        }
        if let Some(pool) = &self.redis_pool {
            // MultiplexedConnection clones share the same underlying connection.
            let mut conn = pool.as_ref().clone();
            let nonce_key = format!("nonce:{nonce}");
            let result: Option<String> = conn
                .set_options(
                    &nonce_key,
                    "1",
                    redis::SetOptions::default()
                        .conditional_set(redis::ExistenceCheck::NX)
                        .get(true)
                        .with_expiration(redis::SetExpiry::EX(NONCE_EXPIRATION_SECS)),
                )
                .await?;
            Ok(result.is_none())
        } else {
            Ok(true)
        }
    }
}
/// Middleware factory enforcing signed requests (`x-address`/`x-signature`
/// headers) against a shared [`ValidatorState`].
pub struct ValidateSignature {
    validator_state: Arc<ValidatorState>,
}
impl ValidateSignature {
    /// Create the factory from shared validator state.
    pub fn new(state: Arc<ValidatorState>) -> Self {
        Self {
            validator_state: state,
        }
    }
}
/// Actix `Transform` implementation: wraps each inner service in a
/// `ValidateSignatureMiddleware` that shares this factory's state.
impl<S, B> Transform<S, ServiceRequest> for ValidateSignature
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type InitError = ();
    type Transform = ValidateSignatureMiddleware<S>;
    type Future = Ready<Result<Self::Transform, Self::InitError>>;
    fn new_transform(&self, service: S) -> Self::Future {
        // Rc allows the service handle to be cloned into the boxed future.
        ready(Ok(ValidateSignatureMiddleware {
            service: Rc::new(service),
            validator_state: self.validator_state.clone(),
        }))
    }
}
/// Per-request middleware instance produced by [`ValidateSignature`]; holds
/// the wrapped service and the shared authorization state.
pub struct ValidateSignatureMiddleware<S> {
    service: Rc<S>,
    validator_state: Arc<ValidatorState>,
}
impl<S, B> Service<ServiceRequest> for ValidateSignatureMiddleware<S>
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error> + 'static,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
    forward_ready!(service);
    /// Validates `x-address`/`x-signature` on every request:
    /// 1. buffers the body (size- and time-limited),
    /// 2. canonicalizes the JSON payload and recovers the signer address,
    /// 3. checks authorization, rate limit, timestamp freshness and nonce
    ///    uniqueness, then
    /// 4. re-injects the buffered body and forwards to the inner service.
    fn call(&self, mut req: ServiceRequest) -> Self::Future {
        let service = self.service.clone();
        let path = req.path().to_string();
        let validator_state = self.validator_state.clone();
        // Extract headers before consuming the request
        let x_address = req
            .headers()
            .get("x-address")
            .and_then(|h| h.to_str().ok())
            .map(|s| s.to_string());
        let x_signature = req
            .headers()
            .get("x-signature")
            .and_then(|h| h.to_str().ok())
            .map(|s| s.to_string());
        Box::pin(async move {
            // Collect the full body with size limit and timeout
            let mut body = BytesMut::new();
            let mut payload = req.take_payload();
            let start_time = Instant::now();
            // Create a timeout for the entire body reading process
            let body_read_future = async {
                while let Some(chunk) = payload.next().await {
                    let chunk = chunk?;
                    // Check if adding this chunk would exceed the size limit
                    if body.len() + chunk.len() > MAX_BODY_SIZE {
                        return Err(ErrorBadRequest(json!({
                            "error": "Request body too large",
                            "code": "BODY_TOO_LARGE",
                            "max_size": MAX_BODY_SIZE
                        })));
                    }
                    // Check if we've exceeded the time limit for reading the body
                    if start_time.elapsed() > Duration::from_secs(BODY_TIMEOUT_SECS) {
                        return Err(ErrorBadRequest(json!({
                            "error": "Request body read timeout",
                            "code": "BODY_READ_TIMEOUT",
                            "timeout_seconds": BODY_TIMEOUT_SECS
                        })));
                    }
                    body.extend_from_slice(chunk.as_ref());
                }
                Ok::<_, Error>(body)
            };
            // Apply timeout to the entire body reading process as a fallback
            // (the per-chunk check above only fires when chunks keep arriving).
            let Ok(body_result) =
                timeout(Duration::from_secs(BODY_TIMEOUT_SECS), body_read_future).await
            else {
                return Err(ErrorBadRequest(json!({
                    "error": "Request body read timeout",
                    "code": "BODY_READ_TIMEOUT",
                    "timeout_seconds": BODY_TIMEOUT_SECS
                })));
            };
            // If there was an error reading the body, return it
            let body = body_result?;
            // Handle GET requests which do not have a payload
            let mut payload_string = String::new();
            let mut timestamp = None;
            let mut nonce = None;
            if req.method() != actix_web::http::Method::GET {
                // Parse and sort the payload
                let payload_value: serde_json::Value = match serde_json::from_slice(&body) {
                    Ok(val) => val,
                    Err(e) => {
                        error!("Error parsing payload: {e:?}");
                        return Err(ErrorBadRequest(json!({
                            "error": "Invalid JSON payload",
                            "code": "INVALID_JSON",
                            "details": e.to_string()
                        })));
                    }
                };
                let mut payload_data = payload_value.clone();
                if let Some(obj) = payload_data.as_object_mut() {
                    // The nonce travels inside the signed JSON body for POSTs.
                    nonce = obj
                        .get("nonce")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    // NOTE(review): despite the name, keys are collected in map
                    // iteration order and never sorted here, while the signer
                    // (request_signer.rs) calls `sort()`. This only matches when
                    // serde_json's map iterates in sorted (BTreeMap) order or
                    // clients send pre-sorted JSON — confirm the `preserve_order`
                    // feature is not enabled, or add an explicit sort.
                    let sorted_keys: Vec<String> = obj.keys().cloned().collect();
                    let sorted_obj: serde_json::Map<String, serde_json::Value> = sorted_keys
                        .into_iter()
                        .map(|key| (key.clone(), obj.remove(&key).unwrap()))
                        .collect();
                    *obj = sorted_obj;
                }
                payload_string = match serde_json::to_string(&payload_data) {
                    Ok(s) => s,
                    Err(e) => {
                        error!("Error serializing payload: {e:?}");
                        return Err(ErrorBadRequest(json!({
                            "error": "Failed to serialize payload",
                            "code": "SERIALIZATION_ERROR",
                            "details": e.to_string()
                        })));
                    }
                };
                // Timestamp is read after canonicalization; it stays part of
                // the signed payload string.
                if let Some(obj) = payload_data.as_object_mut() {
                    timestamp = obj.get("timestamp").and_then(|v| v.as_u64());
                }
            }
            // For GETs (or bodies without one), fall back to a `timestamp=`
            // query parameter. Note the query string itself is NOT part of the
            // signed message below.
            if timestamp.is_none() {
                timestamp = req.uri().query().and_then(|query| {
                    query
                        .split('&')
                        .find(|param| param.starts_with("timestamp="))
                        .and_then(|param| param.split('=').nth(1))
                        .and_then(|value| match value.parse::<u64>() {
                            Ok(ts) => Some(ts),
                            Err(e) => {
                                debug!("Failed to parse timestamp from query: {e:?}");
                                None
                            }
                        })
                });
            }
            // Extract nonce from query parameters for GET requests
            if nonce.is_none() {
                nonce = req.uri().query().and_then(|query| {
                    query
                        .split('&')
                        .find(|param| param.starts_with("nonce="))
                        .and_then(|param| param.split('=').nth(1))
                        .map(|value| value.to_string())
                });
            }
            // Combine path and payload — this is exactly what the client signed
            // (see request_signer.rs); empty payload_string for GETs.
            let msg: String = format!("{path}{payload_string}");
            // Validate signature
            if let (Some(address), Some(signature)) = (x_address, x_signature) {
                let signature = signature.trim_start_matches("0x");
                let Ok(parsed_signature) = Signature::from_str(signature) else {
                    return Err(ErrorBadRequest(json!({
                        "error": "Invalid signature format",
                        "code": "INVALID_SIGNATURE_FORMAT"
                    })));
                };
                // EIP-191 style recovery: derive the signer from message + sig.
                let Ok(recovered_address) = parsed_signature.recover_address_from_msg(msg) else {
                    return Err(ErrorBadRequest(json!({
                        "error": "Failed to recover address from message",
                        "code": "ADDRESS_RECOVERY_FAILED"
                    })));
                };
                let Ok(expected_address) = Address::from_str(&address) else {
                    return Err(ErrorBadRequest(json!({
                        "error": "Invalid address format",
                        "code": "INVALID_ADDRESS_FORMAT"
                    })));
                };
                if recovered_address != expected_address {
                    debug!("Recovered address: {recovered_address:?}");
                    debug!("Expected address: {expected_address:?}");
                    return Err(ErrorBadRequest(json!({
                        "error": "Invalid signature",
                        "code": "SIGNATURE_MISMATCH",
                    })));
                }
                if !validator_state
                    .is_address_allowed_async(&recovered_address)
                    .await
                {
                    warn!(
                        "Request with valid signature but not authorized. Allowed addresses: {:?}",
                        validator_state.get_allowed_addresses()
                    );
                    return Err(ErrorBadRequest(json!({
                        "error": "Address not authorized",
                        "code": "ADDRESS_NOT_AUTHORIZED",
                        "address": recovered_address.to_string()
                    })));
                }
                if !validator_state.check_rate_limit(&recovered_address) {
                    warn!("Rate limit exceeded for address: {recovered_address}");
                    return Err(ErrorBadRequest(json!({
                        "error": "Rate limit exceeded",
                        "code": "RATE_LIMIT_EXCEEDED",
                        "message": format!("Maximum {} requests per {} seconds allowed", MAX_REQUESTS_PER_WINDOW, RATE_LIMIT_WINDOW_SECS)
                    })));
                }
                // Timestamp freshness is only enforced when a timestamp was
                // supplied at all.
                if let Some(timestamp) = timestamp {
                    let current_time = SystemTime::now()
                        .duration_since(UNIX_EPOCH)
                        .unwrap()
                        .as_secs();
                    // NOTE(review): this u64 subtraction underflows (panics in
                    // debug builds) when the client timestamp is in the future;
                    // consider `current_time.saturating_sub(timestamp)`.
                    if current_time - timestamp > REQUEST_EXPIRY_SECS {
                        return Err(ErrorBadRequest(json!({
                            "error": "Request expired",
                            "code": "REQUEST_EXPIRED",
                            "timestamp": timestamp,
                            "current_time": current_time,
                            // NOTE(review): inconsistent with REQUEST_EXPIRY_SECS
                            // (300); should presumably report that constant.
                            "max_age_seconds": 10
                        })));
                    }
                }
                if let Some(nonce_value) = nonce {
                    if !validator_state.validate_nonce_format(&nonce_value) {
                        return Err(ErrorBadRequest(json!({
                            "error": "Invalid nonce format",
                            "code": "INVALID_NONCE_FORMAT",
                            "message": format!("Nonce must be {}-{} alphanumeric characters", MIN_NONCE_LENGTH, MAX_NONCE_LENGTH)
                        })));
                    }
                    match validator_state.check_and_store_nonce(&nonce_value).await {
                        Ok(is_unique) => {
                            if !is_unique {
                                return Err(ErrorBadRequest(json!({
                                    "error": "Request replay detected",
                                    "code": "NONCE_ALREADY_USED",
                                    "message": "This nonce has already been used"
                                })));
                            }
                        }
                        Err(e) => {
                            error!("Redis error during nonce check: {e:?}");
                            return Err(ErrorBadRequest(json!({
                                "error": "Nonce validation failed",
                                "code": "NONCE_VALIDATION_ERROR",
                                "message": "Unable to validate request uniqueness"
                            })));
                        }
                    }
                } else {
                    return Err(ErrorBadRequest(json!({
                        "error": "Missing nonce",
                        "code": "MISSING_NONCE",
                        "message": "Request must include a unique nonce for replay protection"
                    })));
                }
                // Reconstruct request with the original body
                let stream =
                    futures_util::stream::once(future::ok::<Bytes, PayloadError>(body.freeze()));
                let boxed_stream: Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>> =
                    Box::pin(stream);
                req.set_payload(Payload::from(boxed_stream));
                service.call(req).await
            } else {
                Err(ErrorBadRequest(json!({
                    "error": "Missing signature or address",
                    "code": "MISSING_AUTH_HEADERS",
                    "required_headers": ["x-signature", "x-address"]
                })))
            }
        })
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::web3::wallet::Wallet;
use actix_web::http::StatusCode;
use actix_web::{test, web, App, HttpResponse};
use std::collections::HashSet;
use std::str::FromStr;
use url::Url;
async fn test_handler() -> HttpResponse {
HttpResponse::Ok().finish()
}
#[actix_web::test]
async fn test_missing_headers() {
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(
vec![],
))))
.route("/test", web::post().to(test_handler)),
)
.await;
let req = test::TestRequest::post()
.uri("/test")
.set_json(serde_json::json!({"test": "data", "nonce": "test-nonce-1"}))
.to_request();
let err = test::try_call_service(&app, req).await;
match err {
Err(e) => {
let error_str = e.to_string();
assert!(error_str.contains("Missing signature or address"));
let error_response = e.error_response();
assert_eq!(error_response.status(), StatusCode::BAD_REQUEST);
}
Ok(_) => panic!("Expected an error"),
}
}
#[actix_web::test]
async fn test_invalid_signature() {
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(
vec![],
))))
.route("/test", web::post().to(test_handler)),
)
.await;
let req = test::TestRequest::post()
.uri("/test")
.insert_header(("x-address", "0x742d35Cc6634C0532925a3b844Bc454e4438f44e"))
.insert_header(("x-signature", "0xinvalid_signature"))
.set_json(serde_json::json!({"test": "data", "nonce": "test-nonce-2"}))
.to_request();
let err = test::try_call_service(&app, req).await;
match err {
Err(e) => {
let error_str = e.to_string();
assert!(error_str.contains("Invalid signature format"));
let error_response = e.error_response();
assert_eq!(error_response.status(), StatusCode::BAD_REQUEST);
}
Ok(_) => panic!("Expected an error"),
}
}
#[actix_web::test]
async fn test_valid_signature() {
use crate::security::request_signer::sign_request_with_nonce;
let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
let address = Address::from_str("0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf").unwrap();
let wallet =
Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap();
let signed_request =
sign_request_with_nonce("/test", &wallet, Some(&serde_json::json!({"test": "data"})))
.await
.unwrap();
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(vec![
address,
]))))
.route("/test", web::post().to(test_handler)),
)
.await;
let req = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signed_request.signature))
.set_json(signed_request.data.as_ref().unwrap())
.to_request();
let res = test::call_service(&app, req).await;
assert_eq!(res.status(), StatusCode::OK);
}
#[actix_web::test]
async fn test_valid_signature_get_request() {
use crate::security::request_signer::sign_request_with_nonce;
let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
let address = Address::from_str("0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf").unwrap();
let wallet =
Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap();
let signed_request = sign_request_with_nonce("/test", &wallet, None)
.await
.unwrap();
let nonce = signed_request.nonce;
let signature = signed_request.signature;
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(vec![
address,
]))))
.route("/test", web::get().to(test_handler)),
)
.await;
log::info!("Address: {}", wallet.wallet.default_signer().address());
log::info!("Signature: {}", signature);
log::info!("Nonce: {}", nonce);
let req = test::TestRequest::get()
.uri(&format!("/test?nonce={}", nonce))
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signature))
.to_request();
let res = test::call_service(&app, req).await;
assert_eq!(res.status(), StatusCode::OK);
}
#[actix_web::test]
async fn test_valid_signature_but_not_allowed() {
use crate::security::request_signer::sign_request_with_nonce;
let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
let allowed_address =
Address::from_str("0xeeFBd3F87405FdADa62de677492a805A8dA1B457").unwrap();
let wallet = Wallet::new(
private_key,
Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
)
.unwrap();
let signed_request =
sign_request_with_nonce("/test", &wallet, Some(&serde_json::json!({"test": "data"})))
.await
.unwrap();
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(vec![
allowed_address,
]))))
.route("/test", web::post().to(test_handler)),
)
.await;
log::info!("Address: {}", wallet.wallet.default_signer().address());
log::info!("Signature: {}", signed_request.signature);
let req = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signed_request.signature))
.set_json(signed_request.data.as_ref().unwrap())
.to_request();
let err = test::try_call_service(&app, req).await;
match err {
Err(e) => {
let error_str = e.to_string();
assert!(error_str.contains("Address not authorized"));
let error_response = e.error_response();
assert_eq!(error_response.status(), StatusCode::BAD_REQUEST);
}
Ok(_) => panic!("Expected an error"),
}
}
#[actix_web::test]
async fn test_multiple_state_clones() {
let address = Address::from_str("0xc1621E38E76E7355D1f9915a05d0BC29d2B09814").unwrap();
let validator_state = Arc::new(ValidatorState::new(vec![]));
// Create multiple clones
let clone1 = validator_state.clone();
let clone2 = validator_state.clone();
let clone3 = clone1.clone();
// Modify through one clone
clone2.add_address(address);
// Verify all clones see the change
assert!(validator_state.is_address_allowed(&address));
assert!(clone1.is_address_allowed(&address));
assert!(clone2.is_address_allowed(&address));
assert!(clone3.is_address_allowed(&address));
// Remove through another clone
clone3.remove_address(&address);
// Verify removal is visible to all
assert!(!validator_state.is_address_allowed(&address));
assert!(!clone1.is_address_allowed(&address));
assert!(!clone2.is_address_allowed(&address));
assert!(!clone3.is_address_allowed(&address));
}
#[actix_web::test]
async fn test_dynamic_allowed_addresses() {
use crate::security::request_signer::sign_request_with_nonce;
let private_key = "0xf72df6ef6f7ff457e693f6acae8dfc289bd54225875e93d013c4aa27a8feec76";
let address = Address::from_str("0xc1621E38E76E7355D1f9915a05d0BC29d2B09814").unwrap();
let wallet = Wallet::new(
private_key,
Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
)
.unwrap();
let validator_state = Arc::new(ValidatorState::new(vec![]));
let signed_request1 =
sign_request_with_nonce("/test", &wallet, Some(&serde_json::json!({"test": "data"})))
.await
.unwrap();
let signed_request2 =
sign_request_with_nonce("/test", &wallet, Some(&serde_json::json!({"test": "data"})))
.await
.unwrap();
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(validator_state.clone()))
.route("/test", web::post().to(test_handler)),
)
.await;
let req = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signed_request1.signature))
.set_json(signed_request1.data.as_ref().unwrap())
.to_request();
let err = test::try_call_service(&app, req).await;
match err {
Err(e) => {
let error_str = e.to_string();
assert!(error_str.contains("Address not authorized"));
let error_response = e.error_response();
assert_eq!(error_response.status(), StatusCode::BAD_REQUEST);
}
Ok(_) => panic!("Expected an error"),
}
validator_state.add_address(address);
let req_after_address_add = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signed_request2.signature))
.set_json(signed_request2.data.as_ref().unwrap())
.to_request();
let res_after_address_add = test::call_service(&app, req_after_address_add).await;
assert_eq!(res_after_address_add.status(), StatusCode::OK);
}
#[actix_web::test]
async fn test_multiple_addresses() {
let validator_state = ValidatorState::new(vec![]);
// Create multiple addresses
let addresses: Vec<Address> = (0..5)
.map(|i| {
Address::from_str(&format!("0x{}000000000000000000000000000000000000000", i))
.unwrap()
})
.collect();
// Add addresses through different clones
let clone1 = validator_state.clone();
let clone2 = validator_state.clone();
for (i, addr) in addresses.iter().enumerate() {
if i % 2 == 0 {
clone1.add_address(*addr);
} else {
clone2.add_address(*addr);
}
}
// Verify all addresses are present
let allowed = validator_state.get_allowed_addresses();
let allowed_set: HashSet<_> = allowed.into_iter().collect();
let expected_set: HashSet<_> = addresses.into_iter().collect();
assert_eq!(allowed_set, expected_set);
}
#[actix_web::test]
async fn test_nonce_replay_protection() {
use crate::security::request_signer::sign_request_with_nonce;
let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
let address = Address::from_str("0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf").unwrap();
let wallet =
Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap();
let signed_request =
sign_request_with_nonce("/test", &wallet, Some(&serde_json::json!({"test": "data"})))
.await
.unwrap();
let app = test::init_service(
App::new()
.wrap(ValidateSignature::new(Arc::new(ValidatorState::new(vec![
address,
]))))
.route("/test", web::post().to(test_handler)),
)
.await;
let req1 = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
.insert_header(("x-signature", signed_request.signature.clone()))
.set_json(signed_request.data.as_ref().unwrap())
.to_request();
let res1 = test::call_service(&app, req1).await;
assert_eq!(res1.status(), StatusCode::OK);
let req2 = test::TestRequest::post()
.uri("/test")
.insert_header((
"x-address",
wallet.wallet.default_signer().address().to_string(),
))
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/security/mod.rs | crates/shared/src/security/mod.rs | pub mod api_key_middleware;
pub mod auth_signature_middleware;
pub mod request_signer;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/security/request_signer.rs | crates/shared/src/security/request_signer.rs | // request_signer.rs
use crate::web3::wallet::Wallet;
use alloy::signers::Signer;
use uuid::Uuid;
#[derive(Clone)]
/// The result of signing a request for the signature-validation middleware.
pub struct SignedRequest {
    // Hex-encoded 65-byte signature, "0x"-prefixed.
    pub signature: String,
    // The nonce-augmented, key-sorted JSON payload that was signed,
    // or `None` when the request had no body.
    pub data: Option<serde_json::Value>,
    // The nonce included in the signed message for replay protection.
    pub nonce: String,
}
/// Sign `endpoint` (plus an optional JSON payload) using a freshly generated
/// UUIDv4 nonce; delegates all the work to [`sign_request_with_custom_nonce`].
pub async fn sign_request_with_nonce(
    endpoint: &str,
    wallet: &Wallet,
    data: Option<&serde_json::Value>,
) -> Result<SignedRequest, Box<dyn std::error::Error>> {
    let generated = Uuid::new_v4().to_string();
    sign_request_with_custom_nonce(endpoint, wallet, data, generated.as_str()).await
}
/// Sign `endpoint` plus an optional JSON payload using an explicit `nonce`.
///
/// The nonce is inserted into the payload under the `"nonce"` key and the
/// object's keys are sorted, so the serialized form — and therefore the
/// signature — is deterministic regardless of the caller's key order. The
/// signed message is `"{endpoint}{sorted_json}"`, or just the endpoint when
/// there is no payload.
///
/// # Errors
/// Propagates JSON (de)serialization failures and signer errors.
pub async fn sign_request_with_custom_nonce(
    endpoint: &str,
    wallet: &Wallet,
    data: Option<&serde_json::Value>,
    nonce: &str,
) -> Result<SignedRequest, Box<dyn std::error::Error>> {
    let mut modified_data = None;
    let request_data_string = if let Some(data) = data {
        let mut request_data = serde_json::to_value(data)?;
        if let Some(obj) = request_data.as_object_mut() {
            // Inject the nonce so it is covered by the signature.
            obj.insert(
                "nonce".to_string(),
                serde_json::Value::String(nonce.to_string()),
            );
            // Rebuild the object in sorted-key order for a canonical encoding.
            let mut sorted_keys: Vec<String> = obj.keys().cloned().collect();
            sorted_keys.sort();
            *obj = sorted_keys
                .into_iter()
                .map(|key| (key.clone(), obj.remove(&key).unwrap()))
                .collect();
        }
        // Return the canonicalized payload so the caller can send exactly
        // what was signed.
        modified_data = Some(request_data.clone());
        serde_json::to_string(&request_data)?
    } else {
        String::new()
    };
    let message = if request_data_string.is_empty() {
        endpoint.to_string()
    } else {
        format!("{endpoint}{request_data_string}")
    };
    let signature = wallet
        .signer
        .sign_message(message.as_bytes())
        .await?
        .as_bytes();
    let signature_string = format!("0x{}", hex::encode(signature));
    Ok(SignedRequest {
        signature: signature_string,
        data: modified_data,
        nonce: nonce.to_string(),
    })
}
/// Convenience wrapper around [`sign_request_with_nonce`] that returns only
/// the hex-encoded signature string.
pub async fn sign_request(
    endpoint: &str,
    wallet: &Wallet,
    data: Option<&serde_json::Value>,
) -> Result<String, Box<dyn std::error::Error>> {
    Ok(sign_request_with_nonce(endpoint, wallet, data)
        .await?
        .signature)
}
/// Sign an arbitrary UTF-8 message with the wallet's signer and return the
/// "0x"-prefixed hex encoding of the 65-byte signature.
pub async fn sign_message(
    message: &str,
    wallet: &Wallet,
) -> Result<String, Box<dyn std::error::Error>> {
    let sig_bytes = wallet
        .signer
        .sign_message(message.as_bytes())
        .await?
        .as_bytes();
    Ok(format!("0x{}", hex::encode(sig_bytes)))
}
#[cfg(test)]
mod tests {
    // Unit tests for the request-signing helpers. These use a fixed private
    // key so outputs are deterministic; the RPC URL is never contacted.
    use super::*;
    use crate::web3::wallet::Wallet;
    use serde_json::json;
    use url::Url;
    // Basic signing produces a 0x-prefixed, 65-byte (132-char) signature.
    #[tokio::test]
    async fn test_sign_request() {
        // Create test wallet with known private key
        let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
        let wallet = Wallet::new(
            private_key,
            Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
        )
        .unwrap();
        // Test data
        let endpoint = "/api/test";
        let test_data = json!({
            "key2": "value2",
            "key1": "value1"
        });
        // Sign request
        let signature = sign_request(endpoint, &wallet, Some(&test_data))
            .await
            .unwrap();
        // Verify signature starts with "0x"
        assert!(signature.starts_with("0x"));
        // Verify signature length (0x + 130 hex chars for 65 bytes)
        assert_eq!(signature.len(), 132);
    }
    // An empty JSON object still signs (nonce gets injected into it).
    #[tokio::test]
    async fn test_sign_request_with_empty_data() {
        let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
        let wallet = Wallet::new(
            private_key,
            Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
        )
        .unwrap();
        let endpoint = "/api/test";
        let empty_data = json!({});
        let signature = sign_request(endpoint, &wallet, Some(&empty_data))
            .await
            .unwrap();
        println!("Signature: {}", signature);
        assert!(signature.starts_with("0x"));
        assert_eq!(signature.len(), 132);
    }
    // Canonicalization: two payloads with identical data but different key
    // order must sign to the same signature (given the same nonce).
    #[tokio::test]
    async fn test_key_sorting() {
        let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
        let wallet = Wallet::new(
            private_key,
            Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
        )
        .unwrap();
        let endpoint = "/api/test";
        let test_nonce = "test-nonce-123";
        // Create two objects with same data but different key order
        let data1 = json!({
            "a": "1",
            "b": "2"
        });
        let data2 = json!({
            "b": "2",
            "a": "1"
        });
        let signed_req1 =
            sign_request_with_custom_nonce(endpoint, &wallet, Some(&data1), test_nonce)
                .await
                .unwrap();
        let signed_req2 =
            sign_request_with_custom_nonce(endpoint, &wallet, Some(&data2), test_nonce)
                .await
                .unwrap();
        // Signatures should be identical since keys are sorted and nonce is the same
        assert_eq!(signed_req1.signature, signed_req2.signature);
    }
    // Raw message signing also yields a 0x-prefixed 65-byte signature.
    #[tokio::test]
    async fn test_sign_message() {
        let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
        let wallet = Wallet::new(
            private_key,
            Url::parse("https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161").unwrap(),
        )
        .unwrap();
        let message = "Hello, world!";
        let signature = sign_message(message, &wallet).await.unwrap();
        // Verify signature starts with "0x"
        assert!(signature.starts_with("0x"));
        // Verify signature length (0x + 130 hex chars for 65 bytes)
        assert_eq!(signature.len(), 132);
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/security/api_key_middleware.rs | crates/shared/src/security/api_key_middleware.rs | use actix_web::{
dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform},
error::ErrorUnauthorized,
http::header::AUTHORIZATION,
Error,
};
use futures_util::future::{ready, LocalBoxFuture, Ready};
use subtle::ConstantTimeEq;
/// Middleware factory that guards routes behind a static API key supplied as
/// an `Authorization: Bearer <key>` header.
pub struct ApiKeyMiddleware {
    // The expected key; compared against requests in constant time.
    api_key: String,
}
impl ApiKeyMiddleware {
    /// Create the factory for the given expected API key.
    pub fn new(api_key: String) -> Self {
        Self { api_key }
    }
}
/// Actix `Transform` implementation: wraps each inner service in an
/// `ApiKeyMiddlewareService` holding a copy of the expected key.
impl<S, B> Transform<S, ServiceRequest> for ApiKeyMiddleware
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type Transform = ApiKeyMiddlewareService<S>;
    type InitError = ();
    type Future = Ready<Result<Self::Transform, Self::InitError>>;
    fn new_transform(&self, service: S) -> Self::Future {
        ready(Ok(ApiKeyMiddlewareService {
            service,
            api_key: self.api_key.clone(),
        }))
    }
}
/// Per-request middleware instance produced by [`ApiKeyMiddleware`].
pub struct ApiKeyMiddlewareService<S> {
    service: S,
    api_key: String,
}
/// Per-request service: authorizes the request by comparing the `Bearer`
/// token in the `Authorization` header against the configured API key in
/// constant time, then forwards it to the wrapped service.
impl<S, B> Service<ServiceRequest> for ApiKeyMiddlewareService<S>
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<B>;
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
    forward_ready!(service);
    fn call(&self, req: ServiceRequest) -> Self::Future {
        // A request is authorized iff the Authorization header parses as a
        // string, uses the (case-insensitive) "Bearer " scheme, and carries a
        // key matching the configured one.
        let authorized = req
            .headers()
            .get(AUTHORIZATION)
            .and_then(|header| header.to_str().ok())
            .is_some_and(|auth| {
                if auth.len() <= 7 {
                    return false;
                }
                // First 7 bytes hold the scheme ("Bearer "); the rest is the key.
                let (scheme, candidate) = auth.split_at(7);
                if !scheme.eq_ignore_ascii_case("Bearer ") {
                    return false;
                }
                let expected = self.api_key.as_bytes();
                // Equal-length check first, then a constant-time byte compare.
                candidate.len() == expected.len()
                    && bool::from(candidate.as_bytes().ct_eq(expected))
            });
        if authorized {
            let fut = self.service.call(req);
            Box::pin(async move { fut.await })
        } else {
            Box::pin(async move { Err(ErrorUnauthorized("Invalid API key")) })
        }
    }
}
#[cfg(test)]
mod tests {
    // Integration-style tests for ApiKeyMiddleware using actix's in-process
    // test service. The scheme ("Bearer") is case-insensitive; the key itself
    // is compared exactly.
    use super::*;
    use actix_web::{test, web, App, HttpResponse};
    async fn test_handler() -> HttpResponse {
        HttpResponse::Ok().body("Success")
    }
    // Correct key with canonical "Bearer" scheme is accepted.
    #[actix_web::test]
    async fn test_valid_api_key() {
        let api_key = "test-api-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "Bearer test-api-key"))
            .to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
    }
    // Wrong key is rejected with the unauthorized error.
    #[actix_web::test]
    async fn test_invalid_api_key() {
        let api_key = "test-api-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "Bearer wrong-key"))
            .to_request();
        let resp = app.call(req).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err().to_string(), "Invalid API key");
    }
    // Absent Authorization header is rejected.
    #[actix_web::test]
    async fn test_missing_auth_header() {
        let api_key = "test-api-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get().uri("/").to_request();
        let resp = app.call(req).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err().to_string(), "Invalid API key");
    }
    // "bearer" (lowercase scheme) is accepted, but the key remains
    // case-sensitive: a case-mangled key is rejected.
    #[actix_web::test]
    async fn test_lowercase_bearer_accepted() {
        let api_key = "test-API-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "bearer test-API-key"))
            .to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "bearer test-api-key"))
            .to_request();
        let resp = app.call(req).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err().to_string(), "Invalid API key");
    }
    // Mixed-case scheme is fine, but a key that differs in case is rejected.
    #[actix_web::test]
    async fn test_mixed_case_api_key_rejected() {
        let api_key = "test-api-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "BeArEr test-API-key"))
            .to_request();
        let resp = app.call(req).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err().to_string(), "Invalid API key");
    }
    // A header with no "Bearer " scheme at all is rejected.
    #[actix_web::test]
    async fn test_malformed_auth_header() {
        let api_key = "test-api-key";
        let app = test::init_service(
            App::new()
                .wrap(ApiKeyMiddleware::new(api_key.to_string()))
                .route("/", web::get().to(test_handler)),
        )
        .await;
        let req = test::TestRequest::get()
            .uri("/")
            .insert_header(("Authorization", "InvalidFormat"))
            .to_request();
        let resp = app.call(req).await;
        assert!(resp.is_err());
        assert_eq!(resp.unwrap_err().to_string(), "Invalid API key");
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/mod.rs | crates/shared/src/web3/mod.rs | pub mod contracts;
pub mod wallet;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/wallet.rs | crates/shared/src/web3/wallet.rs | use alloy::primitives::Address;
use alloy::{
network::EthereumWallet,
primitives::U256,
providers::fillers::{
BlobGasFiller, ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller,
},
providers::{Provider, ProviderBuilder, RootProvider},
signers::local::PrivateKeySigner,
};
use alloy_provider::fillers::SimpleNonceManager;
use url::Url;
pub type WalletProvider = FillProvider<
JoinFill<
JoinFill<
JoinFill<
alloy_provider::Identity,
JoinFill<GasFiller, JoinFill<BlobGasFiller, JoinFill<NonceFiller, ChainIdFiller>>>,
>,
NonceFiller<SimpleNonceManager>,
>,
WalletFiller<EthereumWallet>,
>,
RootProvider,
>;
#[derive(Clone)]
pub struct Wallet {
pub wallet: EthereumWallet,
pub signer: PrivateKeySigner,
pub provider: WalletProvider,
}
impl Wallet {
pub fn new(private_key: &str, provider_url: Url) -> Result<Self, Box<dyn std::error::Error>> {
let signer: PrivateKeySigner = private_key.parse()?;
let signer_clone = signer.clone();
let wallet = EthereumWallet::from(signer);
let wallet_clone = wallet.clone();
let provider = ProviderBuilder::new()
.with_simple_nonce_management()
.wallet(wallet_clone)
.connect_http(provider_url);
Ok(Self {
wallet,
signer: signer_clone,
provider,
})
}
pub fn address(&self) -> Address {
self.wallet.default_signer().address()
}
pub async fn get_balance(&self) -> Result<U256, Box<dyn std::error::Error>> {
let address = self.wallet.default_signer().address();
let balance = self.provider.get_balance(address).await?;
Ok(balance)
}
pub fn provider(&self) -> WalletProvider {
self.provider.clone()
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/mod.rs | crates/shared/src/web3/contracts/mod.rs | pub mod constants;
pub mod core;
pub mod helpers;
pub mod implementations;
pub mod structs;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/helpers/utils.rs | crates/shared/src/web3/contracts/helpers/utils.rs | // This is the correct, minimal, and tested fix.
use alloy::contract::CallDecoder;
use alloy::providers::Provider;
use alloy::{
contract::CallBuilder,
primitives::{keccak256, FixedBytes, Selector},
providers::Network,
};
use anyhow::Result;
use log::{debug, info, warn};
use tokio::time::Duration;
use crate::web3::wallet::WalletProvider;
pub fn get_selector(fn_image: &str) -> Selector {
keccak256(fn_image.as_bytes())[..4].try_into().unwrap()
}
pub type PrimeCallBuilder<'a, D> = alloy::contract::CallBuilder<&'a WalletProvider, D>;
pub async fn retry_call<P, D, N>(
mut call: CallBuilder<&P, D, N>,
max_tries: u32,
provider: P,
retry_delay: Option<u64>,
) -> Result<FixedBytes<32>>
where
P: Provider<N> + Clone,
N: Network,
D: CallDecoder + Clone,
{
const PENDING_TRANSACTION_TIMEOUT: Duration = Duration::from_secs(60);
let mut tries = 0;
let retry_delay = retry_delay.unwrap_or(2);
let mut tx_hash = None;
while tries < max_tries {
if tries > 0 {
tokio::time::sleep(Duration::from_secs(retry_delay)).await;
if let Some(tx_hash) = tx_hash {
let receipt = provider.get_transaction_receipt(tx_hash).await?;
if receipt.is_some() {
return Ok(tx_hash);
}
}
// On retry, always fetch fresh fee estimates from the provider.
let priority_fee_res = provider.get_max_priority_fee_per_gas().await;
let gas_price_res = provider.get_gas_price().await;
if let (Ok(priority_fee), Ok(gas_price)) = (priority_fee_res, gas_price_res) {
// To replace a transaction, we need to bump both fees.
// A common strategy is to increase by a percentage (e.g., 20%).
let new_priority_fee = (priority_fee as f64 * 1.2).round() as u128;
let new_gas_price = (gas_price as f64 * 1.2).round() as u128;
info!(
"Retrying with bumped fees: max_fee={new_gas_price}, priority_fee={new_priority_fee}"
);
call = call
.clone()
.max_fee_per_gas(new_gas_price)
.max_priority_fee_per_gas(new_priority_fee)
} else {
warn!("Could not get new gas fees, retrying with old settings.");
}
}
match call.clone().send().await {
Ok(result) => {
debug!("Transaction sent, waiting for confirmation...");
tx_hash = Some(*result.tx_hash());
match result
.with_timeout(Some(PENDING_TRANSACTION_TIMEOUT))
.watch()
.await
{
Ok(hash) => return Ok(hash),
Err(err) => warn!("Transaction watch failed: {err:?}"),
}
}
Err(err) => {
warn!("Transaction send failed: {err:?}");
let err_str = err.to_string().to_lowercase();
if !err_str.contains("replacement transaction underpriced")
&& !err_str.contains("nonce too low")
&& !err_str.contains("transaction already imported")
{
return Err(anyhow::anyhow!("Non-retryable error: {:?}", err));
}
}
}
tries += 1;
}
Err(anyhow::anyhow!("Max retries reached"))
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use super::*;
use alloy::{primitives::U256, providers::ProviderBuilder, sol};
use alloy_provider::WalletProvider;
use anyhow::{Error, Result};
sol! {
#[allow(missing_docs)]
// solc v0.8.26; solc Counter.sol --via-ir --optimize --bin
#[sol(rpc, bytecode="6080806040523460135760df908160198239f35b600080fdfe6080806040526004361015601257600080fd5b60003560e01c9081633fb5c1cb1460925781638381f58a146079575063d09de08a14603c57600080fd5b3460745760003660031901126074576000546000198114605e57600101600055005b634e487b7160e01b600052601160045260246000fd5b600080fd5b3460745760003660031901126074576020906000548152f35b34607457602036600319011260745760043560005500fea2646970667358221220e978270883b7baed10810c4079c941512e93a7ba1cd1108c781d4bc738d9090564736f6c634300081a0033")]
contract Counter {
uint256 public number;
function setNumber(uint256 newNumber) public {
number = newNumber;
}
function increment() public {
number++;
}
}
}
#[tokio::test]
async fn test_concurrent_calls() -> Result<(), Error> {
let provider = Arc::new(
ProviderBuilder::new()
.with_simple_nonce_management()
.connect_anvil_with_wallet_and_config(|anvil| anvil.block_time(2))?,
);
let contract = Counter::deploy(provider.clone()).await?;
let handle_1 = tokio::spawn({
let contract = contract.clone();
let provider = provider.clone();
async move {
let call = contract.setNumber(U256::from(100));
retry_call(call, 3, provider, None).await
}
});
let handle_2 = tokio::spawn({
let contract = contract.clone();
let provider = provider.clone();
async move {
let call = contract.setNumber(U256::from(100));
retry_call(call, 3, provider, None).await
}
});
let tx_base = handle_1.await.unwrap();
let tx_one = handle_2.await.unwrap();
assert!(tx_base.is_ok());
assert!(tx_one.is_ok());
Ok(())
}
#[tokio::test]
async fn test_transaction_replacement() -> Result<(), Error> {
let provider = Arc::new(
ProviderBuilder::new()
.with_simple_nonce_management()
.connect_anvil_with_wallet_and_config(|anvil| anvil.block_time(5))?,
);
let contract = Counter::deploy(provider.clone()).await?;
let wallet = provider.wallet();
let tx_count = provider
.get_transaction_count(wallet.default_signer().address())
.await?;
let _ = contract.increment().nonce(tx_count).send().await?;
let call_two = contract.increment().nonce(tx_count);
let tx = retry_call(call_two, 3, provider, Some(1)).await;
assert!(tx.is_ok());
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/helpers/mod.rs | crates/shared/src/web3/contracts/helpers/mod.rs | pub mod utils;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/structs/compute_provider.rs | crates/shared/src/web3/contracts/structs/compute_provider.rs | use super::compute_node::ComputeNode;
use alloy::primitives::Address;
pub struct ComputeProvider {
pub provider_address: Address,
pub is_whitelisted: bool,
pub active_nodes: u32,
pub nodes: Vec<ComputeNode>,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/structs/rewards_distributor.rs | crates/shared/src/web3/contracts/structs/rewards_distributor.rs | use alloy::primitives::{Address, U256};
#[derive(Debug, Clone, PartialEq)]
pub struct NodeRewards {
pub claimable_tokens: U256,
pub locked_tokens: U256,
pub total_rewards: U256,
pub is_active: bool,
pub provider: Address,
}
#[derive(Debug, Clone)]
pub struct NodeBucketInfo {
pub last_24h: U256,
pub total_all: U256,
pub last_claimed: U256,
pub is_active: bool,
}
#[derive(Debug, Clone)]
pub struct PoolRewardsSummary {
pub total_claimable: U256,
pub total_locked: U256,
pub active_nodes: u32,
pub nodes: Vec<Address>,
pub node_rewards: Vec<NodeRewards>,
}
#[derive(Debug, Clone)]
pub struct RewardsDistributorInfo {
pub pool_id: U256,
pub reward_rate: U256,
pub reward_token: Address,
pub total_rewards_distributed: U256,
pub is_active: bool,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/structs/mod.rs | crates/shared/src/web3/contracts/structs/mod.rs | pub mod compute_node;
pub mod compute_pool;
pub mod compute_provider;
pub mod rewards_distributor;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/structs/compute_node.rs | crates/shared/src/web3/contracts/structs/compute_node.rs | use alloy::primitives::Address;
// TODO: Consider renaming as we have an internal compute node struct also
#[derive(Debug, Clone, PartialEq)]
pub struct ComputeNode {
pub provider: Address,
pub subkey: Address,
pub specs_uri: String,
pub compute_units: u32, // H100 equivalents
pub benchmark_score: u32, // some fidelity metric
pub is_active: bool,
pub is_validated: bool,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/structs/compute_pool.rs | crates/shared/src/web3/contracts/structs/compute_pool.rs | use alloy::primitives::Address;
use alloy::primitives::U256;
#[derive(Debug, PartialEq)]
pub enum PoolStatus {
PENDING,
ACTIVE,
COMPLETED,
}
#[derive(Debug)]
pub struct PoolInfo {
pub pool_id: U256,
pub domain_id: U256,
pub pool_name: String,
pub creator: Address,
pub compute_manager_key: Address,
pub creation_time: U256,
pub start_time: U256,
pub end_time: U256,
pub pool_data_uri: String,
pub pool_validation_logic: Address,
pub total_compute: U256,
pub compute_limit: U256,
pub status: PoolStatus,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/core/builder.rs | crates/shared/src/web3/contracts/core/builder.rs | use alloy::primitives::Address;
use crate::web3::contracts::{
core::error::ContractError, // Using custom error ContractError
implementations::{
ai_token_contract::AIToken, compute_pool_contract::ComputePool,
compute_registry_contract::ComputeRegistryContract,
domain_registry_contract::DomainRegistryContract,
prime_network_contract::PrimeNetworkContract, stake_manager::StakeManagerContract,
work_validators::synthetic_data_validator::SyntheticDataWorkValidator,
},
};
use std::option::Option;
use std::result::Result;
#[derive(Clone)]
pub struct Contracts<P: alloy_provider::Provider> {
pub compute_registry: ComputeRegistryContract<P>,
pub ai_token: AIToken<P>,
pub prime_network: PrimeNetworkContract<P>,
pub compute_pool: ComputePool<P>,
pub stake_manager: Option<StakeManagerContract<P>>,
pub synthetic_data_validator: Option<SyntheticDataWorkValidator<P>>,
pub domain_registry: Option<DomainRegistryContract<P>>,
}
pub struct ContractBuilder<P: alloy_provider::Provider + Clone> {
provider: P,
compute_registry: Option<ComputeRegistryContract<P>>,
ai_token: Option<AIToken<P>>,
prime_network: Option<PrimeNetworkContract<P>>,
compute_pool: Option<ComputePool<P>>,
stake_manager: Option<StakeManagerContract<P>>,
synthetic_data_validator: Option<SyntheticDataWorkValidator<P>>,
domain_registry: Option<DomainRegistryContract<P>>,
}
impl<P: alloy_provider::Provider + Clone> ContractBuilder<P> {
pub fn new(provider: P) -> Self {
Self {
provider,
compute_registry: None,
ai_token: None,
prime_network: None,
compute_pool: None,
stake_manager: None,
synthetic_data_validator: None,
domain_registry: None,
}
}
pub fn with_compute_registry(mut self) -> Self {
self.compute_registry = Some(ComputeRegistryContract::new(
self.provider.clone(),
"compute_registry.json",
));
self
}
pub fn with_ai_token(mut self) -> Self {
self.ai_token = Some(AIToken::new(self.provider.clone(), "ai_token.json"));
self
}
pub fn with_prime_network(mut self) -> Self {
self.prime_network = Some(PrimeNetworkContract::new(
self.provider.clone(),
"prime_network.json",
));
self
}
pub fn with_compute_pool(mut self) -> Self {
self.compute_pool = Some(ComputePool::new(self.provider.clone(), "compute_pool.json"));
self
}
pub fn with_synthetic_data_validator(mut self, address: Option<Address>) -> Self {
self.synthetic_data_validator = Some(SyntheticDataWorkValidator::new(
address.unwrap_or(Address::ZERO),
self.provider.clone(),
"synthetic_data_work_validator.json",
));
self
}
pub fn with_domain_registry(mut self) -> Self {
self.domain_registry = Some(DomainRegistryContract::new(
self.provider.clone(),
"domain_registry.json",
));
self
}
pub fn with_stake_manager(mut self) -> Self {
self.stake_manager = Some(StakeManagerContract::new(
self.provider.clone(),
"stake_manager.json",
));
self
}
// TODO: This is not ideal yet - now you have to init all contracts all the time
pub fn build(self) -> Result<Contracts<P>, ContractError> {
// Using custom error ContractError
Ok(Contracts {
compute_pool: match self.compute_pool {
Some(pool) => pool,
None => return Err(ContractError::Other("ComputePool not initialized".into())),
},
compute_registry: match self.compute_registry {
Some(registry) => registry,
None => {
return Err(ContractError::Other(
"ComputeRegistry not initialized".into(),
))
} // Custom error handling
},
ai_token: match self.ai_token {
Some(token) => token,
None => return Err(ContractError::Other("AIToken not initialized".into())), // Custom error handling
},
prime_network: match self.prime_network {
Some(network) => network,
None => return Err(ContractError::Other("PrimeNetwork not initialized".into())), // Custom error handling
},
synthetic_data_validator: self.synthetic_data_validator,
domain_registry: self.domain_registry,
stake_manager: self.stake_manager,
})
}
pub fn build_partial(&self) -> Result<Contracts<P>, ContractError> {
Ok(Contracts {
compute_registry: self
.compute_registry
.as_ref()
.ok_or_else(|| ContractError::Other("ComputeRegistry not initialized".into()))?
.clone(),
ai_token: self
.ai_token
.as_ref()
.ok_or_else(|| ContractError::Other("AIToken not initialized".into()))?
.clone(),
prime_network: self
.prime_network
.as_ref()
.ok_or_else(|| ContractError::Other("PrimeNetwork not initialized".into()))?
.clone(),
compute_pool: self
.compute_pool
.as_ref()
.ok_or_else(|| ContractError::Other("ComputePool not initialized".into()))?
.clone(),
domain_registry: Some(
self.domain_registry
.as_ref()
.ok_or_else(|| ContractError::Other("DomainRegistry not initialized".into()))?
.clone(),
),
stake_manager: Some(
self.stake_manager
.as_ref()
.ok_or_else(|| ContractError::Other("StakeManager not initialized".into()))?
.clone(),
),
synthetic_data_validator: None,
})
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/core/contract.rs | crates/shared/src/web3/contracts/core/contract.rs | use alloy::{
contract::{ContractInstance, Interface},
primitives::Address,
};
use std::include_bytes;
macro_rules! include_abi {
($path:expr) => {{
const ABI_BYTES: &[u8] = include_bytes!($path);
ABI_BYTES
}};
}
#[derive(Clone)]
pub struct Contract<P: alloy_provider::Provider> {
instance: ContractInstance<P>,
}
impl<P: alloy_provider::Provider> Contract<P> {
pub fn new(address: Address, provider: P, abi_file_path: &str) -> Self {
let instance = Self::parse_abi(abi_file_path, provider, address);
Self { instance }
}
fn parse_abi(path: &str, provider: P, address: Address) -> ContractInstance<P> {
let artifact = match path {
"compute_registry.json" => {
include_abi!("../../../../artifacts/abi/compute_registry.json")
}
"ai_token.json" => include_abi!("../../../../artifacts/abi/ai_token.json"),
"prime_network.json" => include_abi!("../../../../artifacts/abi/prime_network.json"),
"compute_pool.json" => include_abi!("../../../../artifacts/abi/compute_pool.json"),
"rewards_distributor.json" => {
include_abi!("../../../../artifacts/abi/rewards_distributor.json")
}
"synthetic_data_work_validator.json" => {
include_abi!("../../../../artifacts/abi/synthetic_data_work_validator.json")
}
"stake_manager.json" => include_abi!("../../../../artifacts/abi/stake_manager.json"),
"domain_registry.json" => {
include_abi!("../../../../artifacts/abi/domain_registry.json")
}
_ => panic!("Unknown ABI file: {path}"),
};
let abi_json: serde_json::Value = serde_json::from_slice(artifact)
.map_err(|err| {
eprintln!("Failed to parse JSON: {err}");
std::process::exit(1);
})
.unwrap_or_else(|_| {
eprintln!("Error parsing JSON, exiting.");
std::process::exit(1);
});
let abi =
serde_json::from_value(abi_json.clone()).expect("Failed to parse ABI from artifact");
ContractInstance::new(address, provider, Interface::new(abi))
}
pub fn instance(&self) -> &ContractInstance<P> {
&self.instance
}
pub fn provider(&self) -> &P {
self.instance.provider()
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/core/error.rs | crates/shared/src/web3/contracts/core/error.rs | use alloy::primitives::Address;
use std::fmt;
#[derive(Debug)]
pub enum ContractError {
// Initialization errors
AbiParseError(String),
ArtifactReadError(String),
// Contract interaction errors
CallError(String),
TransactionError(String),
// Data parsing errors
DecodingError(String),
InvalidResponse(String),
// Business logic errors
ProviderNotFound(Address),
NodeNotRegistered { provider: Address, node: Address },
InvalidProviderState(String),
// Generic errors
Web3Error(String),
Other(Box<dyn std::error::Error + Send + Sync>),
}
impl std::error::Error for ContractError {}
impl fmt::Display for ContractError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
// Initialization errors
ContractError::AbiParseError(msg) => write!(f, "Failed to parse ABI: {msg}"),
ContractError::ArtifactReadError(msg) => write!(f, "Failed to read artifact: {msg}"),
// Contract interaction errors
ContractError::CallError(msg) => write!(f, "Contract call failed: {msg}"),
ContractError::TransactionError(msg) => write!(f, "Transaction failed: {msg}"),
// Data parsing errors
ContractError::DecodingError(msg) => write!(f, "Failed to decode data: {msg}"),
ContractError::InvalidResponse(msg) => write!(f, "Invalid contract response: {msg}"),
// Business logic errors
ContractError::ProviderNotFound(address) => {
write!(f, "Provider not found: {address:?}")
}
ContractError::NodeNotRegistered { provider, node } => {
write!(f, "Node {node:?} not registered for provider {provider:?}")
}
ContractError::InvalidProviderState(msg) => {
write!(f, "Invalid provider state: {msg}")
}
// Generic errors
ContractError::Web3Error(msg) => write!(f, "Web3 error: {msg}"),
ContractError::Other(e) => write!(f, "Other error: {e}"),
}
}
}
// Convenient type alias for Result with ContractError
pub type ContractResult<T> = Result<T, ContractError>;
// Conversion implementations for common error types
impl From<std::io::Error> for ContractError {
fn from(err: std::io::Error) -> Self {
ContractError::ArtifactReadError(err.to_string())
}
}
impl From<serde_json::Error> for ContractError {
fn from(err: serde_json::Error) -> Self {
ContractError::AbiParseError(err.to_string())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/core/mod.rs | crates/shared/src/web3/contracts/core/mod.rs | pub mod builder;
pub mod contract;
pub mod error;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/compute_pool_contract.rs | crates/shared/src/web3/contracts/implementations/compute_pool_contract.rs | use crate::web3::contracts::constants::addresses::COMPUTE_POOL_ADDRESS;
use crate::web3::contracts::core::contract::Contract;
use crate::web3::contracts::helpers::utils::{get_selector, PrimeCallBuilder};
use crate::web3::contracts::implementations::rewards_distributor_contract::RewardsDistributor;
use crate::web3::contracts::structs::compute_pool::{PoolInfo, PoolStatus};
use crate::web3::contracts::structs::rewards_distributor::{NodeRewards, PoolRewardsSummary};
use crate::web3::wallet::WalletProvider;
use alloy::dyn_abi::DynSolValue;
use alloy::primitives::{Address, FixedBytes, U256};
#[derive(Clone)]
pub struct ComputePool<P: alloy_provider::Provider> {
pub instance: Contract<P>,
}
impl<P: alloy_provider::Provider> ComputePool<P> {
pub fn new(provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(COMPUTE_POOL_ADDRESS, provider, abi_file_path);
Self { instance }
}
pub async fn get_pool_info(
&self,
pool_id: U256,
) -> Result<PoolInfo, Box<dyn std::error::Error>> {
let pool_info_response = self
.instance
.instance()
.function("getComputePool", &[pool_id.into()])?
.call()
.await?;
let pool_info_tuple: &[DynSolValue] =
pool_info_response.first().unwrap().as_tuple().unwrap();
// Check if pool exists by looking at creator and compute manager addresses
if pool_info_tuple[3].as_address().unwrap() == Address::ZERO
&& pool_info_tuple[4].as_address().unwrap() == Address::ZERO
{
return Err("Pool does not exist".into());
}
let pool_id: U256 = pool_info_tuple[0].as_uint().unwrap().0;
let domain_id: U256 = pool_info_tuple[1].as_uint().unwrap().0;
let name: String = pool_info_tuple[2].as_str().unwrap().to_string();
let creator: Address = pool_info_tuple[3].as_address().unwrap();
let compute_manager_key: Address = pool_info_tuple[4].as_address().unwrap();
let creation_time: U256 = pool_info_tuple[5].as_uint().unwrap().0;
let start_time: U256 = pool_info_tuple[6].as_uint().unwrap().0;
let end_time: U256 = pool_info_tuple[7].as_uint().unwrap().0;
let pool_data_uri: String = pool_info_tuple[8].as_str().unwrap().to_string();
let pool_validation_logic: Address = pool_info_tuple[9].as_address().unwrap();
let total_compute: U256 = pool_info_tuple[10].as_uint().unwrap().0;
let compute_limit: U256 = pool_info_tuple[11].as_uint().unwrap().0;
let status: U256 = pool_info_tuple[12].as_uint().unwrap().0;
let status: u8 = status.try_into().expect("Failed to convert status to u8");
let mapped_status = match status {
0 => PoolStatus::PENDING,
1 => PoolStatus::ACTIVE,
2 => PoolStatus::COMPLETED,
_ => panic!("Unknown status value: {status}"),
};
let pool_info = PoolInfo {
pool_id,
domain_id,
pool_name: name,
creator,
compute_manager_key,
creation_time,
start_time,
end_time,
pool_data_uri,
pool_validation_logic,
total_compute,
compute_limit,
status: mapped_status,
};
Ok(pool_info)
}
pub async fn is_node_blacklisted(
&self,
pool_id: u32,
node: Address,
) -> Result<bool, Box<dyn std::error::Error>> {
let arg_pool_id: U256 = U256::from(pool_id);
let result = self
.instance
.instance()
.function(
"isNodeBlacklistedFromPool",
&[arg_pool_id.into(), node.into()],
)?
.call()
.await?;
let is_blacklisted = result
.first()
.ok_or("Missing blacklist status in response")?
.as_bool()
.ok_or("Failed to parse blacklist status as bool")?;
Ok(is_blacklisted)
}
pub async fn get_blacklisted_nodes(
&self,
pool_id: u32,
) -> Result<Vec<Address>, Box<dyn std::error::Error>> {
let arg_pool_id: U256 = U256::from(pool_id);
let result = self
.instance
.instance()
.function("getBlacklistedNodes", &[arg_pool_id.into()])?
.call()
.await?;
let blacklisted_nodes = result
.first()
.unwrap()
.as_array()
.unwrap()
.iter()
.map(|node| node.as_address().unwrap())
.collect();
Ok(blacklisted_nodes)
}
pub async fn is_node_in_pool(
&self,
pool_id: u32,
node: Address,
) -> Result<bool, Box<dyn std::error::Error>> {
let arg_pool_id: U256 = U256::from(pool_id);
let result = self
.instance
.instance()
.function("isNodeInPool", &[arg_pool_id.into(), node.into()])?
.call()
.await?;
let is_in_pool = result
.first()
.ok_or("Missing node-in-pool status in response")?
.as_bool()
.ok_or("Failed to parse node-in-pool status as bool")?;
Ok(is_in_pool)
}
/// Get the rewards distributor address for a specific pool
pub async fn get_reward_distributor_address(
&self,
pool_id: U256,
) -> Result<Address, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("getRewardDistributorForPool", &[pool_id.into()])?
.call()
.await?;
let address = result
.first()
.ok_or("Missing rewards distributor address in response")?
.as_address()
.ok_or("Failed to parse rewards distributor address")?;
Ok(address)
}
/// Calculate rewards for a specific node in a pool
pub async fn calculate_node_rewards(
&self,
pool_id: U256,
node: Address,
) -> Result<(U256, U256), Box<dyn std::error::Error>> {
let distributor_address = self.get_reward_distributor_address(pool_id).await?;
let rewards_distributor = RewardsDistributor::new(
distributor_address,
self.instance.provider(),
"rewards_distributor.json",
);
rewards_distributor.calculate_rewards(node).await
}
/// Get detailed rewards information for a node in a pool
pub async fn get_node_rewards_details(
&self,
pool_id: U256,
node: Address,
provider: Address,
) -> Result<NodeRewards, Box<dyn std::error::Error>> {
let distributor_address = self.get_reward_distributor_address(pool_id).await?;
let rewards_distributor = RewardsDistributor::new(
distributor_address,
self.instance.provider(),
"rewards_distributor.json",
);
rewards_distributor
.get_node_rewards_details(node, provider)
.await
}
/// Calculate rewards for all nodes in a pool
pub async fn calculate_pool_rewards(
&self,
pool_id: U256,
) -> Result<PoolRewardsSummary, Box<dyn std::error::Error>> {
// Get all nodes in the pool
let nodes = self.get_compute_pool_nodes(pool_id).await?;
let distributor_address = self.get_reward_distributor_address(pool_id).await?;
let rewards_distributor = RewardsDistributor::new(
distributor_address,
self.instance.provider(),
"rewards_distributor.json",
);
let mut summary = PoolRewardsSummary {
total_claimable: U256::ZERO,
total_locked: U256::ZERO,
active_nodes: 0,
nodes: nodes.clone(),
node_rewards: Vec::new(),
};
for node in &nodes {
let (claimable, locked) = rewards_distributor.calculate_rewards(*node).await?;
let node_info = rewards_distributor.get_node_info(*node).await?;
let node_rewards = NodeRewards {
claimable_tokens: claimable,
locked_tokens: locked,
total_rewards: claimable + locked,
is_active: node_info.is_active,
provider: Address::ZERO, // This would need to be fetched from compute registry
};
summary.total_claimable += claimable;
summary.total_locked += locked;
if node_info.is_active {
summary.active_nodes += 1;
}
summary.node_rewards.push(node_rewards);
}
Ok(summary)
}
/// Check if a node has claimable rewards in a pool
pub async fn has_claimable_rewards(
&self,
pool_id: U256,
node: Address,
) -> Result<bool, Box<dyn std::error::Error>> {
let distributor_address = self.get_reward_distributor_address(pool_id).await?;
let rewards_distributor = RewardsDistributor::new(
distributor_address,
self.instance.provider(),
"rewards_distributor.json",
);
rewards_distributor.has_claimable_rewards(node).await
}
/// Get the reward rate for a specific pool
pub async fn get_pool_reward_rate(
&self,
pool_id: U256,
) -> Result<U256, Box<dyn std::error::Error>> {
let distributor_address = self.get_reward_distributor_address(pool_id).await?;
let rewards_distributor = RewardsDistributor::new(
distributor_address,
self.instance.provider(),
"rewards_distributor.json",
);
rewards_distributor.get_reward_rate().await
}
/// Get all nodes in a compute pool
pub async fn get_compute_pool_nodes(
&self,
pool_id: U256,
) -> Result<Vec<Address>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("getComputePoolNodes", &[pool_id.into()])?
.call()
.await?;
let nodes_array = result
.first()
.ok_or("Missing nodes array in response")?
.as_array()
.ok_or("Failed to parse nodes as array")?;
let mut nodes = Vec::new();
for node_value in nodes_array {
let node_address = node_value
.as_address()
.ok_or("Failed to parse node address")?;
nodes.push(node_address);
}
Ok(nodes)
}
}
impl ComputePool<WalletProvider> {
pub fn build_join_compute_pool_call(
&self,
pool_id: U256,
provider_address: Address,
nodes: Vec<Address>,
nonces: Vec<[u8; 32]>,
expirations: Vec<[u8; 32]>,
signatures: Vec<FixedBytes<65>>,
) -> Result<PrimeCallBuilder<'_, alloy::json_abi::Function>, Box<dyn std::error::Error>> {
let join_compute_pool_selector =
get_selector("joinComputePool(uint256,address,address[],uint256[],uint256[],bytes[])");
let address = DynSolValue::from(
nodes
.iter()
.map(|addr| DynSolValue::from(*addr))
.collect::<Vec<_>>(),
);
let nonces = DynSolValue::from(
nonces
.iter()
.map(|nonce| DynSolValue::from(U256::from_be_bytes(*nonce)))
.collect::<Vec<_>>(),
);
let expirations = DynSolValue::from(
expirations
.iter()
.map(|exp| DynSolValue::from(U256::from_be_bytes(*exp)))
.collect::<Vec<_>>(),
);
let signatures = DynSolValue::from(
signatures
.iter()
.map(|sig| DynSolValue::Bytes(sig.to_vec()))
.collect::<Vec<_>>(),
);
let call = self.instance.instance().function_from_selector(
&join_compute_pool_selector,
&[
pool_id.into(),
provider_address.into(),
address,
nonces,
expirations,
signatures,
],
)?;
Ok(call)
}
pub async fn leave_compute_pool(
&self,
pool_id: U256,
provider_address: Address,
node: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let leave_compute_pool_selector = get_selector("leaveComputePool(uint256,address,address)");
let result = self
.instance
.instance()
.function_from_selector(
&leave_compute_pool_selector,
&[pool_id.into(), provider_address.into(), node.into()],
)?
.send()
.await?
.watch()
.await?;
Ok(result)
}
pub async fn eject_node(
&self,
pool_id: u32,
node: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
println!("Ejecting node");
let arg_pool_id: U256 = U256::from(pool_id);
let result = self
.instance
.instance()
.function("ejectNode", &[arg_pool_id.into(), node.into()])?
.send()
.await?
.watch()
.await?;
println!("Result: {result:?}");
Ok(result)
}
pub fn build_work_submission_call(
&self,
pool_id: U256,
node: Address,
data: Vec<u8>,
work_units: U256,
) -> Result<PrimeCallBuilder<'_, alloy::json_abi::Function>, Box<dyn std::error::Error>> {
// Extract the work key from the first 32 bytes
// Create a new data vector with work key and work units (set to 1)
let mut submit_data = Vec::with_capacity(64);
submit_data.extend_from_slice(&data[0..32]); // Work key
submit_data.extend_from_slice(&work_units.to_be_bytes::<32>());
let call = self.instance.instance().function(
"submitWork",
&[pool_id.into(), node.into(), submit_data.into()],
)?;
Ok(call)
}
pub async fn blacklist_node(
&self,
pool_id: u32,
node: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
println!("Blacklisting node");
let arg_pool_id: U256 = U256::from(pool_id);
let result = self
.instance
.instance()
.function("blacklistNode", &[arg_pool_id.into(), node.into()])?
.send()
.await?
.watch()
.await?;
println!("Result: {result:?}");
Ok(result)
}
pub async fn create_compute_pool(
&self,
domain_id: U256,
compute_manager_key: Address,
pool_name: String,
pool_data_uri: String,
compute_limit: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function(
"createComputePool",
&[
domain_id.into(),
compute_manager_key.into(),
pool_name.into(),
pool_data_uri.into(),
compute_limit.into(),
],
)?
.send()
.await?
.watch()
.await?;
Ok(result)
}
pub async fn start_compute_pool(
&self,
pool_id: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("startComputePool", &[pool_id.into()])?
.send()
.await?
.watch()
.await?;
Ok(result)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/prime_network_contract.rs | crates/shared/src/web3/contracts/implementations/prime_network_contract.rs | use crate::web3::contracts::constants::addresses::PRIME_NETWORK_ADDRESS;
use crate::web3::contracts::core::contract::Contract;
use crate::web3::wallet::WalletProvider;
use alloy::dyn_abi::DynSolValue;
use alloy::primitives::{keccak256, Address, FixedBytes, U256};
use alloy_provider::Provider as _;
use anyhow::Error;
#[derive(Clone)]
pub struct PrimeNetworkContract<P: alloy_provider::Provider> {
pub instance: Contract<P>,
}
impl<P: alloy_provider::Provider> PrimeNetworkContract<P> {
pub fn new(provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(PRIME_NETWORK_ADDRESS, provider, abi_file_path);
Self { instance }
}
pub async fn get_validator_role(&self) -> Result<Vec<Address>, Error> {
let hash = keccak256(b"VALIDATOR_ROLE");
let value = DynSolValue::FixedBytes(hash, 32);
let members = self
.instance
.instance()
.function("getRoleMembers", &[value])?
.call()
.await?;
let mut members_vec = Vec::new();
for member in members {
if let Some(array) = member.as_array() {
for address in array {
if let Some(addr) = address.as_address() {
members_vec.push(addr);
} else {
return Err(Error::msg("Failed to convert member to address"));
}
}
} else {
return Err(Error::msg("Member is not an array"));
}
}
Ok(members_vec)
}
}
impl PrimeNetworkContract<WalletProvider> {
pub async fn register_provider(
&self,
stake: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let register_tx = self
.instance
.instance()
.function("registerProvider", &[stake.into()])?
.send()
.await?
.watch()
.await?;
Ok(register_tx)
}
pub async fn stake(
&self,
additional_stake: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let stake_tx = self
.instance
.instance()
.function("increaseStake", &[additional_stake.into()])?
.send()
.await?
.watch()
.await?;
Ok(stake_tx)
}
pub async fn add_compute_node(
&self,
node_address: Address,
compute_units: U256,
signature: Vec<u8>,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let add_node_tx = self
.instance
.instance()
.function(
"addComputeNode",
&[
node_address.into(),
"ipfs://nodekey/".to_string().into(),
compute_units.into(),
DynSolValue::Bytes(signature.to_vec()),
],
)?
.send()
.await?
.watch()
.await?;
Ok(add_node_tx)
}
pub async fn remove_compute_node(
&self,
provider_address: Address,
node_address: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let remove_node_tx = self
.instance
.instance()
.function(
"removeComputeNode",
&[provider_address.into(), node_address.into()],
)?
.send()
.await?
.watch()
.await?;
Ok(remove_node_tx)
}
pub async fn validate_node(
&self,
provider_address: Address,
node_address: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let validate_node_tx = self
.instance
.instance()
.function(
"validateNode",
&[provider_address.into(), node_address.into()],
)?
.send()
.await?
.watch()
.await?;
Ok(validate_node_tx)
}
pub async fn create_domain(
&self,
domain_name: String,
validation_logic: Address,
domain_uri: String,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let create_domain_tx = self
.instance
.instance()
.function(
"createDomain",
&[
domain_name.into(),
validation_logic.into(),
domain_uri.into(),
],
)?
.send()
.await?
.watch()
.await?;
Ok(create_domain_tx)
}
pub async fn update_validation_logic(
&self,
domain_id: U256,
validation_logic: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let update_validation_logic_tx = self
.instance
.instance()
.function(
"updateDomainValidationLogic",
&[domain_id.into(), validation_logic.into()],
)?
.send()
.await?
.watch()
.await?;
Ok(update_validation_logic_tx)
}
pub async fn set_stake_minimum(
&self,
min_stake_amount: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let set_stake_minimum_tx = self
.instance
.instance()
.function("setStakeMinimum", &[min_stake_amount.into()])?
.send()
.await?
.watch()
.await?;
Ok(set_stake_minimum_tx)
}
pub async fn whitelist_provider(
&self,
provider_address: Address,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let whitelist_provider_tx = self
.instance
.instance()
.function("whitelistProvider", &[provider_address.into()])?
.send()
.await?
.watch()
.await?;
let _ = self
.instance
.provider()
.get_transaction_receipt(whitelist_provider_tx)
.await?;
Ok(whitelist_provider_tx)
}
pub async fn invalidate_work(
&self,
pool_id: U256,
penalty: U256,
data: Vec<u8>,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let invalidate_work_tx = self
.instance
.instance()
.function(
"invalidateWork",
&[pool_id.into(), penalty.into(), data.into()],
)?
.send()
.await?
.watch()
.await?;
Ok(invalidate_work_tx)
}
pub async fn soft_invalidate_work(
&self,
pool_id: U256,
data: Vec<u8>,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let soft_invalidate_work_tx = self
.instance
.instance()
.function("softInvalidateWork", &[pool_id.into(), data.into()])?
.send()
.await?
.watch()
.await?;
Ok(soft_invalidate_work_tx)
}
pub async fn reclaim_stake(
&self,
amount: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let reclaim_tx = self
.instance
.instance()
.function("reclaimStake", &[amount.into()])?
.send()
.await?
.watch()
.await?;
Ok(reclaim_tx)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::web3::wallet::Wallet;
use url::Url;
#[tokio::test]
#[ignore = "This test requires a running blockchain with deployed contracts"]
async fn test_get_validator_role() {
// This test requires:
// 1. A running local blockchain (e.g. anvil or ganache) at http://localhost:8545
// 2. The PrimeNetwork contract deployed with known address
// 3. At least one validator role assigned
let wallet = Wallet::new(
"0x0000000000000000000000000000000000000000000000000000000000000001",
Url::parse("http://localhost:8545").unwrap(),
)
.unwrap();
let prime_network_contract =
PrimeNetworkContract::new(wallet.provider, "prime_network.json");
let validators = prime_network_contract.get_validator_role().await.unwrap();
assert_eq!(validators.len(), 1, "Expected exactly one validator");
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/ai_token_contract.rs | crates/shared/src/web3/contracts/implementations/ai_token_contract.rs | use crate::web3::contracts::constants::addresses::{AI_TOKEN_ADDRESS, PRIME_NETWORK_ADDRESS};
use crate::web3::contracts::core::contract::Contract;
use crate::web3::contracts::helpers::utils::PrimeCallBuilder;
use crate::web3::wallet::WalletProvider;
use alloy::primitives::{Address, FixedBytes, U256};
#[derive(Clone)]
pub struct AIToken<P: alloy_provider::Provider> {
pub instance: Contract<P>,
}
impl<P: alloy_provider::Provider> AIToken<P> {
pub fn new(provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(AI_TOKEN_ADDRESS, provider, abi_file_path);
Self { instance }
}
pub async fn balance_of(&self, account: Address) -> Result<U256, Box<dyn std::error::Error>> {
let balance: U256 = self
.instance
.instance()
.function("balanceOf", &[account.into()])?
.call()
.await?
.into_iter()
.next()
.map(|value| value.as_uint().unwrap_or_default())
.unwrap_or_default()
.0;
Ok(balance)
}
}
impl AIToken<WalletProvider> {
/// Approves the specified amount of tokens to be spent by the PRIME network address.
///
/// # Parameters
/// - `amount`: The amount of tokens to approve for spending.
///
/// # Returns
/// - `Result<(), Box<dyn std::error::Error>>`: Returns `Ok(())` if the approval transaction is successful,
/// or an error if the transaction fails.
pub async fn approve(
&self,
amount: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let tx = self
.instance
.instance()
.function("approve", &[PRIME_NETWORK_ADDRESS.into(), amount.into()])?
.send()
.await?
.watch()
.await?;
Ok(tx)
}
pub fn build_mint_call(
&self,
to: Address,
amount: U256,
) -> Result<PrimeCallBuilder<'_, alloy::json_abi::Function>, Box<dyn std::error::Error>> {
let call = self
.instance
.instance()
.function("mint", &[to.into(), amount.into()])?;
Ok(call)
}
pub async fn mint(
&self,
to: Address,
amount: U256,
) -> Result<FixedBytes<32>, Box<dyn std::error::Error>> {
let call = self.build_mint_call(to, amount)?;
let tx = call.send().await?.watch().await?;
Ok(tx)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/compute_registry_contract.rs | crates/shared/src/web3/contracts/implementations/compute_registry_contract.rs | use super::{
super::constants::addresses::COMPUTE_REGISTRY_ADDRESS, super::core::contract::Contract,
super::structs::compute_provider::ComputeProvider,
};
use crate::web3::contracts::helpers::utils::get_selector;
use alloy::dyn_abi::DynSolValue;
use alloy::primitives::{Address, U256};
use anyhow::Result;
#[derive(Clone)]
pub struct ComputeRegistryContract<P: alloy_provider::Provider> {
instance: Contract<P>,
}
impl<P: alloy_provider::Provider> ComputeRegistryContract<P> {
pub fn new(provider: P, abi_path: &str) -> Self {
let instance = Contract::new(COMPUTE_REGISTRY_ADDRESS, provider, abi_path);
Self { instance }
}
pub async fn get_provider(
&self,
address: Address,
) -> Result<ComputeProvider, Box<dyn std::error::Error>> {
let provider_response = self
.instance
.instance()
.function("getProvider", &[address.into()])?
.call()
.await?;
let provider_tuple: &[DynSolValue] = provider_response.first().unwrap().as_tuple().unwrap();
let provider_address: Address = provider_tuple[0].as_address().unwrap();
let is_whitelisted: bool = provider_tuple[1].as_bool().unwrap();
let provider = ComputeProvider {
provider_address,
is_whitelisted,
active_nodes: 0,
nodes: vec![],
};
Ok(provider)
}
pub async fn get_provider_total_compute(
&self,
address: Address,
) -> Result<U256, Box<dyn std::error::Error>> {
let provider_response = self
.instance
.instance()
.function("getProviderTotalCompute", &[address.into()])?
.call()
.await?;
Ok(U256::from(
provider_response.first().unwrap().as_uint().unwrap().0,
))
}
pub async fn get_node(
&self,
provider_address: Address,
node_address: Address,
) -> anyhow::Result<(bool, bool)> {
let get_node_selector = get_selector("getNode(address,address)");
let node_response = self
.instance
.instance()
.function_from_selector(
&get_node_selector,
&[provider_address.into(), node_address.into()],
)?
.call()
.await?;
if let Some(_node_data) = node_response.first() {
let node_tuple = _node_data.as_tuple().unwrap();
// Check that provider and subkey match
let node_provider = node_tuple[0].as_address().unwrap();
let node_subkey = node_tuple[1].as_address().unwrap();
if node_provider != provider_address || node_subkey != node_address {
return Err(anyhow::anyhow!("Node does not match provider or subkey"));
}
let active = node_tuple[5].as_bool().unwrap();
let validated = node_tuple[6].as_bool().unwrap();
Ok((active, validated))
} else {
Err(anyhow::anyhow!("Node is not registered"))
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/rewards_distributor_contract.rs | crates/shared/src/web3/contracts/implementations/rewards_distributor_contract.rs | use crate::web3::contracts::core::contract::Contract;
use crate::web3::contracts::structs::rewards_distributor::{NodeBucketInfo, NodeRewards};
use crate::web3::wallet::WalletProvider;
use alloy::primitives::{Address, U256};
#[derive(Clone)]
pub struct RewardsDistributor<P: alloy_provider::Provider> {
pub instance: Contract<P>,
}
impl<P: alloy_provider::Provider> RewardsDistributor<P> {
pub fn new(rewards_distributor_address: Address, provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(rewards_distributor_address, provider, abi_file_path);
Self { instance }
}
/// Calculate rewards for a specific node
/// Returns (claimable_tokens, locked_tokens)
pub async fn calculate_rewards(
&self,
node: Address,
) -> Result<(U256, U256), Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("calculateRewards", &[node.into()])?
.call()
.await?;
let claimable = result
.first()
.ok_or("Missing claimable rewards in response")?
.as_uint()
.ok_or("Failed to parse claimable rewards as uint")?
.0;
let locked = result
.get(1)
.ok_or("Missing locked rewards in response")?
.as_uint()
.ok_or("Failed to parse locked rewards as uint")?
.0;
Ok((claimable, locked))
}
/// Get detailed node information including bucket data
pub async fn get_node_info(
&self,
node: Address,
) -> Result<NodeBucketInfo, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("nodeInfo", &[node.into()])?
.call()
.await?;
let last_24h = result
.first()
.ok_or("Missing last_24h in nodeInfo response")?
.as_uint()
.ok_or("Failed to parse last_24h as uint")?
.0;
let total_all = result
.get(1)
.ok_or("Missing total_all in nodeInfo response")?
.as_uint()
.ok_or("Failed to parse total_all as uint")?
.0;
let last_claimed = result
.get(2)
.ok_or("Missing last_claimed in nodeInfo response")?
.as_uint()
.ok_or("Failed to parse last_claimed as uint")?
.0;
let is_active = result
.get(3)
.ok_or("Missing is_active in nodeInfo response")?
.as_bool()
.ok_or("Failed to parse is_active as bool")?;
Ok(NodeBucketInfo {
last_24h,
total_all,
last_claimed,
is_active,
})
}
/// Get the current reward rate per unit
pub async fn get_reward_rate(&self) -> Result<U256, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("getRewardRate", &[])?
.call()
.await?;
let rate = result
.first()
.ok_or("Missing reward rate in response")?
.as_uint()
.ok_or("Failed to parse reward rate as uint")?
.0;
Ok(rate)
}
/// Check if a node has any claimable rewards
pub async fn has_claimable_rewards(
&self,
node: Address,
) -> Result<bool, Box<dyn std::error::Error>> {
let (claimable, _) = self.calculate_rewards(node).await?;
Ok(claimable > U256::ZERO)
}
/// Get detailed rewards information for a node
pub async fn get_node_rewards_details(
&self,
node: Address,
provider: Address,
) -> Result<NodeRewards, Box<dyn std::error::Error>> {
let (claimable, locked) = self.calculate_rewards(node).await?;
let node_info = self.get_node_info(node).await?;
Ok(NodeRewards {
claimable_tokens: claimable,
locked_tokens: locked,
total_rewards: claimable + locked,
is_active: node_info.is_active,
provider,
})
}
}
impl RewardsDistributor<WalletProvider> {
/// Claim rewards for a node (only callable by node provider)
pub async fn claim_rewards(
&self,
node: Address,
) -> Result<alloy::primitives::FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("claimRewards", &[node.into()])?
.send()
.await?
.watch()
.await?;
Ok(result)
}
/// Set the reward rate (only callable by rewards manager)
pub async fn set_reward_rate(
&self,
new_rate: U256,
) -> Result<alloy::primitives::FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("setRewardRate", &[new_rate.into()])?
.send()
.await?
.watch()
.await?;
Ok(result)
}
/// Slash pending rewards for a node (only callable by rewards manager or compute pool)
pub async fn slash_pending_rewards(
&self,
node: Address,
) -> Result<alloy::primitives::FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("slashPendingRewards", &[node.into()])?
.send()
.await?
.watch()
.await?;
Ok(result)
}
/// Remove specific work units from a node (soft slash)
pub async fn remove_work(
&self,
node: Address,
work_units: U256,
) -> Result<alloy::primitives::FixedBytes<32>, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("removeWork", &[node.into(), work_units.into()])?
.send()
.await?
.watch()
.await?;
Ok(result)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/mod.rs | crates/shared/src/web3/contracts/implementations/mod.rs | pub mod ai_token_contract;
pub mod compute_pool_contract;
pub mod compute_registry_contract;
pub mod domain_registry_contract;
pub mod prime_network_contract;
pub mod rewards_distributor_contract;
pub mod stake_manager;
pub mod work_validators;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/domain_registry_contract.rs | crates/shared/src/web3/contracts/implementations/domain_registry_contract.rs | use crate::web3::contracts::constants::addresses::DOMAIN_REGISTRY_ADDRESS;
use crate::web3::contracts::core::contract::Contract;
use alloy::dyn_abi::DynSolValue;
use alloy::primitives::{Address, U256};
use anyhow::Error;
pub struct Domain {
pub domain_id: U256,
pub name: String,
pub validation_logic: Address,
pub domain_parameters_uri: String,
}
#[derive(Clone)]
pub struct DomainRegistryContract<P: alloy_provider::Provider> {
instance: Contract<P>,
}
impl<P: alloy_provider::Provider> DomainRegistryContract<P> {
pub fn new(provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(DOMAIN_REGISTRY_ADDRESS, provider, abi_file_path);
Self { instance }
}
pub async fn get_domain(&self, domain_id: u32) -> Result<Domain, Error> {
let result = self
.instance
.instance()
.function("get", &[U256::from(domain_id).into()])?
.call()
.await?;
let domain_info_tuple: &[DynSolValue] = result
.first()
.ok_or_else(|| Error::msg("Failed to get domain info tuple"))?
.as_tuple()
.ok_or_else(|| Error::msg("Failed to convert to tuple"))?;
let domain_id: U256 = domain_info_tuple[0]
.as_uint()
.ok_or_else(|| Error::msg("Failed to get domain ID"))?
.0;
let name: String = domain_info_tuple[1]
.as_str()
.ok_or_else(|| Error::msg("Failed to get domain name"))?
.to_string();
let validation_logic: Address = domain_info_tuple[2]
.as_address()
.ok_or_else(|| Error::msg("Failed to get validation logic address"))?;
let domain_parameters_uri: String = domain_info_tuple[3]
.as_str()
.ok_or_else(|| Error::msg("Failed to get domain parameters URI"))?
.to_string();
Ok(Domain {
domain_id,
name,
validation_logic,
domain_parameters_uri,
})
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/stake_manager.rs | crates/shared/src/web3/contracts/implementations/stake_manager.rs | use crate::web3::contracts::constants::addresses::STAKE_MANAGER_ADDRESS;
use crate::web3::contracts::core::contract::Contract;
use alloy::primitives::{Address, U256};
#[derive(Clone)]
pub struct StakeManagerContract<P: alloy_provider::Provider> {
instance: Contract<P>,
}
impl<P: alloy_provider::Provider> StakeManagerContract<P> {
pub fn new(provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(STAKE_MANAGER_ADDRESS, provider, abi_file_path);
Self { instance }
}
pub async fn get_stake_minimum(&self) -> Result<U256, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("getStakeMinimum", &[])?
.call()
.await?;
let minimum: U256 = result
.into_iter()
.next()
.map(|value| value.as_uint().unwrap_or_default())
.unwrap_or_default()
.0;
Ok(minimum)
}
pub async fn get_stake(&self, staker: Address) -> Result<U256, Box<dyn std::error::Error>> {
let result = self
.instance
.instance()
.function("getStake", &[staker.into()])?
.call()
.await?;
Ok(result[0].as_uint().unwrap_or_default().0)
}
pub async fn calculate_stake(
&self,
compute_units: U256,
provider_total_compute: U256,
) -> Result<U256, Box<dyn std::error::Error>> {
let min_stake_per_unit = self.get_stake_minimum().await?;
let total_compute = provider_total_compute + compute_units + U256::from(1);
let required_stake = total_compute * min_stake_per_unit;
Ok(required_stake)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs | crates/shared/src/web3/contracts/implementations/work_validators/synthetic_data_validator.rs | use crate::web3::contracts::core::contract::Contract;
use alloy::{
dyn_abi::{DynSolValue, Word},
primitives::{Address, U256},
};
use anyhow::Error;
use serde::Deserialize;
use serde::Serialize;
#[derive(Clone)]
pub struct SyntheticDataWorkValidator<P: alloy_provider::Provider> {
pub instance: Contract<P>,
}
#[derive(Debug, Deserialize, Serialize, Default, Clone, Copy)]
pub struct WorkInfo {
pub provider: Address,
pub node_id: Address,
pub timestamp: u64,
pub work_units: U256,
}
impl WorkInfo {
pub fn is_valid(&self) -> bool {
self.node_id != Address::ZERO
&& self.work_units != U256::ZERO
&& self.provider != Address::ZERO
}
}
impl<P: alloy_provider::Provider> SyntheticDataWorkValidator<P> {
pub fn new(address: Address, provider: P, abi_file_path: &str) -> Self {
let instance = Contract::new(address, provider, abi_file_path);
Self { instance }
}
pub async fn get_work_keys(&self, pool_id: U256) -> Result<Vec<String>, Error> {
let result = self
.instance
.instance()
.function("getWorkKeys", &[pool_id.into()])?
.call()
.await?;
let array_value = result
.into_iter()
.next()
.ok_or_else(|| Error::msg("No result returned from getWorkKeys"))?;
let array = array_value
.as_array()
.ok_or_else(|| Error::msg("Result is not an array"))?;
let work_keys = array
.iter()
.map(|value| {
let bytes = value
.as_fixed_bytes()
.ok_or_else(|| Error::msg("Value is not fixed bytes"))?;
if bytes.0.len() != 32 {
return Err(Error::msg(format!(
"Expected 32 bytes, got {}",
bytes.0.len()
)));
}
Ok(hex::encode(bytes.0))
})
.collect::<Result<Vec<String>, Error>>()?;
Ok(work_keys)
}
pub async fn get_work_info(&self, pool_id: U256, work_key: &str) -> Result<WorkInfo, Error> {
let work_key_bytes = hex::decode(work_key)?;
if work_key_bytes.len() != 32 {
return Err(Error::msg("Work key must be 32 bytes"));
}
let fixed_bytes = DynSolValue::FixedBytes(Word::from_slice(&work_key_bytes), 32);
let result = self
.instance
.instance()
.function("getWorkInfo", &[pool_id.into(), fixed_bytes])?
.call()
.await?;
let tuple = result
.into_iter()
.next()
.ok_or_else(|| Error::msg("No result returned from getWorkInfo"))?;
let tuple_array = tuple
.as_tuple()
.ok_or_else(|| Error::msg("Result is not a tuple"))?;
if tuple_array.len() != 4 {
return Err(Error::msg("Invalid tuple length"));
}
let provider = tuple_array[0]
.as_address()
.ok_or_else(|| Error::msg("Provider is not an address"))?;
let node_id = tuple_array[1]
.as_address()
.ok_or_else(|| Error::msg("Node ID is not an address"))?;
let timestamp = u64::try_from(
tuple_array[2]
.as_uint()
.ok_or_else(|| Error::msg("Timestamp is not a uint"))?
.0,
)
.map_err(|_| Error::msg("Timestamp conversion failed"))?;
let work_units = tuple_array[3]
.as_uint()
.ok_or_else(|| Error::msg("Work units is not a uint"))?
.0;
Ok(WorkInfo {
provider,
node_id,
timestamp,
work_units,
})
}
pub async fn get_work_since(
&self,
pool_id: U256,
timestamp: U256,
) -> Result<Vec<String>, Error> {
let result = self
.instance
.instance()
.function("getWorkSince", &[pool_id.into(), timestamp.into()])?
.call()
.await?;
let array_value = result
.into_iter()
.next()
.ok_or_else(|| Error::msg("No result returned from getWorkSince"))?;
let array = array_value
.as_array()
.ok_or_else(|| Error::msg("Result is not an array"))?;
let work_keys = array
.iter()
.map(|value| {
let bytes = value
.as_fixed_bytes()
.ok_or_else(|| Error::msg("Value is not fixed bytes"))?;
if bytes.0.len() != 32 {
return Err(Error::msg(format!(
"Expected 32 bytes, got {}",
bytes.0.len()
)));
}
Ok(hex::encode(bytes.0))
})
.collect::<Result<Vec<String>, Error>>()?;
Ok(work_keys)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/implementations/work_validators/mod.rs | crates/shared/src/web3/contracts/implementations/work_validators/mod.rs | pub mod synthetic_data_validator;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/constants/mod.rs | crates/shared/src/web3/contracts/constants/mod.rs | pub mod addresses;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/web3/contracts/constants/addresses.rs | crates/shared/src/web3/contracts/constants/addresses.rs | use alloy::primitives::{hex, Address};
// TODO: Parse these from env
#[cfg(not(feature = "testnet"))]
pub mod contract_addresses {
use super::*;
pub const PRIME_NETWORK_ADDRESS: Address =
Address::new(hex!("0xCf7Ed3AccA5a467e9e704C703E8D87F634fB0Fc9"));
pub const AI_TOKEN_ADDRESS: Address =
Address::new(hex!("0x5FbDB2315678afecb367f032d93F642f64180aa3"));
pub const COMPUTE_REGISTRY_ADDRESS: Address =
Address::new(hex!("0x5FC8d32690cc91D4c39d9d3abcBD16989F875707"));
pub const DOMAIN_REGISTRY_ADDRESS: Address =
Address::new(hex!("0x0165878A594ca255338adfa4d48449f69242Eb8F"));
pub const STAKE_MANAGER_ADDRESS: Address =
Address::new(hex!("0xa513E6E4b8f2a923D98304ec87F64353C4D5C853"));
pub const COMPUTE_POOL_ADDRESS: Address =
Address::new(hex!("0x610178dA211FEF7D417bC0e6FeD39F05609AD788"));
}
#[cfg(feature = "testnet")]
pub mod contract_addresses {
use super::*;
pub const PRIME_NETWORK_ADDRESS: Address =
Address::new(hex!("0x8eA4f11DbfDfE2D7f8fB64Dd2c9dd4Ed610bf03E"));
pub const AI_TOKEN_ADDRESS: Address =
Address::new(hex!("0xAF874da2758fd319656D07cAD856EE1220c949d6"));
pub const COMPUTE_REGISTRY_ADDRESS: Address =
Address::new(hex!("0x43308a48E7bf1349e0f1732e4B027af6770d8f64"));
pub const DOMAIN_REGISTRY_ADDRESS: Address =
Address::new(hex!("0xc3dC276d7D23eDb8E17D40B9d04dc016a94E3380"));
pub const STAKE_MANAGER_ADDRESS: Address =
Address::new(hex!("0x5bD0CFDCD2d5D548A0d161CB47234798701c91BE"));
pub const COMPUTE_POOL_ADDRESS: Address =
Address::new(hex!("0x8c924BE4413931384A917bE76F4f8Aa6A56a674c"));
}
pub use contract_addresses::*;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/p2p/service.rs | crates/shared/src/p2p/service.rs | use crate::web3::wallet::Wallet;
use anyhow::{bail, Context as _, Result};
use futures::stream::FuturesUnordered;
use p2p::{
AuthenticationInitiationRequest, AuthenticationResponse, AuthenticationSolutionRequest,
IncomingMessage, Libp2pIncomingMessage, Node, NodeBuilder, OutgoingMessage, PeerId, Protocol,
Protocols, Response,
};
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
pub struct OutgoingRequest {
pub peer_wallet_address: alloy::primitives::Address,
pub request: p2p::Request,
pub peer_id: String,
pub multiaddrs: Vec<String>,
pub response_tx: tokio::sync::oneshot::Sender<p2p::Response>,
}
/// A p2p service implementation that is used by the validator and the orchestrator.
/// It handles the authentication protocol used before sending
/// requests to the worker.
pub struct Service {
node: Node,
incoming_messages_rx: Receiver<IncomingMessage>,
outgoing_messages_rx: Receiver<OutgoingRequest>,
cancellation_token: CancellationToken,
context: Context,
}
impl Service {
pub fn new(
keypair: p2p::Keypair,
port: u16,
cancellation_token: CancellationToken,
wallet: Wallet,
protocols: Protocols,
) -> Result<(Self, Sender<OutgoingRequest>)> {
let (node, incoming_messages_rx, outgoing_messages) =
build_p2p_node(keypair, port, cancellation_token.clone(), protocols.clone())
.context("failed to build p2p node")?;
let (outgoing_messages_tx, outgoing_messages_rx) = tokio::sync::mpsc::channel(100);
Ok((
Self {
node,
incoming_messages_rx,
outgoing_messages_rx,
cancellation_token,
context: Context::new(outgoing_messages, wallet, protocols),
},
outgoing_messages_tx,
))
}
pub async fn run(self) {
use futures::StreamExt as _;
let Self {
node,
mut incoming_messages_rx,
mut outgoing_messages_rx,
cancellation_token,
context,
} = self;
tokio::task::spawn(node.run());
let mut incoming_message_handlers = FuturesUnordered::new();
let mut outgoing_message_handlers = FuturesUnordered::new();
loop {
tokio::select! {
_ = cancellation_token.cancelled() => {
break;
}
Some(message) = outgoing_messages_rx.recv() => {
let handle = tokio::task::spawn(handle_outgoing_message(message, context.clone()));
outgoing_message_handlers.push(handle);
}
Some(message) = incoming_messages_rx.recv() => {
let context = context.clone();
let handle = tokio::task::spawn(
handle_incoming_message(message, context)
);
incoming_message_handlers.push(handle);
}
Some(res) = incoming_message_handlers.next() => {
if let Err(e) = res {
log::error!("failed to handle incoming message: {e}");
}
}
Some(res) = outgoing_message_handlers.next() => {
if let Err(e) = res {
log::error!("failed to handle outgoing message: {e}");
}
}
}
}
}
}
fn build_p2p_node(
keypair: p2p::Keypair,
port: u16,
cancellation_token: CancellationToken,
protocols: Protocols,
) -> Result<(Node, Receiver<IncomingMessage>, Sender<OutgoingMessage>)> {
NodeBuilder::new()
.with_keypair(keypair)
.with_port(port)
.with_authentication()
.with_protocols(protocols)
.with_cancellation_token(cancellation_token)
.try_build()
}
#[derive(Clone)]
struct Context {
// outbound message channel; receiver is held by libp2p node
outgoing_messages: Sender<OutgoingMessage>,
// ongoing authentication requests
ongoing_auth_requests: Arc<RwLock<HashMap<PeerId, OngoingAuthChallenge>>>,
is_authenticated_with_peer: Arc<RwLock<HashSet<PeerId>>>,
// this assumes that there is only one outbound request per protocol per peer at a time,
// is this a correct assumption?
// response channel is for sending the response back to the caller who initiated the request
#[allow(clippy::type_complexity)]
ongoing_outbound_requests:
Arc<RwLock<HashMap<(PeerId, Protocol), tokio::sync::oneshot::Sender<Response>>>>,
wallet: Wallet,
protocols: Protocols,
}
#[derive(Debug)]
struct OngoingAuthChallenge {
peer_wallet_address: alloy::primitives::Address,
auth_challenge_request_message: String,
outgoing_message: p2p::Request,
response_tx: tokio::sync::oneshot::Sender<Response>,
}
impl Context {
fn new(
outgoing_messages: Sender<OutgoingMessage>,
wallet: Wallet,
protocols: Protocols,
) -> Self {
Self {
outgoing_messages,
ongoing_auth_requests: Arc::new(RwLock::new(HashMap::new())),
is_authenticated_with_peer: Arc::new(RwLock::new(HashSet::new())),
ongoing_outbound_requests: Arc::new(RwLock::new(HashMap::new())),
wallet,
protocols,
}
}
}
async fn handle_outgoing_message(message: OutgoingRequest, context: Context) -> Result<()> {
use rand_v8::rngs::OsRng;
use rand_v8::Rng as _;
use std::str::FromStr as _;
let OutgoingRequest {
peer_wallet_address,
request,
peer_id,
multiaddrs,
response_tx,
} = message;
let peer_id = PeerId::from_str(&peer_id).context("failed to parse peer id")?;
// check if we're authenticated already
let is_authenticated_with_peer = context.is_authenticated_with_peer.read().await;
if is_authenticated_with_peer.contains(&peer_id) {
log::debug!(
"already authenticated with peer {peer_id}, skipping validation authentication"
);
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
ongoing_outbound_requests.insert((peer_id, request.protocol()), response_tx);
// multiaddresses are already known, as we've connected to them previously
context
.outgoing_messages
.send(request.into_outgoing_message(peer_id, vec![]))
.await
.context("failed to send outgoing message")?;
return Ok(());
}
// ensure there's no ongoing challenge
// use write-lock to make this atomic until we finish sending the auth request and writing to the map
let mut ongoing_auth_requests = context.ongoing_auth_requests.write().await;
if ongoing_auth_requests.contains_key(&peer_id) {
bail!("ongoing auth request for {} already exists", peer_id);
}
let multiaddrs = multiaddrs
.iter()
.filter_map(
|addr| p2p::Multiaddr::from_str(addr).ok(), /* ?.with_p2p(peer_id).ok()*/
)
.collect::<Vec<_>>();
if multiaddrs.is_empty() {
bail!("no valid multiaddrs for peer id {peer_id}");
}
// create the authentication challenge request message
let challenge_bytes: [u8; 32] = OsRng.gen();
let auth_challenge_message: String = hex::encode(challenge_bytes);
let req: p2p::Request = AuthenticationInitiationRequest {
message: auth_challenge_message.clone(),
}
.into();
let outgoing_message = req.into_outgoing_message(peer_id, multiaddrs);
log::debug!("sending ValidatorAuthenticationInitiationRequest to {peer_id}");
context
.outgoing_messages
.send(outgoing_message)
.await
.context("failed to send outgoing message")?;
// store the ongoing auth challenge
let ongoing_challenge = OngoingAuthChallenge {
peer_wallet_address,
auth_challenge_request_message: auth_challenge_message.clone(),
outgoing_message: request,
response_tx,
};
ongoing_auth_requests.insert(peer_id, ongoing_challenge);
Ok(())
}
async fn handle_incoming_message(message: IncomingMessage, context: Context) -> Result<()> {
match message.message {
Libp2pIncomingMessage::Request {
request_id: _,
request,
channel: _,
} => {
log::error!(
"node should not receive incoming requests: {request:?} from {}",
message.peer
);
}
Libp2pIncomingMessage::Response {
request_id: _,
response,
} => {
log::debug!("received incoming response {response:?}");
handle_incoming_response(message.peer, response, context)
.await
.context("failed to handle incoming response")?;
}
}
Ok(())
}
async fn handle_incoming_response(
from: PeerId,
response: p2p::Response,
context: Context,
) -> Result<()> {
match response {
p2p::Response::Authentication(resp) => {
log::debug!("received ValidatorAuthenticationSolutionResponse from {from}: {resp:?}");
handle_validation_authentication_response(from, resp, context)
.await
.context("failed to handle validator authentication response")?;
}
p2p::Response::HardwareChallenge(ref resp) => {
if !context.protocols.has_hardware_challenge() {
bail!("received HardwareChallengeResponse from {from}, but hardware challenge protocol is not enabled");
}
log::debug!("received HardwareChallengeResponse from {from}: {resp:?}");
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
let Some(response_tx) =
ongoing_outbound_requests.remove(&(from, Protocol::HardwareChallenge))
else {
bail!(
"no ongoing hardware challenge for peer {from}, cannot handle HardwareChallengeResponse"
);
};
let _ = response_tx.send(response);
}
p2p::Response::Invite(ref resp) => {
if !context.protocols.has_invite() {
bail!("received InviteResponse from {from}, but invite protocol is not enabled");
}
log::debug!("received InviteResponse from {from}: {resp:?}");
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
let Some(response_tx) = ongoing_outbound_requests.remove(&(from, Protocol::Invite))
else {
bail!("no ongoing invite for peer {from}, cannot handle InviteResponse");
};
let _ = response_tx.send(response);
}
p2p::Response::GetTaskLogs(ref resp) => {
if !context.protocols.has_get_task_logs() {
bail!("received GetTaskLogsResponse from {from}, but get task logs protocol is not enabled");
}
log::debug!("received GetTaskLogsResponse from {from}: {resp:?}");
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
let Some(response_tx) =
ongoing_outbound_requests.remove(&(from, Protocol::GetTaskLogs))
else {
bail!("no ongoing GetTaskLogs for peer {from}, cannot handle GetTaskLogsResponse");
};
let _ = response_tx.send(response);
}
p2p::Response::RestartTask(ref resp) => {
if !context.protocols.has_restart() {
bail!("received RestartResponse from {from}, but restart protocol is not enabled");
}
log::debug!("received RestartResponse from {from}: {resp:?}");
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
let Some(response_tx) = ongoing_outbound_requests.remove(&(from, Protocol::Restart))
else {
bail!("no ongoing Restart for peer {from}, cannot handle RestartResponse");
};
let _ = response_tx.send(response);
}
p2p::Response::General(ref resp) => {
if !context.protocols.has_general() {
bail!("received GeneralResponse from {from}, but general protocol is not enabled");
}
log::debug!("received GeneralResponse from {from}: {resp:?}");
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
let Some(response_tx) = ongoing_outbound_requests.remove(&(from, Protocol::General))
else {
bail!("no ongoing General for peer {from}, cannot handle GeneralResponse");
};
let _ = response_tx.send(response);
}
}
Ok(())
}
async fn handle_validation_authentication_response(
from: PeerId,
response: p2p::AuthenticationResponse,
context: Context,
) -> Result<()> {
use crate::security::request_signer::sign_message;
use std::str::FromStr as _;
match response {
AuthenticationResponse::Initiation(req) => {
let ongoing_auth_requests = context.ongoing_auth_requests.read().await;
let Some(ongoing_challenge) = ongoing_auth_requests.get(&from) else {
bail!(
"no ongoing hardware challenge for peer {from}, cannot handle ValidatorAuthenticationInitiationResponse"
);
};
let Ok(parsed_signature) = alloy::primitives::Signature::from_str(&req.signature)
else {
bail!("failed to parse signature from response");
};
// recover address from the challenge message that the peer signed
let Ok(recovered_address) = parsed_signature
.recover_address_from_msg(&ongoing_challenge.auth_challenge_request_message)
else {
bail!("failed to recover address from response signature")
};
// verify the recovered address matches the expected worker wallet address
if recovered_address != ongoing_challenge.peer_wallet_address {
bail!(
"peer address verification failed: expected {}, got {recovered_address}",
ongoing_challenge.peer_wallet_address,
)
}
log::debug!("auth challenge initiation response received from node: {from}");
let signature = sign_message(&req.message, &context.wallet).await.unwrap();
let req: p2p::Request = AuthenticationSolutionRequest { signature }.into();
let req = req.into_outgoing_message(from, vec![]);
context
.outgoing_messages
.send(req)
.await
.context("failed to send outgoing message")?;
}
AuthenticationResponse::Solution(req) => {
let mut ongoing_auth_requests = context.ongoing_auth_requests.write().await;
let Some(ongoing_challenge) = ongoing_auth_requests.remove(&from) else {
bail!(
"no ongoing hardware challenge for peer {from}, cannot handle ValidatorAuthenticationSolutionResponse"
);
};
match req {
p2p::AuthenticationSolutionResponse::Granted => {}
p2p::AuthenticationSolutionResponse::Rejected => {
log::debug!("auth challenge rejected by node: {from}");
return Ok(());
}
}
// auth was granted, finally send the hardware challenge
let mut is_authenticated_with_peer = context.is_authenticated_with_peer.write().await;
is_authenticated_with_peer.insert(from);
let protocol = ongoing_challenge.outgoing_message.protocol();
let req = ongoing_challenge
.outgoing_message
.into_outgoing_message(from, vec![]);
context
.outgoing_messages
.send(req)
.await
.context("failed to send outgoing message")?;
let mut ongoing_outbound_requests = context.ongoing_outbound_requests.write().await;
ongoing_outbound_requests.insert((from, protocol), ongoing_challenge.response_tx);
}
}
Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/shared/src/p2p/mod.rs | crates/shared/src/p2p/mod.rs | mod service;
pub use service::*;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/lib.rs | src/lib.rs | //! # Blend - A crate for parsing .blend files from Blender
//!
//! ## Example
//!
//! ```rust
//! # use blend::Blend;
//!
//! /// Prints the name and position of every object
//! # fn main() {
//! let blend = Blend::from_path("examples/blend_files/3_5.blend").expect("error loading blend file");
//!
//! for obj in blend.instances_with_code(*b"OB") {
//! let loc = obj.get_f32_vec("loc");
//! let name = obj.get("id").get_string("name");
//!
//! println!("\"{}\" at {:?}", name, loc);
//! }
//! # }
//! ```
//!
//! ## The .blend file
//!
//! To use this crate to its full extent it is useful to understand how the .blend file works internally. A simplified
//! overview: Blender creates the save file by dumping its memory to the disk, this means that a .blend file is
//! a list of C-like structs which can contain primitives, arrays, pointers and other structs. The following is how a
//! Camera is defined in Blender's memory (in Rust-like syntax):
//!
//! ```text
//! struct Camera {
//! id: ID {
//! name: [u8; 66] = "CACamera"
//! //[... other ommited properties ...]
//! },
//! adt: *AnimData = null,
//! type: u8 = 0,
//! dtx: u8 = 0,
//! flag: f32 = 4,
//! passepartalpha: f32 = 0.5,
//! clipsta: f32 = 0.1,
//! clipend: f32 = 100,
//! lens: f32 = 50,
//! ortho_scale: f32 = 7.3142858,
//! drawsize: f32 = 1,
//! sensor_x: f32 = 36,
//! sensor_y: f32 = 24,
//! shiftx: f32 = 0,
//! shifty: f32 = 0,
//! YF_dofdist: f32 = 0,
//! ipo: *Ipo = null,
//! dof_ob: *Object = null,
//! //[... other ommited properties ...]
//! }
//! ```
//!
//! Other concepts are explained in the docs for methods where knowing these concepts is necessary.
//!
//! ### Learn more
//!
//! Documentation on the .blend file is a bit sparse, but the most common source is the [Mystery of the Blend](https://github.com/fschutt/mystery-of-the-blend-backup)
//! and a personal recommendation is to get it from the official Blender repository and apply the following [patch](https://developer.blender.org/T52387).
//!
//! ## This crate
//!
//! This crate provides a parser and a runtime for these structures which means you can access them as if they were
//! simple objects in memory. The aim here is to abstract most of the noise into a single coherent interface. For
//! example: you can access a struct through both a non-primitive value field and a non-null pointer field using the
//! same method call (`Instance::get`). Other abstractions are provided where possible: the .blend file has at least
//! 3 ways of defining lists of things, this crate provides a single method that unifies all of those.
//!
//! This crate is also lazy. While some work has to be done upfront to find the structs and their type definitions,
//! the binary data of the blocks is not parsed until you actually access it.
//!
//! ### Usage tips
//!
//! Knowing what to read from the file can be a bit of a challenge. A simple .blend file has over 400 "blocks" and each can
//! represent one or more structs. If you don't know what you want to access exactly the `print_blend` example can be
//! helpful. You can use it to save an entire .blend file as text to disk. You can also print single struct instances
//! if you know somewhat what you need.
//!
//! It's also important to note that when printing an `Instance` if one of their properties is a list, elements other
//! than the first are skipped. If you need to see the entire list simply access it and print its members individually.
//!
//! The `Display` implementation for `Instance` is a bit unpolished so larger .blend
//! files might cause a stack overflow but that can be fixed by running the code in release mode. If you find something that
//! breaks formatting please open an issue.
//!
//! ### Running examples
//!
//! A .blend file may contain personal information from the machine it was created, that's why no .blend files are provided
//! in this repository. To run the examples create a folder named `blend_files` inside the `examples` folder, put any file
//! you want there and change the paths in the examples.
//!
//! ### Supported versions
//!
//! As the .blend file is self-describing it should possible to parse files from every Blender version (tests were done
//! on files from version 2.72 to 2.80). Some things are assumed to always be true though: the type `int` for example is
//! always considered equivalent to Rust's `i32` but there is nothing in the file specification that guarantees this. There
//! is very little reason to believe Blender would change its primitive types though.
//!
//! ### Warnings
//!
//! This crate is meant to be used with trusted .blend files, while no unsafety or undefined behaviour is expected
//! from reading a malicious file, it is probably possible to craft a .blend file that causes a panic at runtime.
//!
//! Due to some quirks of the specification, parts of the
//! library are a bit liberal in what kind of conversions it allows. For example: any block with the correct
//! size can be parsed as a list of floats. Why? Because some blocks are actual arrays of floats but we don't have
//! enough type information to be sure of this. This means it is up to the user to decide what they want when accessing
//! the data.
//!
//! This crate is also somewhat panic happy. While it should be always possible to check if the field you are accessing
//! exists, is valid, contains a particular type of data, etc, you are meant to know what you are accessing ahead of
//! time so almost none of the functions will return a `Result::Err` or `Option::None` on bad inputs.
//!
//! Finally, this was developed to facilitate game development but should be useful for any use case.
//!
//! ### Limitations
//!
//! This crate does not support compressed .blend files and it also does not support writing .blend files. To solve the
//! first you can uncompress the file before passing the data to `blend::Blend::from_data` see the `print_blend` example
//! to see how. The second one is a bit harder due to the way the code is organized, but PRs are welcome!
//!
//! `GLOB`, `REND` and `TEST` blocks are not fully supported. Parts of the code already supports these blocks but they are
//! not fully implemented as I haven't found a use-case for them. Open an issue if you would like support for these!
pub mod parsers;
pub mod runtime;
pub use runtime::{Blend, Instance};
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/runtime.rs | src/runtime.rs | use crate::parsers::{
blend::{Block, BlockData, Header as BlendHeader, RawBlend},
dna::{Dna, DnaStruct, DnaType},
field::{parse_field, FieldInfo},
primitive::*,
BlendParseError, Endianness, PointerSize,
};
use linked_hash_map::LinkedHashMap;
use std::fmt;
use std::{io::Read, mem::size_of, num::NonZeroU64, path::Path};
/// An `Instance`'s data can be a reference to a `Block` if the `Instance` represents a root or subsidiary block,
/// or it can be raw bytes if the `Instance` was created by accessing a field in another `Instance`.
#[derive(Clone, Debug)]
pub enum InstanceDataFormat<'a> {
Block(&'a Block),
Raw(&'a [u8]),
}
/// Pointers in the blend file are valid if the file contains another block with the correct address. They are invalid
/// if no block is found with the correct address.
pub enum PointerInfo<'a> {
Block(&'a Block),
Null,
Invalid,
}
impl<'a> InstanceDataFormat<'a> {
/// `get` accesses only a specific slice of the underlying data.
pub fn get(&self, start: usize, len: usize) -> &'a [u8] {
&self.data()[start..start + len]
}
/// Simplifies the access to the underlying data inside the `InstanceDataFormat`.
pub fn data(&self) -> &'a [u8] {
match self {
InstanceDataFormat::Block(block) => match block {
Block::Principal { data, .. }
| Block::Subsidiary { data, .. }
| Block::Global { data, .. } => &data.data[..],
_ => unimplemented!(),
},
InstanceDataFormat::Raw(data) => &data[..],
}
}
/// Returns the code of the underlying block, if it has one.
fn code(&self) -> Option<[u8; 4]> {
match self {
InstanceDataFormat::Block(block) => match block {
Block::Principal { code, .. } => Some([code[0], code[1], 0, 0]),
Block::Global { .. } => Some(*b"GLOB"),
Block::Rend { .. } => Some(*b"REND"),
Block::Test { .. } => Some(*b"TEST"),
Block::Dna { .. } => Some(*b"DNA1"),
Block::Subsidiary { .. } => None,
},
InstanceDataFormat::Raw(_) => None,
}
}
/// Returns the memory address of the underlying block, if it has one.
pub fn memory_address(&self) -> Option<NonZeroU64> {
match self {
InstanceDataFormat::Block(block) => match block {
Block::Principal { memory_address, .. }
| Block::Subsidiary { memory_address, .. }
| Block::Global { memory_address, .. } => Some(*memory_address),
_ => unimplemented!(),
},
InstanceDataFormat::Raw(_) => None,
}
}
}
/// Represents a field inside a struct. The data `FieldTemplate` keeps is used to interpret the raw bytes of the block.
#[derive(Debug, Clone)]
pub struct FieldTemplate {
//pub name: String,
pub info: FieldInfo,
/// The index of this field's type inside the `Dna::types` array.
pub type_index: usize,
/// The type name of this field. Used for pretty printing and some sanity checks.
pub type_name: String,
/// The index of the data in the `Instance` owned by this field.
pub data_start: usize,
/// The length in bytes of the data in the `Instance` owned by this field.
pub data_len: usize,
/// A field can represent a primitive or a struct.
pub is_primitive: bool,
}
/// Represents a block of data inside the blend file. An `Instance` can be a camera, a mesh, a material, or anything
/// else Blender uses internally, like material nodes, user settings or render options. An `Instance` is conceptually a
/// `struct`: a collection of named fields which can themselves be structs or primitives.
#[derive(Clone)]
pub struct Instance<'a> {
/// References to the `Dna` and the `ParsedBlend` are kept because we only interpret data when the user accesses it.
dna: &'a Dna,
blend: &'a RawBlend,
pub type_name: String,
/// The raw binary data this `Instance` owns.
pub data: InstanceDataFormat<'a>,
/// The fields of this `Instance`.
pub fields: LinkedHashMap<String, FieldTemplate>, //We use a LinkedHashMap here because we want to preserve insertion order
}
impl<'a> std::fmt::Debug for Instance<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Instance")
.field("type_name", &self.type_name)
.field("fields", &self.fields)
.finish()
}
}
#[allow(clippy::cognitive_complexity)]
fn fmt_instance(
seen_addresses: &mut std::collections::HashSet<NonZeroU64>,
f: &mut fmt::Formatter,
inst: &Instance,
ident: usize,
) -> fmt::Result {
let ident_str: String = " ".repeat(4 * ident);
write!(f, "{}", inst.type_name)?;
match (inst.data.code(), inst.data.memory_address()) {
(Some(code), Some(memory_address)) => {
write!(
f,
" (code:{}|@{})",
String::from_utf8_lossy(&code[0..=1]),
memory_address
)?;
}
(Some(code), None) => {
write!(f, " (code:{})", String::from_utf8_lossy(&code[0..=1]))?;
}
(None, Some(memory_address)) => {
write!(f, " (@{})", memory_address)?;
}
(None, None) => {}
}
writeln!(f, " {{")?;
for (field_name, field) in inst.fields.iter().filter(|(n, _)| !n.starts_with("_pad")) {
match &field.info {
FieldInfo::Value => {
write!(f, "{} {}: ", ident_str, field_name)?;
match &field.type_name[..] {
"char" => writeln!(f, "{} = {};", field.type_name, inst.get_i8(field_name))?,
"uchar" => writeln!(f, "{} = {};", field.type_name, inst.get_u8(field_name))?,
"short" => writeln!(f, "{} = {};", field.type_name, inst.get_i16(field_name))?,
"ushort" => writeln!(f, "{} = {};", field.type_name, inst.get_u16(field_name))?,
"int" => writeln!(f, "{} = {};", field.type_name, inst.get_i32(field_name))?,
"long" => writeln!(f, "{} = {};", field.type_name, inst.get_i32(field_name))?,
"ulong" => writeln!(f, "{} = {};", field.type_name, inst.get_u32(field_name))?,
"float" => writeln!(f, "{} = {};", field.type_name, inst.get_f32(field_name))?,
"double" => writeln!(f, "{} = {};", field.type_name, inst.get_f64(field_name))?,
"int64_t" => writeln!(f, "{} = {};", field.type_name, inst.get_i64(field_name))?,
"uint64_t" => writeln!(f, "{} = {};", field.type_name, inst.get_u64(field_name))?,
"void" => writeln!(f, "{} = ();", field.type_name)?,
"int8_t" => writeln!(f, "{} = {};", field.type_name, inst.get_i8(field_name))?,
_ if field.is_primitive => panic!("unknown primitive {:?}", field),
_ => {
if field.type_name == "ListBase" {
if inst.is_valid(field_name) {
let list_base_instance = inst
.get_iter(field_name)
.next()
.expect("a valid ListBase always has at least one element");
writeln!(f, "ListBase<{}>[?] = [", list_base_instance.type_name)?;
if list_base_instance.data.code().is_none() {
if !seen_addresses
.contains(&list_base_instance.memory_address())
{
seen_addresses.insert(list_base_instance.memory_address());
write!(f, "{} ", ident_str)?;
fmt_instance(
seen_addresses,
f,
&list_base_instance,
ident + 2,
)?;
} else {
writeln!(
f,
"{} @{},",
ident_str,
list_base_instance.memory_address()
)?;
}
} else {
unimplemented!()
}
writeln!(f, "{} ];", ident_str)?;
} else {
writeln!(f, "ListBase<unknown>[] = null;")?;
}
} else {
fmt_instance(seen_addresses, f, &inst.get(field_name), ident + 1)?;
}
}
}
}
FieldInfo::ValueArray { dimensions, .. } => {
write!(
f,
"{} {}: {}{:?} = ",
ident_str, field_name, field.type_name, dimensions
)?;
match &field.type_name[..] {
"char" => {
let data = inst.data.get(field.data_start, field.data_len);
// Some char arrays might be interpreted as strings if their first element is 0.
if let Ok(string_data) = String::from_utf8(
data.iter().take_while(|&&b| b != 0).cloned().collect(),
) {
writeln!(f, "\"{}\";", string_data)?;
} else {
writeln!(f, "{:?};", inst.get_u8_vec(field_name))?;
}
}
"uchar" => writeln!(f, "{:?};", inst.get_u8_vec(field_name))?,
"short" => writeln!(f, "{:?};", inst.get_i16_vec(field_name))?,
"ushort" => writeln!(f, "{:?};", inst.get_u16_vec(field_name))?,
"int" => writeln!(f, "{:?};", inst.get_i32_vec(field_name))?,
"long" => writeln!(f, "{:?};", inst.get_i32_vec(field_name))?,
"ulong" => writeln!(f, "{:?};", inst.get_u32_vec(field_name))?,
"float" => writeln!(f, "{:?};", inst.get_f32_vec(field_name))?,
"double" => writeln!(f, "{:?};", inst.get_f64_vec(field_name))?,
"int64_t" => writeln!(f, "{:?};", inst.get_i64_vec(field_name))?,
"void" => writeln!(f, "void;")?,
"int8_t" => writeln!(f, "{:?};", inst.get_i8_vec(field_name))?,
"uint64_t" => writeln!(f, "{:?};", inst.get_u64_vec(field_name))?,
_ if field.is_primitive => panic!("unknown primitive"),
_ => {
writeln!(f, "[")?;
if let Some(i) = inst.get_iter(field_name).next() {
write!(f, "{} ", ident_str)?;
fmt_instance(seen_addresses, f, &i, ident + 2)?;
}
writeln!(f, "{} ];", ident_str)?;
}
}
}
FieldInfo::Pointer {
indirection_count: 1,
} => {
if ["next", "prev", "first", "last"]
.iter()
.any(|n| n == field_name)
{
if inst.is_valid(field_name) {
writeln!(
f,
"{} {}: {} = (@{});",
ident_str,
field_name,
inst.get(field_name).type_name,
inst.parse_ptr_address(inst.data.get(field.data_start, field.data_len))
.unwrap()
)?
} else {
writeln!(
f,
"{} {}: {} = null;",
ident_str, field_name, field.type_name
)?
}
} else if inst.is_valid(field_name) {
let mut has_non_primitive_data = true;
match field.info {
FieldInfo::Pointer { indirection_count } if indirection_count == 1 => {
let pointer = inst.get_ptr(field);
let block = match pointer {
PointerInfo::Block(block) => block,
PointerInfo::Null | PointerInfo::Invalid => panic!(
"field '{}' is null or doesn't point to a valid block. ({:?})",
field_name, field
),
};
if let Block::Subsidiary { dna_index, .. } = block {
has_non_primitive_data = if field.type_index >= 13 {
dna_index >= &13 || inst
.dna()
.structs
.iter()
.any(|s| s.type_index == field.type_index)
} else {
let r#struct = &inst.dna().structs[*dna_index];
r#struct.type_index >= 13
};
}
}
_ => (),
}
if has_non_primitive_data {
let ptr_inst = inst.get(field_name);
//assert!(!seen_addresses.contains(&ptr_inst.memory_address()));
if ptr_inst.data.code().is_none()
&& !seen_addresses.contains(&ptr_inst.memory_address())
{
if ptr_inst.type_name == "Link" {
match ptr_inst.data {
InstanceDataFormat::Block(block) => match block {
Block::Principal { data, .. }
| Block::Subsidiary { data, .. } => {
writeln!(
f,
"{} {}: {}^ = {:?};",
ident_str, field_name, field.type_name, data.data
)?
}
_ => unimplemented!(),
}
_ => {
writeln!(
f,
"{} {}: {}! = {:?};",
ident_str, field_name, field.type_name, ptr_inst
)?
}
}
} else {
seen_addresses.insert(ptr_inst.memory_address());
match ptr_inst.data {
InstanceDataFormat::Block(block) => match block {
Block::Principal { data, .. }
| Block::Subsidiary { data, .. } => {
if data.count > 1 {
writeln!(
f,
"{} {}: {}[{}] = [",
ident_str,
field_name,
field.type_name,
data.count
)?;
if let Some(p) = inst.get_iter(field_name).next() {
write!(f, "{} ", ident_str)?;
fmt_instance(seen_addresses, f, &p, ident + 2)?;
}
writeln!(f, "{} ];", ident_str)?;
} else {
write!(
f,
"{} {}: {} = ",
ident_str, field_name, field.type_name
)?;
fmt_instance(
seen_addresses,
f,
&ptr_inst,
ident + 1,
)?;
}
}
_ => unimplemented!(),
},
_ => unimplemented!(),
}
}
} else {
writeln!(
f,
"{} {}: {} = (@{});",
ident_str,
field_name,
field.type_name,
inst.parse_ptr_address(
inst.data.get(field.data_start, field.data_len)
)
.unwrap()
)?
}
} else { // the type of this field cannot be determined, it's probably an array of primitives
write!(f, "{} {}: {}* = ", ident_str, field_name, field.type_name)?;
let ptr_inst = inst.get_ptr(field);
match ptr_inst {
PointerInfo::Block(block) => {
match block {
Block::Principal { data, .. }
| Block::Subsidiary { data, .. } => {
writeln!(f, "{:?};", data.data)?
}
_ => (),
}
},
PointerInfo::Invalid => writeln!(f, "invalid;")?,
PointerInfo::Null => writeln!(f, "null;")?,
}
}
} else {
writeln!(
f,
"{} {}: {} = null;",
ident_str, field_name, field.type_name
)?;
}
}
FieldInfo::Pointer {
indirection_count: 2,
} => {
if inst.is_valid(field_name) {
let mut instances = inst.get_iter(field_name);
write!(
f,
"{} {}: {}[%?] = [",
ident_str,
field_name,
field.type_name,
//instances.len(),
)?;
if let Some(instance) = instances.next() {
write!(f, "{}", instance)?;
}
writeln!(f, "];")?;
} else {
writeln!(
f,
"{} {}: {}[] = null;",
ident_str, field_name, field.type_name
)?;
}
}
FieldInfo::FnPointer => writeln!(f, "{} {}: fn() = null", ident_str, field_name)?,
FieldInfo::PointerArray { dimensions, .. } => {
let mut instances = inst.get_iter(field_name);
writeln!(
f,
"{} {}: {}{:?} = [",
ident_str, field_name, field.type_name, dimensions,
)?;
if let Some(instance) = instances.next() {
if instance.data.code().is_none()
&& !seen_addresses.contains(&instance.memory_address())
{
seen_addresses.insert(instance.memory_address());
write!(f, "{} ", ident_str)?;
if instance.type_name == "Link" {
writeln!(f, "(not enough type information);")?
} else {
fmt_instance(seen_addresses, f, &instance, ident + 2)?;
}
} else {
write!(f, "{} @{}", ident_str, instance.memory_address())?;
}
}
writeln!(f, "{} ];", ident_str)?;
}
_ => unimplemented!("unknown type"),
}
}
writeln!(f, "{}}}", ident_str)
}
/// Pretty-prints the instance and everything reachable from it; a fresh
/// set of seen memory addresses is passed so shared/cyclic blocks are
/// printed once and referenced by address afterwards.
impl fmt::Display for Instance<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt_instance(&mut std::collections::HashSet::new(), f, self, 0)
    }
}
/// Reads a pointer-sized integer from the front of `data` and returns it as
/// a non-null address, or `None` when the stored pointer is null (all zero
/// bytes).
fn parse_ptr_address(
    data: &[u8],
    pointer_size: PointerSize,
    endianness: Endianness,
) -> Option<NonZeroU64> {
    let raw_address = match pointer_size {
        PointerSize::Bits32 => parse_u32(data, endianness).into(),
        PointerSize::Bits64 => parse_u64(data, endianness),
    };
    NonZeroU64::new(raw_address)
}
impl<'a> Instance<'a> {
    /// Returns the DNA of the blend file this instance was read from.
    pub fn dna(&self) -> &Dna {
        self.dna
    }
    /// Returns the raw parsed blend file (header and blocks).
    pub fn raw(&self) -> &RawBlend {
        self.blend
    }
    /// If this `Instance` was created from a primary/root `Block` it will have a code. Possible codes include "OB" for
    /// objects, "ME" for meshes, "CA" for cameras, etc.
    /// # Panics
    /// Panics if the instance's underlying data doesn't have a code
    pub fn code(&self) -> [u8; 4] {
        self.data.code().expect("instance doesn't have a code")
    }
    /// If this `Instance` was created from a primary/root or subsidiary `Block` it will have a memory address. Blender
    /// dumps its memory into the blend file when saving and the old memory addresses are used to recreate the
    /// connections between blocks when loading the file again.
    /// # Panics
    /// Panics if the instance underlying data doesn't have an old memory address.
    pub fn memory_address(&self) -> NonZeroU64 {
        self.data
            .memory_address()
            .expect("instance doesn't have memory address")
    }
/// `expect_field` simplifies accessing a field since most of the time panicking is the correct response for an
/// invalid field name.
fn expect_field(&self, name: &str) -> &FieldTemplate {
match &self.fields.get(name) {
Some(field) => field,
None => panic!("invalid field '{}'", name),
}
}
    /// Parses a pointer stored in `data` using this file's pointer size and
    /// endianness; `None` means a null pointer.
    fn parse_ptr_address(&self, data: &[u8]) -> Option<NonZeroU64> {
        parse_ptr_address(
            data,
            self.blend.header.pointer_size,
            self.blend.header.endianness,
        )
    }
    /// Used internally to get a block behind a pointer.
    /// # Panics
    /// Panics if field.info is not FieldInfo::Pointer
    ///
    /// Panics if the block pointed by this field is not Block::Principal or Block::Subsidiary
    fn get_ptr(&self, field: &FieldTemplate) -> PointerInfo<'a> {
        match field.info {
            FieldInfo::Pointer { .. } => {}
            _ => panic!(
                "get_ptr can only be called for pointer fields. ({:?})",
                field
            ),
        }
        // A field whose data range runs past the instance data is invalid,
        // not an error.
        if self.data.data().len() < field.data_start + field.data_len {
            return PointerInfo::Invalid;
        }
        let address = self.parse_ptr_address(self.data.get(field.data_start, field.data_len));
        match address {
            None => PointerInfo::Null,
            Some(address) => {
                // Linear scan over all blocks for a matching old memory
                // address; a dangling address resolves to Invalid.
                match self.blend.blocks.iter().find(|b| match b {
                    Block::Principal { memory_address, .. }
                    | Block::Subsidiary { memory_address, .. } => *memory_address == address,
                    _ => false,
                }) {
                    Some(block) => PointerInfo::Block(block),
                    None => PointerInfo::Invalid,
                }
            }
        }
    }
    /// Tests whether a field is valid and can be accessed using the `get` methods without panicking. Which `get`
    /// method you have to use depends on the field type.
    pub fn is_valid<T: AsRef<str>>(&self, name: T) -> bool {
        let name = name.as_ref();
        // Unknown field names are simply invalid, not a panic.
        if !self.fields.contains_key(name) {
            return false;
        }
        let field = self.expect_field(name);
        match field.info {
            FieldInfo::Pointer { indirection_count } if indirection_count == 1 => {
                // NOTE(review): this asserts an 8-byte pointer, but 32-bit
                // blend files store 4-byte pointers — confirm files with
                // 32-bit pointers can't reach this path.
                assert_eq!(
                    field.data_len,
                    size_of::<u64>(),
                    "field '{}' doesn't have enough data for a pointer address. ({:?})",
                    name,
                    field
                );
                let pointer = self.get_ptr(field);
                match pointer {
                    PointerInfo::Null | PointerInfo::Invalid => false,
                    PointerInfo::Block(block) => match block {
                        Block::Principal { .. } | Block::Subsidiary { .. } => true,
                        _ => unimplemented!(),
                    },
                }
            }
            FieldInfo::Pointer { indirection_count } if indirection_count == 2 => {
                // A pointer-to-pointer is valid only when the pointed-to
                // block exists and every address stored inside it resolves
                // to a known block.
                let pointer = self.get_ptr(field);
                let block = match pointer {
                    PointerInfo::Block(block) => block,
                    PointerInfo::Null | PointerInfo::Invalid => return false,
                };
                let pointer_size = self.blend.header.pointer_size.bytes_num();
                let pointer_count = match block {
                    Block::Principal { data, .. } | Block::Subsidiary { data, .. } => {
                        data.data.len() / pointer_size
                    }
                    _ => unimplemented!(),
                };
                for i in 0..pointer_count {
                    match block {
                        Block::Principal { data, .. } | Block::Subsidiary { data, .. } => {
                            let address = self.parse_ptr_address(&data.data[i * pointer_size..]);
                            //parse_u64(&block.data[i * ptr_size..], self.blend.header.endianness);
                            match address {
                                Some(address) => {
                                    // Every stored address must resolve to a block.
                                    if !self.blend.blocks.iter().any(|b| match b {
                                        Block::Principal { memory_address, .. }
                                        | Block::Subsidiary { memory_address, .. } => {
                                            *memory_address == address
                                        }
                                        _ => false,
                                    }) {
                                        return false;
                                    } else {
                                        continue;
                                    }
                                }
                                None => return false,
                            }
                        }
                        _ => unimplemented!(),
                    }
                }
                true
            }
            FieldInfo::FnPointer => false,
            FieldInfo::PointerArray { .. } => unimplemented!(), //todo: fix
            FieldInfo::Value => {
                // "ListBase" is Blender's linked-list header; it is valid
                // only when both list ends are valid pointers.
                if field.type_name == "ListBase" {
                    let instance = self.get(name);
                    instance.is_valid("first") && instance.is_valid("last")
                } else {
                    true
                }
            }
            FieldInfo::ValueArray { .. } => true,
            _ => panic!(
                "is_valid called for unknown field '{}'. ({:?})",
                name, field,
            ),
        }
    }
    /// `get_value` abstracts accessing primitives and is used by all `get_[]` functions (`get_i8`, `get_f32`, etc).
    ///
    /// # Panics
    /// Panics on an unknown field, a non-primitive field, or a field whose
    /// data length doesn't equal `size_of::<U>()`.
    fn get_value<T: AsRef<str>, U: BlendPrimitive>(&self, name: T) -> U {
        let name = name.as_ref();
        let field = self.expect_field(name);
        let blender_type_name = U::blender_name();
        match field.info {
            // NOTE(review): the name-based type check is commented out, so
            // any primitive field whose byte size matches `U` can be read as
            // `U` — confirm this leniency is intended before re-enabling.
            FieldInfo::Value if field.is_primitive /*&& field.type_name == blender_type_name*/ => {
                assert_eq!(
                    field.data_len,
                    size_of::<U>(),
                    "field '{}' doesn't have enough data for a {}. ({:?})",
                    name,
                    blender_type_name,
                    field,
                );
                U::parse(
                    self.data.get(field.data_start, field.data_len),
                    self.blend.header.endianness,
                )
            }
            _ => panic!(
                "field '{}' is not {}. ({:?})",
                name, blender_type_name, field
            ),
        }
    }
    // Typed convenience wrappers over `get_value`. All of them inherit its
    // panics (missing field, non-primitive field, size mismatch).
    pub fn get_u8<T: AsRef<str>>(&self, name: T) -> u8 {
        self.get_value(name)
    }
    pub fn get_i8<T: AsRef<str>>(&self, name: T) -> i8 {
        self.get_value(name)
    }
    /// Reads a single byte and widens it with `as char` (Latin-1 mapping).
    pub fn get_char<T: AsRef<str>>(&self, name: T) -> char {
        self.get_u8(name) as char
    }
    pub fn get_u16<T: AsRef<str>>(&self, name: T) -> u16 {
        self.get_value(name)
    }
    pub fn get_i16<T: AsRef<str>>(&self, name: T) -> i16 {
        self.get_value(name)
    }
    pub fn get_i32<T: AsRef<str>>(&self, name: T) -> i32 {
        self.get_value(name)
    }
    pub fn get_u32<T: AsRef<str>>(&self, name: T) -> u32 {
        self.get_value(name)
    }
    pub fn get_f32<T: AsRef<str>>(&self, name: T) -> f32 {
        self.get_value(name)
    }
    pub fn get_f64<T: AsRef<str>>(&self, name: T) -> f64 {
        self.get_value(name)
    }
    pub fn get_u64<T: AsRef<str>>(&self, name: T) -> u64 {
        self.get_value(name)
    }
    pub fn get_i64<T: AsRef<str>>(&self, name: T) -> i64 {
        self.get_value(name)
    }
/// `get_value_vec` abstracts accessing primitive arrays and is used by all `get_[]_vec` functions (`get_i8_vec`,
/// `get_f32_vec`, etc).
fn get_value_vec<T: AsRef<str>, U: BlendPrimitive>(&self, name: T) -> Vec<U> {
let name = name.as_ref();
let field = self.expect_field(name);
let blender_type_name = U::blender_name();
let data = match field.info {
FieldInfo::ValueArray { len, .. } if field.is_primitive => {
assert_eq!(
field.data_len / len,
size_of::<U>(),
"field '{}' doesn't have enough data for a {} array. ({:?})",
name,
blender_type_name,
field,
);
self.data.get(field.data_start, field.data_len)
}
FieldInfo::Pointer { indirection_count } if indirection_count == 1 => {
let pointer = self.get_ptr(field);
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | true |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/parsers/field.rs | src/parsers/field.rs | use nom::{
branch::alt,
bytes::complete::{tag, take_till, take_until},
combinator::complete,
error::{ErrorKind, ParseError},
multi::{many0, many1},
sequence::delimited,
Err, IResult,
};
/// Errors produced while parsing a DNA field declaration string.
#[derive(Debug)]
pub enum FieldParseError {
    /// A generic nom error, optionally chained to an underlying error.
    NomError {
        kind: ErrorKind,
        other: Option<Box<FieldParseError>>,
    },
    /// An array dimension between `[` and `]` was not a valid `usize`.
    InvalidArraySize,
}
/// Lets nom build `FieldParseError`s from its own error kinds.
impl ParseError<&str> for FieldParseError {
    fn from_error_kind(_input: &str, kind: ErrorKind) -> Self {
        FieldParseError::NomError { kind, other: None }
    }
    fn append(_input: &str, kind: ErrorKind, other: Self) -> Self {
        FieldParseError::NomError {
            kind,
            other: Some(Box::new(other)),
        }
    }
}
/// Parser result alias: string input, custom error type.
type Result<'a, T> = IResult<&'a str, T, FieldParseError>;
/// The shape of a DNA field, derived from its declaration syntax.
#[derive(Debug, Clone)]
pub enum FieldInfo {
    /// A plain value, e.g. `flag`.
    Value,
    /// A (possibly multi-dimensional) value array, e.g. `mat[4][4]`;
    /// `len` is the product of all dimensions.
    ValueArray {
        len: usize,
        dimensions: Vec<usize>,
    },
    /// A pointer, e.g. `*next` or `**mat`; `indirection_count` is the
    /// number of `*`.
    Pointer {
        indirection_count: usize,
    },
    /// An array of pointers, e.g. `*mtex[18]`.
    PointerArray {
        indirection_count: usize,
        len: usize,
        dimensions: Vec<usize>,
    },
    /// A function pointer, e.g. `(*free)()`.
    FnPointer,
}
/// Parses a function-pointer declaration of the form `(*name)(...)`,
/// returning the bare name and `FieldInfo::FnPointer`.
pub fn fn_pointer(input: &str) -> Result<(&str, FieldInfo)> {
    let (rest, fn_name) = delimited(tag("(*"), take_until(")"), tag(")"))(input)?;
    // Consume (and discard) the parenthesized argument list.
    let (rest, _args) = delimited(tag("("), take_until(")"), tag(")"))(rest)?;
    Ok((rest, (fn_name, FieldInfo::FnPointer)))
}
/// Parses zero or more `[N]` suffixes and returns the dimension lengths.
/// Fails with `InvalidArraySize` on the first dimension that is not a
/// valid `usize`.
fn array_dimensions(input: &str) -> Result<Vec<usize>> {
    let (rest, raw_dimensions) =
        many0(complete(delimited(tag("["), take_until("]"), tag("]"))))(input)?;
    let dimensions = raw_dimensions
        .into_iter()
        .map(|dimension| {
            dimension
                .parse::<usize>()
                .map_err(|_| Err::Failure(FieldParseError::InvalidArraySize))
        })
        .collect::<std::result::Result<Vec<_>, _>>()?;
    Ok((rest, dimensions))
}
/// Parses a pointer field: one or more `*`, the name, then optional `[N]`
/// suffixes. Returns `PointerArray` when dimensions are present, otherwise
/// `Pointer`; both carry the indirection count (number of `*`).
fn pointer(input: &str) -> Result<(&str, FieldInfo)> {
    let (input, stars) = many1(tag("*"))(input)?;
    let indirection_count = stars.len();
    let (input, name) = take_till(|c| c == '[')(input)?;
    if input.is_empty() {
        // No `[` follows the name: a plain pointer.
        return Ok((input, (name, FieldInfo::Pointer { indirection_count })));
    }
    let (input, dimensions) = array_dimensions(input)?;
    let len = dimensions.iter().product();
    Ok((
        input,
        (
            name,
            FieldInfo::PointerArray {
                indirection_count,
                len,
                dimensions,
            },
        ),
    ))
}
/// Parses a plain value field: the name followed by optional `[N]` suffixes.
/// Returns `ValueArray` (with the total element count) when dimensions are
/// present, otherwise `Value`.
fn value(input: &str) -> Result<(&str, FieldInfo)> {
    let (input, name) = take_till(|c| c == '[')(input)?;
    if input.is_empty() {
        // No `[` follows the name: a scalar value.
        return Ok((input, (name, FieldInfo::Value)));
    }
    let (input, dimensions) = array_dimensions(input)?;
    let len = dimensions.iter().product();
    Ok((input, (name, FieldInfo::ValueArray { len, dimensions })))
}
/// Parses a single DNA field declaration, trying function-pointer syntax
/// first, then pointers, then plain values/arrays.
pub fn parse_field(input: &str) -> Result<(&str, FieldInfo)> {
    alt((fn_pointer, pointer, value))(input)
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/parsers/dna.rs | src/parsers/dna.rs | use crate::parsers::{Endianness, PointerSize, Result};
use nom::{
bytes::complete::{tag, take, take_while},
combinator::map,
multi::count,
number::complete::{be_u16, be_u32, le_u16, le_u32},
sequence::terminated,
};
use std::convert::TryInto;
/// The parsed DNA of a blend file: the name/type/struct tables used to
/// interpret every other block in the file.
#[derive(Debug)]
pub struct Dna {
    pub names: Vec<String>,
    pub types: Vec<DnaType>,
    pub structs: Vec<DnaStruct>,
}
/// A type entry from the DNA TYPE/TLEN sections.
#[derive(Debug)]
pub struct DnaType {
    pub name: String,
    pub bytes_len: usize, //size in bytes of the type
}
/// One field of a DNA struct, stored as indices into the DNA tables.
#[derive(Debug)]
pub struct DnaField {
    pub type_index: usize, //index on Dna::types array
    pub name_index: usize, //index on Dna::names array
}
/// A struct entry from the DNA STRC section.
#[derive(Debug)]
pub struct DnaStruct {
    pub type_index: usize, //index on Dna::types array
    pub fields: Vec<DnaField>,
}
/// Carries the file-header information needed while parsing the DNA block.
#[derive(Debug)]
pub struct DnaParseContext {
    endianness: Endianness,
    _pointer_size: PointerSize,
}
impl DnaParseContext {
    /// Creates a context from the file header's endianness and pointer size.
    pub fn new(endianness: Endianness, pointer_size: PointerSize) -> Self {
        Self {
            endianness,
            _pointer_size: pointer_size,
        }
    }
    /// Parses the NAME section: a count followed by that many
    /// null-terminated strings, padded to a 4-byte boundary.
    /// Panics if a u32 can't be converted to usize.
    fn names<'a, 'b>(&'a self, input: &'b [u8]) -> Result<'b, Vec<String>>
    where
        'b: 'a,
    {
        let (input, _) = tag("NAME")(input)?;
        let (input, names_len) = match self.endianness {
            Endianness::Little => le_u32(input)?,
            Endianness::Big => be_u32(input)?,
        };
        // RefCell lets the `map` closure below accumulate the number of
        // consumed bytes while remaining a `Fn` closure, as nom requires.
        let all_names_len = std::cell::RefCell::new(0_usize);
        let (input, names) = count(
            terminated(
                map(take_while(|b: u8| b != 0), |b: &[u8]| {
                    *all_names_len.borrow_mut() += b.len() + 1; //+1 for the null separator
                    String::from_utf8_lossy(b).into_owned()
                }),
                tag("\0"),
            ),
            names_len.try_into().expect("u32 to usize"),
        )(input)?;
        // Skip the zero-padding that aligns the section to 4 bytes.
        let skip_len = {
            let mut sum = *all_names_len.borrow();
            let res = sum;
            while sum % 4 != 0 {
                sum += 1;
            }
            sum - res
        };
        let (input, _) = take(skip_len)(input)?;
        Ok((input, names))
    }
    /// Parses the TYPE and TLEN sections: type names followed by per-type
    /// byte lengths, each section padded to a 4-byte boundary.
    /// Panics if a u32 can't be converted to usize.
    fn types<'a, 'b>(&'a self, input: &'b [u8]) -> Result<'b, Vec<DnaType>>
    where
        'b: 'a,
    {
        let (input, _) = tag("TYPE")(input)?;
        let (input, types_len) = match self.endianness {
            Endianness::Little => le_u32(input)?,
            Endianness::Big => be_u32(input)?,
        };
        let types_len = types_len.try_into().expect("u32 to usize");
        // Same RefCell accumulation trick as in `names`.
        let all_type_names_len = std::cell::RefCell::new(0_usize);
        let (input, type_names) = count(
            terminated(
                map(take_while(|b: u8| b != 0), |b: &[u8]| {
                    *all_type_names_len.borrow_mut() += b.len() + 1;
                    String::from_utf8_lossy(b).into_owned()
                }),
                tag("\0"),
            ),
            types_len,
        )(input)?;
        let skip_len = {
            let mut sum = *all_type_names_len.borrow();
            let res = sum;
            while sum % 4 != 0 {
                sum += 1;
            }
            sum - res
        };
        let (input, _) = take(skip_len)(input)?;
        let (input, _) = tag("TLEN")(input)?;
        // One u16 byte-length per type.
        let (input, type_lenghts) = count(
            match self.endianness {
                Endianness::Little => le_u16,
                Endianness::Big => be_u16,
            },
            types_len,
        )(input)?;
        // The TLEN section is `types_len * 2` bytes, again 4-byte padded.
        let skip_len = {
            let mut sum = types_len * 2;
            let res = sum;
            while sum % 4 != 0 {
                sum += 1;
            }
            sum - res
        };
        let (input, _) = take(skip_len)(input)?;
        Ok((
            input,
            type_names
                .into_iter()
                .zip(type_lenghts)
                .map(|(name, length)| DnaType {
                    name,
                    bytes_len: length.try_into().expect("u32 to usize"),
                })
                .collect(),
        ))
    }
    /// Parses the STRC section: for each struct, its type index followed by
    /// its (type index, name index) field pairs.
    /// Panics if a u32 can't be converted to usize.
    fn structs<'b>(&self, input: &'b [u8]) -> Result<'b, Vec<DnaStruct>> {
        let (input, _) = tag("STRC")(input)?;
        let (input, structs_len) = match self.endianness {
            Endianness::Little => le_u32(input)?,
            Endianness::Big => be_u32(input)?,
        };
        // Manual loops (instead of `count`) because each struct declares its
        // own field count inside the stream.
        let mut structs = Vec::new();
        let mut final_input = input;
        for _ in 0..structs_len {
            let (input, struct_name_index) = match self.endianness {
                Endianness::Little => le_u16(final_input)?,
                Endianness::Big => be_u16(final_input)?,
            };
            let (input, fields_num) = match self.endianness {
                Endianness::Little => le_u16(input)?,
                Endianness::Big => be_u16(input)?,
            };
            let mut next_input = input;
            let mut fields = Vec::new();
            for _ in 0..fields_num {
                let (input, field_type_index) = match self.endianness {
                    Endianness::Little => le_u16(next_input)?,
                    Endianness::Big => be_u16(next_input)?,
                };
                let (input, field_name_index) = match self.endianness {
                    Endianness::Little => le_u16(input)?,
                    Endianness::Big => be_u16(input)?,
                };
                next_input = input;
                fields.push(DnaField {
                    type_index: field_type_index.try_into().expect("u32 to usize"),
                    name_index: field_name_index.try_into().expect("u32 to usize"),
                });
            }
            final_input = next_input;
            structs.push(DnaStruct {
                type_index: struct_name_index.try_into().expect("u32 to usize"),
                fields,
            });
        }
        Ok((final_input, structs))
    }
    /// Parses the entire SDNA block: names, then types, then structs.
    pub fn dna<'a, 'b>(&'a self, input: &'b [u8]) -> Result<'b, Dna>
    where
        'b: 'a,
    {
        let (input, _) = tag("SDNA")(input)?;
        let (input, names) = self.names(input)?;
        let (input, types) = self.types(input)?;
        let (input, structs) = self.structs(input)?;
        Ok((
            input,
            Dna {
                names,
                types,
                structs,
            },
        ))
    }
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/parsers/primitive.rs | src/parsers/primitive.rs | use super::Endianness;
use nom::number::complete::{
be_f32, be_f64, be_i16, be_i32, be_i64, be_i8, be_u16, be_u32, be_u64, le_f32, le_f64, le_i16,
le_i32, le_i64, le_i8, le_u16, le_u32, le_u64,
};
/// A Rust primitive that can be decoded from a blend file's raw bytes.
/// `blender_name` is the matching DNA type name (currently unused by the
/// callers visible here — the name comparison is commented out).
pub(crate) trait BlendPrimitive {
    fn parse(data: &[u8], endianness: Endianness) -> Self;
    fn blender_name() -> &'static str;
}
// `char` fields are single bytes; `as char` widens through Latin-1.
impl BlendPrimitive for char {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_u8(data, endianness) as char
    }
    fn blender_name() -> &'static str {
        "char"
    }
}
impl BlendPrimitive for i8 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_i8(data, endianness)
    }
    fn blender_name() -> &'static str {
        "char"
    }
}
impl BlendPrimitive for u8 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_u8(data, endianness)
    }
    fn blender_name() -> &'static str {
        "uchar"
    }
}
impl BlendPrimitive for u16 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_u16(data, endianness)
    }
    fn blender_name() -> &'static str {
        "ushort"
    }
}
impl BlendPrimitive for i16 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_i16(data, endianness)
    }
    fn blender_name() -> &'static str {
        "short"
    }
}
impl BlendPrimitive for i32 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_i32(data, endianness)
    }
    fn blender_name() -> &'static str {
        "int"
    }
}
// NOTE(review): u32 maps to "ulong" while u16 maps to "ushort" and i32 to
// "int" — verify against Blender's DNA type names before re-enabling the
// name check in `get_value`.
impl BlendPrimitive for u32 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_u32(data, endianness)
    }
    fn blender_name() -> &'static str {
        "ulong"
    }
}
impl BlendPrimitive for f32 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_f32(data, endianness)
    }
    fn blender_name() -> &'static str {
        "float"
    }
}
impl BlendPrimitive for f64 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_f64(data, endianness)
    }
    fn blender_name() -> &'static str {
        "double"
    }
}
impl BlendPrimitive for u64 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_u64(data, endianness)
    }
    fn blender_name() -> &'static str {
        "uint64_t"
    }
}
impl BlendPrimitive for i64 {
    fn parse(data: &[u8], endianness: Endianness) -> Self {
        parse_i64(data, endianness)
    }
    fn blender_name() -> &'static str {
        "int64_t"
    }
}
// Endianness-aware decoding helpers. Each reads one value from the front of
// `slice` and panics (via `expect`) when the slice is too short — callers
// are expected to have validated the length.
pub fn parse_i8(slice: &[u8], endianness: Endianness) -> i8 {
    let (_, val) = match endianness {
        Endianness::Little => le_i8::<_, ()>(slice).expect("parse i8"),
        Endianness::Big => be_i8::<_, ()>(slice).expect("parse i8"),
    };
    val
}
// A single byte has no endianness, so the parameter is ignored.
pub fn parse_u8(slice: &[u8], _endianness: Endianness) -> u8 {
    *slice.first().expect("parse u8")
}
pub fn parse_u16(slice: &[u8], endianness: Endianness) -> u16 {
    let (_, val) = match endianness {
        Endianness::Little => le_u16::<_, ()>(slice).expect("parse u16"),
        Endianness::Big => be_u16::<_, ()>(slice).expect("parse u16"),
    };
    val
}
pub fn parse_i16(slice: &[u8], endianness: Endianness) -> i16 {
    let (_, val) = match endianness {
        Endianness::Little => le_i16::<_, ()>(slice).expect("parse i16"),
        Endianness::Big => be_i16::<_, ()>(slice).expect("parse i16"),
    };
    val
}
pub fn parse_i32(slice: &[u8], endianness: Endianness) -> i32 {
    let (_, val) = match endianness {
        Endianness::Little => le_i32::<_, ()>(slice).expect("parse i32"),
        Endianness::Big => be_i32::<_, ()>(slice).expect("parse i32"),
    };
    val
}
pub fn parse_f32(slice: &[u8], endianness: Endianness) -> f32 {
    let (_, val) = match endianness {
        Endianness::Little => le_f32::<_, ()>(slice).expect("parse f32"),
        Endianness::Big => be_f32::<_, ()>(slice).expect("parse f32"),
    };
    val
}
pub fn parse_f64(slice: &[u8], endianness: Endianness) -> f64 {
    let (_, val) = match endianness {
        Endianness::Little => le_f64::<_, ()>(slice).expect("parse f64"),
        Endianness::Big => be_f64::<_, ()>(slice).expect("parse f64"),
    };
    val
}
pub fn parse_u32(slice: &[u8], endianness: Endianness) -> u32 {
    let (_, val) = match endianness {
        Endianness::Little => le_u32::<_, ()>(slice).expect("parse u32"),
        Endianness::Big => be_u32::<_, ()>(slice).expect("parse u32"),
    };
    val
}
pub fn parse_i64(slice: &[u8], endianness: Endianness) -> i64 {
    let (_, val) = match endianness {
        Endianness::Little => le_i64::<_, ()>(slice).expect("parse i64"),
        Endianness::Big => be_i64::<_, ()>(slice).expect("parse i64"),
    };
    val
}
pub fn parse_u64(slice: &[u8], endianness: Endianness) -> u64 {
    let (_, val) = match endianness {
        Endianness::Little => le_u64::<_, ()>(slice).expect("parse u64"),
        Endianness::Big => be_u64::<_, ()>(slice).expect("parse u64"),
    };
    val
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/parsers/mod.rs | src/parsers/mod.rs | pub mod blend;
pub mod dna;
pub mod field;
pub mod primitive;
use nom::{
error::{ErrorKind, ParseError},
IResult,
};
use std::io;
/// Parser result alias: byte-slice input, crate-specific error type.
type Result<'a, T> = IResult<&'a [u8], T, BlendParseError>;
/// Size of a pointer on the machine used to create the .blend file.
#[derive(Debug, Copy, Clone)]
pub enum PointerSize {
    Bits32,
    Bits64,
}
impl PointerSize {
/// Returns the pointer size in bytes.
pub fn bytes_num(self) -> usize {
match self {
PointerSize::Bits32 => 4,
PointerSize::Bits64 => 8,
}
}
}
/// Endianness of the machine used to create the .blend file.
#[derive(Debug, Copy, Clone)]
pub enum Endianness {
    Little,
    Big,
}
/// Errors that can happen during the initial parsing of the .blend file.
/// Most errors are simply `NomError` but a few of them are specific either
/// for better error reporting or due to custom logic.
#[derive(Debug)]
pub enum BlendParseError {
    /// A generic nom error, optionally chained to an underlying error.
    NomError {
        kind: ErrorKind,
        other: Option<Box<BlendParseError>>,
    },
    /// An I/O error from reading the file or input stream.
    IoError(io::Error),
    /// Returned when the file is incomplete.
    NotEnoughData,
    /// The known block codes are `b"REND"`, `b"TEST"`, `b"GLOB"`, `b"DATA"` and any two-digit code
    /// like `b"OB\0\0"` for objects. Anything different from that returns `UnknownBlockCode`
    UnknownBlockCode,
    /// Principal blocks are assumed to never be lists even though it is possible. This is done
    /// to simplify the API. No version of a blend file was found where this isn't true.
    UnsupportedCountOnPrincipalBlock,
    /// This error happens if a block has a memory address equal to `0`. This should be impossible
    /// as `0` represents a null pointer.
    InvalidMemoryAddress,
    /// Returned when the DNA block is not found at the end of the blend file.
    NoDnaBlockFound,
    /// Returned when the file doesn't start with `b"BLENDER"`. The assumption is that the file
    /// is a gzip compressed blend file, but this isn't actually tested for.
    CompressedFileNotSupported,
}
/// Lets nom build `BlendParseError`s from its own error kinds.
impl ParseError<&[u8]> for BlendParseError {
    fn from_error_kind(_input: &[u8], kind: ErrorKind) -> Self {
        BlendParseError::NomError { kind, other: None }
    }
    fn append(_input: &[u8], kind: ErrorKind, other: Self) -> Self {
        BlendParseError::NomError {
            kind,
            other: Some(Box::new(other)),
        }
    }
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/src/parsers/blend.rs | src/parsers/blend.rs | use crate::parsers::{
dna::{Dna, DnaParseContext},
BlendParseError, Endianness, PointerSize, Result,
};
use nom::{
branch::alt,
bytes::complete::{tag, take},
multi::many_till,
number::complete::{be_u32, be_u64, le_u32, le_u64},
sequence::tuple,
Err,
};
use std::{
convert::TryInto,
fmt::{self, Debug, Formatter},
io::Read,
num::NonZeroU64,
path::Path,
result::Result as StdResult,
};
/// The raw payload of a block, plus how many structs it packs.
pub struct BlockData {
    /// The entire binary data of the `Block` in the blend file.
    pub data: Vec<u8>,
    /// The data field can contain more than one struct, count tells us how many there are.
    pub count: usize,
}
// Compact Debug: the raw bytes would be overwhelming, print only sizes.
impl Debug for BlockData {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "len/count: {}/{}", self.data.len(), self.count)
    }
}
/// Represents all possible block types found in the blend file.
/// `Rend`, `Test` and `Global` are ignored by this crate but are still represented here.
#[derive(Debug)]
pub enum Block {
    Rend,
    Test,
    Global {
        memory_address: NonZeroU64,
        dna_index: usize,
        data: BlockData,
    },
    /// A principal (or root) block is defined by having a two digit code and by the fact that its `dna_index` is always
    /// valid. If we have a pointer to a principal block, we can ignore the type of the pointer and use the block type.
    Principal {
        code: [u8; 2],
        memory_address: NonZeroU64,
        dna_index: usize,
        data: BlockData,
    },
    /// Subsidiary blocks are defined by having the code "DATA", which is omitted here. Their `dna_index` is not
    /// always correct and is only used when whichever field points to them has an "invalid" type (like void*).
    Subsidiary {
        memory_address: NonZeroU64,
        dna_index: usize,
        data: BlockData,
    },
    /// The DNA of the blend file. Used to interpret all the other blocks.
    Dna(Dna),
}
/// The 12-byte file header, parsed from the start of the blend file.
#[derive(Debug, Clone)]
pub struct Header {
    /// The size of the pointer on the machine used to save the blend file.
    pub pointer_size: PointerSize,
    /// The endianness on the machine used to save the blend file.
    pub endianness: Endianness,
    /// The version of Blender used to save the blend file.
    pub version: [u8; 3],
}
// An '_' after the magic marks 4-byte pointers.
fn pointer_size_bits32(input: &[u8]) -> Result<PointerSize> {
    let (input, _) = tag("_")(input)?;
    Ok((input, PointerSize::Bits32))
}
// A '-' after the magic marks 8-byte pointers.
fn pointer_size_bits64(input: &[u8]) -> Result<PointerSize> {
    let (input, _) = tag("-")(input)?;
    Ok((input, PointerSize::Bits64))
}
/// Parses the pointer-size marker: `_` for 32-bit, `-` for 64-bit.
pub fn pointer_size(input: &[u8]) -> Result<PointerSize> {
    alt((pointer_size_bits32, pointer_size_bits64))(input)
}
// Fixed typo in the internal name (was `endianness_litte`); private to this
// file, updated at its single call site below.
fn endianness_little(input: &[u8]) -> Result<Endianness> {
    let (input, _) = tag("v")(input)?;
    Ok((input, Endianness::Little))
}
fn endianness_big(input: &[u8]) -> Result<Endianness> {
    let (input, _) = tag("V")(input)?;
    Ok((input, Endianness::Big))
}
/// Parses the endianness marker: `v` for little-endian, `V` for big-endian.
pub fn endianness(input: &[u8]) -> Result<Endianness> {
    alt((endianness_little, endianness_big))(input)
}
/// Parses the 3-byte ASCII version (e.g. `b"280"` for Blender 2.80).
pub fn version(input: &[u8]) -> Result<[u8; 3]> {
    let (input, v) = take(3_usize)(input)?;
    Ok((input, [v[0], v[1], v[2]]))
}
/// Parses the 12-byte file header: the `b"BLENDER"` magic, pointer size,
/// endianness and 3-byte version.
pub fn header(input: &[u8]) -> Result<Header> {
    // Any mismatch on the magic is reported as a compressed file — this is
    // an assumption, not a verified gzip check (see the error's docs).
    let (input, _) = match tag::<_, _, BlendParseError>("BLENDER")(input) {
        Ok(v) => v,
        Err(_) => {
            return Err(nom::Err::Failure(
                BlendParseError::CompressedFileNotSupported,
            ))
        }
    };
    let (input, (pointer_size, endianness, version)) =
        tuple((pointer_size, endianness, version))(input)?;
    Ok((
        input,
        Header {
            pointer_size,
            endianness,
            version,
        },
    ))
}
/// Reads the 4-byte code that starts every block header.
pub fn block_header_code(input: &[u8]) -> Result<[u8; 4]> {
    let (input, v) = take(4_usize)(input)?;
    Ok((input, [v[0], v[1], v[2], v[3]]))
}
/// A fully parsed blend file: header, all blocks (minus the DNA block,
/// which is split out), and the DNA itself.
#[derive(Debug)]
pub struct RawBlend {
    pub header: Header,
    pub blocks: Vec<Block>,
    pub dna: Dna,
}
impl RawBlend {
/// Returns a new `Blend` instance from `data`.
pub fn from_data<T: Read>(mut data: T) -> StdResult<Self, BlendParseError> {
let mut buffer = Vec::new();
data.read_to_end(&mut buffer)
.map_err(BlendParseError::IoError)?;
let mut parser = BlendParseContext::default();
let res = parser.blend(&buffer);
match res {
Ok((_, blend)) => Ok(blend),
Err(Err::Failure(e)) | Err(Err::Error(e)) => Err(e),
Err(Err::Incomplete(..)) => Err(BlendParseError::NotEnoughData),
}
}
/// Returns a new `Blend` instance from a path.
pub fn from_path<P: AsRef<Path>>(path: P) -> StdResult<Self, BlendParseError> {
use std::fs::File;
let file = File::open(path).map_err(BlendParseError::IoError)?;
RawBlend::from_data(file)
}
}
/// Parsing happens in two phases: the header must be parsed first because
/// reading every subsequent block depends on the header's pointer size and
/// endianness.
#[derive(Default)]
pub enum BlendParseContext {
    /// No header parsed yet; block parsing is unreachable in this state.
    #[default]
    Empty,
    /// Header available; blocks can now be parsed.
    ParsedHeader(Header),
}
impl BlendParseContext {
    /// Reads a pointer-sized old memory address using the pointer size and
    /// endianness of the already-parsed header.
    fn memory_address<'a>(&self, input: &'a [u8]) -> Result<'a, NonZeroU64> {
        match self {
            BlendParseContext::ParsedHeader(header) => {
                let read_len: usize = match header.pointer_size {
                    PointerSize::Bits32 => 4,
                    PointerSize::Bits64 => 8,
                };
                let (input, data) = take(read_len)(input)?;
                // 32-bit addresses are widened to u64 so the rest of the
                // crate deals with a single address type.
                let (_, address) = match (&header.endianness, &header.pointer_size) {
                    (Endianness::Little, PointerSize::Bits32) => {
                        le_u32(data).map(|(i, n)| (i, u64::from(n)))?
                    }
                    (Endianness::Big, PointerSize::Bits32) => {
                        be_u32(data).map(|(i, n)| (i, u64::from(n)))?
                    }
                    (Endianness::Little, PointerSize::Bits64) => le_u64(data)?,
                    (Endianness::Big, PointerSize::Bits64) => be_u64(data)?,
                };
                // A zero address would be a null pointer, which no block
                // should carry.
                if let Some(address) = NonZeroU64::new(address) {
                    Ok((input, address))
                } else {
                    Err(Err::Failure(BlendParseError::InvalidMemoryAddress))
                }
            }
            BlendParseContext::Empty => unreachable!("Header should be parsed here"),
        }
    }
    /// Parses one file block: 4-byte code, payload size, old memory address,
    /// DNA index and struct count, followed by `size` bytes of payload.
    /// Panics if a u32 can't be converted to usize in your system.
    fn block<'a, 'b>(&'a self, input: &'b [u8]) -> Result<'b, Block>
    where
        'b: 'a,
    {
        match self {
            BlendParseContext::ParsedHeader(header) => {
                let (input, code) = block_header_code(input)?;
                let (input, size): (_, usize) = match header.endianness {
                    Endianness::Little => {
                        le_u32(input).map(|(i, n)| (i, n.try_into().expect("u32 to usize")))?
                    }
                    Endianness::Big => {
                        be_u32(input).map(|(i, n)| (i, n.try_into().expect("u32 to usize")))?
                    }
                };
                let (input, memory_address) = self.memory_address(input)?;
                let (input, dna_index) = match header.endianness {
                    Endianness::Little => le_u32(input)?,
                    Endianness::Big => be_u32(input)?,
                };
                let (input, count) = match header.endianness {
                    Endianness::Little => le_u32(input)?,
                    Endianness::Big => be_u32(input)?,
                };
                let (input, block_data) = take(size)(input)?;
                //Assumption: These block codes will always exist
                let block = match &code {
                    b"REND" => Block::Rend,
                    b"TEST" => Block::Test,
                    b"GLOB" => Block::Global {
                        memory_address,
                        dna_index: dna_index.try_into().expect("u32 to usize"),
                        data: BlockData {
                            data: block_data.to_vec(),
                            count: count.try_into().expect("u32 to usize"),
                        },
                    },
                    b"DATA" => Block::Subsidiary {
                        memory_address,
                        dna_index: dna_index.try_into().expect("u32 to usize"),
                        data: BlockData {
                            data: block_data.to_vec(),
                            count: count.try_into().expect("u32 to usize"),
                        },
                    },
                    b"DNA1" => {
                        // The DNA payload is parsed in place with its own
                        // context, built from this file's header.
                        let ctx = DnaParseContext::new(header.endianness, header.pointer_size);
                        let (_, dna) = ctx.dna(block_data)?;
                        Block::Dna(dna)
                    }
                    &[code1, code2, 0, 0] => {
                        // Two-letter codes (b"OB\0\0", b"ME\0\0", ...) are
                        // principal blocks, assumed to hold exactly one
                        // struct (see `UnsupportedCountOnPrincipalBlock`).
                        if count != 1 {
                            return Err(Err::Failure(
                                BlendParseError::UnsupportedCountOnPrincipalBlock,
                            ));
                        } else {
                            Block::Principal {
                                code: [code1, code2],
                                memory_address,
                                dna_index: dna_index.try_into().expect("u32 to usize"),
                                data: BlockData {
                                    data: block_data.to_vec(),
                                    count: 1,
                                },
                            }
                        }
                    }
                    _ => return Err(Err::Failure(BlendParseError::UnknownBlockCode)),
                };
                Ok((input, block))
            }
            BlendParseContext::Empty => unreachable!("Header should be parsed here"),
        }
    }
    /// Parses the whole file: the header, then blocks until `b"ENDB"`.
    pub fn blend<'a, 'b>(&'a mut self, input: &'b [u8]) -> Result<'b, RawBlend>
    where
        'b: 'a,
    {
        let (input, header) = header(input)?;
        //This has to happen before the rest of the parser runs
        *self = BlendParseContext::ParsedHeader(header.clone());
        let (input, (mut blocks, _)) = many_till(move |d| self.block(d), tag("ENDB"))(input)?;
        let dna = if let Some(Block::Dna(dna)) = blocks.pop() {
            // Assumption: The DNA block is always the last one
            dna
        } else {
            return Err(Err::Failure(BlendParseError::NoDnaBlockFound));
        };
        Ok((
            input,
            RawBlend {
                blocks,
                dna,
                header,
            },
        ))
    }
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/examples/names_positions.rs | examples/names_positions.rs | use blend::Blend;
use std::{env, path};
/// Loads `examples/blend_files/<file_name>` relative to the crate root and
/// prints every object's name and location.
fn print_names_and_positions(file_name: &str) {
    let manifest_dir =
        env::var_os("CARGO_MANIFEST_DIR").expect("could not find cargo manifest dir");
    let blend_path = path::PathBuf::from(manifest_dir)
        .join("examples/blend_files")
        .join(file_name);
    let blend = Blend::from_path(blend_path).expect("error loading blend file");
    // "OB" is the principal-block code for objects.
    for object in blend.instances_with_code(*b"OB") {
        let location = object.get_f32_vec("loc");
        let name = object.get("id").get_string("name");
        println!("\"{}\" at {:?}", name, location);
    }
}
fn main() {
print_names_and_positions("2_80.blend");
print_names_and_positions("2_90.blend");
print_names_and_positions("3_0.blend");
print_names_and_positions("3_5.blend");
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/examples/read_mesh_280.rs | examples/read_mesh_280.rs | use blend::{Blend, Instance};
use std::{env, path};
// A vertex as (position, normal, uv) — matches what `instance_to_mesh`
// builds below.
type Vertex = ([f32; 3], [f32; 3], [f32; 2]);
// A triangle of three vertices.
type Face = [Vertex; 3];
/// A triangulated mesh extracted from a blend file.
#[derive(Debug)]
struct Mesh {
    _faces: Vec<Face>,
}
/// A scene object with its transform and mesh.
#[derive(Debug)]
struct Object {
    _name: String,
    _location: [f32; 3],
    _rotation: [f32; 3],
    _scale: [f32; 3],
    _mesh: Mesh,
}
// This is only valid for meshes with triangular faces
fn instance_to_mesh(mesh: Instance) -> Option<Mesh> {
    // Bail out unless all four mesh arrays are present and resolvable.
    if !mesh.is_valid("mpoly")
        || !mesh.is_valid("mloop")
        || !mesh.is_valid("mloopuv")
        || !mesh.is_valid("mvert")
    {
        return None;
    }
    let faces = mesh.get_iter("mpoly").collect::<Vec<_>>();
    let loops = mesh.get_iter("mloop").collect::<Vec<_>>();
    let uvs = mesh.get_iter("mloopuv").collect::<Vec<_>>();
    let verts = mesh.get_iter("mvert").collect::<Vec<_>>();
    // First pass: count triangle-corner slots so the flat buffers can be
    // sized up front (3 indices per step of 2 loop entries per polygon).
    let mut index_count = 0;
    let mut face_indice_count = 0;
    for face in &faces {
        let len = face.get_i32("totloop");
        let mut indexi = 1;
        while indexi < len {
            face_indice_count += 3;
            indexi += 2;
        }
    }
    let mut uv_buffer = vec![0f32; face_indice_count * 2];
    let mut normal_buffer = vec![0f32; face_indice_count * 3];
    let mut verts_array_buff = vec![0f32; face_indice_count * 3];
    // Second pass: fill position/normal/uv values per triangle corner.
    for face in &faces {
        let len = face.get_i32("totloop");
        let start = face.get_i32("loopstart");
        let mut indexi = 1;
        while indexi < len {
            let mut index;
            for l in 0..3 {
                // Wrap back to the polygon start when running off its end.
                if (indexi - 1) + l < len {
                    index = start + (indexi - 1) + l;
                } else {
                    index = start;
                }
                let v = loops[index as usize].get_i32("v");
                let vert = &verts[v as usize];
                let co = vert.get_f32_vec("co");
                verts_array_buff[index_count * 3] = co[0];
                verts_array_buff[index_count * 3 + 1] = co[1];
                verts_array_buff[index_count * 3 + 2] = co[2];
                //Normals are compressed into 16 bit integers
                let no = vert.get_i16_vec("no");
                normal_buffer[index_count * 3] = f32::from(no[0]) / 32767.0;
                normal_buffer[index_count * 3 + 1] = f32::from(no[1]) / 32767.0;
                normal_buffer[index_count * 3 + 2] = f32::from(no[2]) / 32767.0;
                let uv = uvs[index as usize].get_f32_vec("uv");
                let uv_x = uv[0];
                let uv_y = uv[1];
                uv_buffer[index_count * 2] = uv_x;
                uv_buffer[index_count * 2 + 1] = uv_y;
                index_count += 1;
            }
            indexi += 2;
        }
    }
    // Regroup the flat buffers into (position, normal, uv) vertices, then
    // into triangles of three vertices each.
    let faces: Vec<_> = (&verts_array_buff[..])
        .chunks(3)
        .enumerate()
        .map(|(i, pos)| {
            (
                [pos[0], pos[1], pos[2]],
                [
                    normal_buffer[i * 3],
                    normal_buffer[i * 3 + 1],
                    normal_buffer[i * 3 + 2],
                ],
                [uv_buffer[i * 2], uv_buffer[i * 2 + 1]],
            )
        })
        .collect::<Vec<Vertex>>();
    let faces: Vec<_> = faces.chunks(3).map(|f| [f[0], f[1], f[2]]).collect();
    Some(Mesh { _faces: faces })
}
fn main() {
let base_path = path::PathBuf::from(
env::var_os("CARGO_MANIFEST_DIR").expect("could not find cargo manifest dir"),
);
let blend_path = base_path.join("examples/blend_files/2_80.blend");
let blend = Blend::from_path(blend_path).expect("error loading blend file");
let mut objects = Vec::new();
for obj in blend.instances_with_code(*b"OB") {
if obj.is_valid("data") && obj.get("data").code()[0..=1] == *b"ME" {
let loc = obj.get_f32_vec("loc");
let rot = obj.get_f32_vec("rot");
let size = obj.get_f32_vec("size");
let data = obj.get("data");
if let Some(mesh) = instance_to_mesh(data) {
objects.push(Object {
_name: obj.get("id").get_string("name"),
_location: [loc[0], loc[1], loc[2]],
_rotation: [rot[0], rot[1], rot[2]],
_scale: [size[0], size[1], size[2]],
_mesh: mesh,
});
}
}
}
println!("{:#?}", objects);
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
lukebitts/blend | https://github.com/lukebitts/blend/blob/6c4f0b55c6314441c9482097ac33497320326aaf/examples/print_blend/main.rs | examples/print_blend/main.rs | use blend::Blend;
use libflate::gzip::Decoder;
use std::{
env,
fs::File,
io::{self, BufWriter, Read, Write},
path::{self, PathBuf},
};
fn print_blend(file_name: impl AsRef<str>) -> Result<(), io::Error> {
let file_name = file_name.as_ref();
let base_path = path::PathBuf::from(
env::var_os("CARGO_MANIFEST_DIR").expect("could not find cargo manifest dir"),
);
let blend_path = base_path.join(format!("examples/blend_files/{}", file_name));
let output_path = base_path.join(format!("examples/print_blend/output_{}.txt", file_name));
println!("{}", blend_path.display());
let mut file = File::open(blend_path)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
if data[0..7] != *b"BLENDER" {
let mut decoder = Decoder::new(&data[..])?;
let mut gzip_data = Vec::new();
decoder.read_to_end(&mut gzip_data)?;
data = gzip_data;
}
let blend = Blend::new(&data[..]).expect("error loading blend file");
let mut output_path_without_file = PathBuf::from(&output_path);
output_path_without_file.pop();
std::fs::create_dir_all(&output_path_without_file)?;
let mut buffer = BufWriter::new(File::create(output_path)?);
for o in blend.root_instances() {
write!(buffer, "{}", o)?;
}
writeln!(buffer)?;
buffer.flush()?;
println!("done: {}", file_name);
Ok(())
}
pub fn main() -> Result<(), io::Error> {
print_blend("2_80.blend")?;
print_blend("2_90.blend")?;
print_blend("3_0.blend")?;
print_blend("3_5.blend")?;
Ok(())
}
| rust | MIT | 6c4f0b55c6314441c9482097ac33497320326aaf | 2026-01-04T20:18:08.827467Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/lockfile.rs | src/lockfile.rs | use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::{self, ErrorKind};
use std::path::{Path, PathBuf};
#[derive(Debug)]
pub struct Lockfile {
file_path: PathBuf,
lock_path: PathBuf,
pub lock: Option<File>,
}
impl Lockfile {
pub fn new(path: &Path) -> Lockfile {
Lockfile {
file_path: path.to_path_buf(),
lock_path: path.with_extension("lock"),
lock: None,
}
}
pub fn hold_for_update(&mut self) -> Result<(), std::io::Error> {
if self.lock.is_none() {
let open_file = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open(&self.lock_path)?;
self.lock = Some(open_file);
}
Ok(())
}
pub fn write(&mut self, contents: &str) -> Result<(), std::io::Error> {
self.write_bytes(contents.as_bytes())
}
pub fn write_bytes(&mut self, data: &[u8]) -> Result<(), std::io::Error> {
self.raise_on_stale_lock()?;
let mut lock = self.lock.as_ref().unwrap();
lock.write_all(data)?;
Ok(())
}
pub fn commit(&mut self) -> Result<(), std::io::Error> {
self.raise_on_stale_lock()?;
self.lock = None;
fs::rename(&self.lock_path, &self.file_path)?;
Ok(())
}
pub fn rollback(&mut self) -> Result<(), std::io::Error> {
self.raise_on_stale_lock()?;
fs::remove_file(&self.lock_path)?;
self.lock = None;
Ok(())
}
fn raise_on_stale_lock(&self) -> Result<(), std::io::Error> {
if self.lock.is_none() {
Err(io::Error::new(
ErrorKind::Other,
format!("Not holding lock on file: {:?}", self.lock_path),
))
} else {
Ok(())
}
}
}
impl Read for Lockfile {
fn read(&mut self, mut buf: &mut [u8]) -> Result<usize, io::Error> {
self.raise_on_stale_lock()?;
let mut lock = self.lock.as_ref().unwrap();
lock.read(&mut buf)
}
}
impl Write for Lockfile {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.raise_on_stale_lock()?;
let mut lock = self.lock.as_ref().unwrap();
lock.write(buf)
}
fn flush(&mut self) -> Result<(), io::Error> {
let mut lock = self.lock.as_ref().unwrap();
lock.flush()
}
}
impl<'a> Read for &'a Lockfile {
fn read(&mut self, mut buf: &mut [u8]) -> Result<usize, io::Error> {
self.raise_on_stale_lock()?;
let mut lock = self.lock.as_ref().unwrap();
lock.read(&mut buf)
}
}
impl<'a> Write for &'a Lockfile {
fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> {
self.raise_on_stale_lock()?;
let mut lock = self.lock.as_ref().unwrap();
lock.write(buf)
}
fn flush(&mut self) -> Result<(), io::Error> {
let mut lock = self.lock.as_ref().unwrap();
lock.flush()
}
}
impl Drop for Lockfile {
fn drop(&mut self) {
if self.lock.is_some() {
fs::remove_file(&self.lock_path).expect("Could not delete lockfile");
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/refs.rs | src/refs.rs | use crate::lockfile::Lockfile;
use crate::util;
use regex::{Regex, RegexSet};
use std::fs::{self, DirEntry, File};
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::cmp::{Ord, Ordering};
use std::collections::HashMap;
lazy_static! {
static ref INVALID_FILENAME: RegexSet = {
RegexSet::new(&[
r"^\.",
r"/\.",
r"\.\.",
r"/$",
r"\.lock$",
r"@\{",
r"[\x00-\x20*:?\[\\^~\x7f]",
])
.unwrap()
};
static ref SYMREF: Regex = Regex::new(r"^ref: (.+)$").unwrap();
}
#[derive(Debug, PartialEq, Eq, PartialOrd)]
pub enum Ref {
Ref { oid: String },
SymRef { path: String },
}
impl Ref {
pub fn is_head(&self) -> bool {
match self {
Ref::Ref { .. } => false,
Ref::SymRef { path } => path == "HEAD",
}
}
pub fn path(&self) -> &str {
match self {
Ref::Ref { .. } => unimplemented!(),
Ref::SymRef { path } => path,
}
}
}
impl Ord for Ref {
fn cmp(&self, other: &Ref) -> Ordering {
match (self, other) {
(Ref::Ref { .. }, Ref::SymRef { ..} ) => Ordering::Less,
(Ref::SymRef { .. }, Ref::Ref { ..} ) => Ordering::Greater,
(Ref::SymRef { path: a }, Ref::SymRef { path: b } ) => a.cmp(b),
(Ref::Ref { oid: a }, Ref::Ref { oid: b } ) => a.cmp(b),
}
}
}
pub struct Refs {
pathname: PathBuf,
}
impl Refs {
pub fn new(pathname: &Path) -> Refs {
Refs {
pathname: pathname.to_path_buf(),
}
}
fn head_path(&self) -> PathBuf {
(*self.pathname).join("HEAD")
}
fn refs_path(&self) -> PathBuf {
(*self.pathname).join("refs")
}
fn heads_path(&self) -> PathBuf {
(*self.pathname).join("refs/heads")
}
pub fn update_ref_file(&self, path: &Path, oid: &str) -> Result<(), std::io::Error> {
let mut lock = Lockfile::new(path);
lock.hold_for_update()?;
Self::write_lockfile(lock, &oid)
}
pub fn update_head(&self, oid: &str) -> Result<(), std::io::Error> {
self.update_symref(&self.head_path(), oid)
}
pub fn set_head(&self, revision: &str, oid: &str) -> Result<(), std::io::Error> {
let path = self.heads_path().join(revision);
if path.exists() {
let relative = util::relative_path_from(Path::new(&path), &self.pathname);
self.update_ref_file(&self.head_path(), &format!("ref: {}", relative))
} else {
self.update_ref_file(&self.head_path(), oid)
}
}
pub fn read_head(&self) -> Option<String> {
self.read_symref(&self.head_path())
}
fn path_for_name(&self, name: &str) -> Option<PathBuf> {
let prefixes = [self.pathname.clone(), self.refs_path(), self.heads_path()];
for prefix in &prefixes {
if prefix.join(name).exists() {
return Some(prefix.join(name));
}
}
None
}
pub fn read_ref(&self, name: &str) -> Option<String> {
if let Some(path) = self.path_for_name(name) {
self.read_symref(&path)
} else {
None
}
}
/// Folows chain of references to resolve to an object ID
pub fn read_oid(&self, r#ref: &Ref) -> Option<String> {
match r#ref {
Ref::Ref { oid } => Some(oid.to_string()),
Ref::SymRef { path } => self.read_ref(&path),
}
}
pub fn read_oid_or_symref(path: &Path) -> Option<Ref> {
if path.exists() {
let mut file = File::open(path).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if let Some(caps) = SYMREF.captures(&contents.trim()) {
Some(Ref::SymRef {
path: caps[1].to_string(),
})
} else {
Some(Ref::Ref {
oid: contents.trim().to_string(),
})
}
} else {
None
}
}
pub fn read_symref(&self, path: &Path) -> Option<String> {
let r#ref = Self::read_oid_or_symref(path);
match r#ref {
Some(Ref::SymRef { path }) => self.read_symref(&self.pathname.join(&path)),
Some(Ref::Ref { oid }) => Some(oid),
None => None,
}
}
pub fn update_symref(&self, path: &Path, oid: &str) -> Result<(), std::io::Error> {
let mut lock = Lockfile::new(path);
lock.hold_for_update()?;
let r#ref = Self::read_oid_or_symref(path);
match r#ref {
None | Some(Ref::Ref { .. }) => Self::write_lockfile(lock, &oid),
Some(Ref::SymRef { path }) => self.update_symref(&self.pathname.join(path), oid),
}
}
fn write_lockfile(mut lock: Lockfile, oid: &str) -> Result<(), io::Error> {
lock.write(&oid)?;
lock.write("\n")?;
lock.commit()
}
pub fn current_ref(&self, source: &str) -> Ref {
let r#ref = Self::read_oid_or_symref(&self.pathname.join(source));
match r#ref {
Some(Ref::SymRef { path }) => self.current_ref(&path),
Some(Ref::Ref { .. }) | None => Ref::SymRef {
path: source.to_string(),
},
}
}
pub fn create_branch(&self, branch_name: &str, start_oid: &str) -> Result<(), String> {
let path = self.heads_path().join(branch_name);
if INVALID_FILENAME.matches(branch_name).into_iter().count() > 0 {
return Err(format!("{} is not a valid branch name.\n", branch_name));
}
if path.as_path().exists() {
return Err(format!("A branch named {} already exists.\n", branch_name));
}
File::create(&path).expect("failed to create refs file for branch");
self.update_ref_file(&path, start_oid)
.map_err(|e| e.to_string())
}
pub fn list_branches(&self) -> Vec<Ref> {
self.list_refs(&self.heads_path())
}
fn name_to_symref(&self, name: DirEntry) -> Vec<Ref> {
let path = name.path();
if path.is_dir() {
self.list_refs(&path)
} else {
let path = util::relative_path_from(&path, &self.pathname);
vec![Ref::SymRef { path }]
}
}
fn list_refs(&self, dirname: &Path) -> Vec<Ref> {
fs::read_dir(self.pathname.join(dirname))
.expect("failed to read dir")
.flat_map(|name| self.name_to_symref(name.unwrap()))
.collect()
}
fn list_all_refs(&self) -> Vec<Ref> {
let mut all_refs = vec![Ref::SymRef { path: "HEAD".to_string() }];
let mut refs = self.list_refs(&self.refs_path());
all_refs.append(&mut refs);
all_refs
}
pub fn reverse_refs(&self) -> HashMap<String, Vec<Ref>> {
let mut table : HashMap<String, Vec<Ref>> = HashMap::new();
let all_refs = self.list_all_refs();
for r#ref in all_refs {
let oid = self.read_oid(&r#ref).unwrap(); // TODO: handle error
let oid_refs = table.get_mut(&oid);
if let Some(oid_refs) = oid_refs {
oid_refs.push(r#ref);
} else {
table.insert(oid, vec![r#ref]);
}
}
table
}
pub fn ref_short_name(&self, r#ref: &Ref) -> String {
match r#ref {
Ref::Ref { oid: _ } => unimplemented!(),
Ref::SymRef { path } => {
let path = self.pathname.join(path);
let dirs = [self.heads_path(), self.pathname.clone()];
let prefix = dirs.iter().find(|dir| {
path.parent()
.expect("failed to get parent")
.ancestors()
.any(|parent| &parent == dir)
});
let prefix = prefix.expect("could not find prefix");
util::relative_path_from(&path, prefix)
}
}
}
pub fn delete_branch(&self, branch_name: &str) -> Result<String, String> {
let path = self.heads_path().join(branch_name);
let mut lockfile = Lockfile::new(&path);
lockfile.hold_for_update().map_err(|e| e.to_string())?;
if let Some(oid) = self.read_symref(&path) {
fs::remove_file(path).map_err(|e| e.to_string())?;
// To remove the .lock file
lockfile.rollback().map_err(|e| e.to_string())?;
Ok(oid)
} else {
return Err(format!("branch {} not found", branch_name));
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/revision.rs | src/revision.rs | use crate::database::{commit, Database, ParsedObject};
use crate::repository::Repository;
use regex::{Regex, RegexSet};
use std::collections::HashMap;
use std::fmt;
lazy_static! {
static ref INVALID_NAME: RegexSet = {
RegexSet::new(&[
r"^\.",
r"/\.",
r"\.\.",
r"/$",
r"\.lock$",
r"@\{",
r"[\x00-\x20*:?\[\\^~\x7f]",
])
.unwrap()
};
static ref PARENT: Regex = Regex::new(r"^(.+)\^$").unwrap();
static ref ANCESTOR: Regex = Regex::new(r"^(.+)~(\d+)$").unwrap();
static ref REF_ALIASES: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert("@", "HEAD");
m
};
}
#[derive(Debug, Clone)]
pub struct HintedError {
pub message: String,
pub hint: Vec<String>,
}
impl fmt::Display for HintedError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "{}", self.message)?;
for line in &self.hint {
writeln!(f, "hint: {}", line)?;
}
Ok(())
}
}
#[derive(Debug, Clone)]
pub enum Rev {
Ref { name: String },
Parent { rev: Box<Rev> },
Ancestor { rev: Box<Rev>, n: i32 },
}
pub struct Revision<'a> {
repo: &'a mut Repository,
query: Rev,
expr: String,
errors: Vec<HintedError>,
}
impl<'a> Revision<'a> {
pub fn new(repo: &'a mut Repository, expr: &str) -> Revision<'a> {
Revision {
repo,
expr: expr.to_string(),
query: Self::parse(expr).expect("Revision parse failed"),
errors: vec![],
}
}
pub fn parse(revision: &str) -> Option<Rev> {
if let Some(caps) = PARENT.captures(revision) {
let rev = Revision::parse(&caps[1]).expect("parsing parent rev failed");
return Some(Rev::Parent { rev: Box::new(rev) });
} else if let Some(caps) = ANCESTOR.captures(revision) {
let rev = Revision::parse(&caps[1]).expect("parsing ancestor rev failed");
return Some(Rev::Ancestor {
rev: Box::new(rev),
n: i32::from_str_radix(&caps[2], 10).expect("could not parse ancestor number"),
});
} else if Revision::is_valid_ref(revision) {
let rev = REF_ALIASES.get(revision).unwrap_or(&revision);
Some(Rev::Ref {
name: rev.to_string(),
})
} else {
None
}
}
fn is_valid_ref(revision: &str) -> bool {
INVALID_NAME.matches(revision).into_iter().count() == 0
}
pub fn resolve(&mut self) -> Result<String, Vec<HintedError>> {
match self.resolve_query(self.query.clone()) {
Some(revision) => {
if self.load_commit(&revision).is_some() {
Ok(revision)
} else {
Err(self.errors.clone())
}
}
None => Err(self.errors.clone()),
}
}
/// Resolve Revision to commit object ID.
pub fn resolve_query(&mut self, query: Rev) -> Option<String> {
match query {
Rev::Ref { name } => self.read_ref(&name),
Rev::Parent { rev } => {
let oid = self.resolve_query(*rev).expect("Invalid parent rev");
self.commit_parent(&oid)
}
Rev::Ancestor { rev, n } => {
let mut oid = self.resolve_query(*rev).expect("Invalid ancestor rev");
for _ in 0..n {
if let Some(parent_oid) = self.commit_parent(&oid) {
oid = parent_oid
} else {
break;
}
}
Some(oid)
}
}
}
fn read_ref(&mut self, name: &str) -> Option<String> {
let symref = self.repo.refs.read_ref(name);
if symref.is_some() {
symref
} else {
let candidates = self.repo.database.prefix_match(name);
if candidates.len() == 1 {
Some(candidates[0].to_string())
} else {
if candidates.len() > 1 {
self.log_ambiguous_sha1(name, candidates);
}
None
}
}
}
fn log_ambiguous_sha1(&mut self, name: &str, mut candidates: Vec<String>) {
candidates.sort();
let message = format!("short SHA1 {} is ambiguous", name);
let mut hint = vec!["The candidates are:".to_string()];
for oid in candidates {
let object = self.repo.database.load(&oid);
let long_oid = object.get_oid();
let short = Database::short_oid(&long_oid);
let info = format!(" {} {}", short, object.obj_type());
let obj_message = if let ParsedObject::Commit(commit) = object {
format!(
"{} {} - {}",
info,
commit.author.short_date(),
commit.title_line()
)
} else {
info
};
hint.push(obj_message);
}
self.errors.push(HintedError { message, hint });
}
fn commit_parent(&mut self, oid: &str) -> Option<String> {
match self.load_commit(oid) {
Some(commit) => commit.parent.clone(),
None => None,
}
}
fn load_commit(&mut self, oid: &str) -> Option<&commit::Commit> {
match self.repo.database.load(oid) {
ParsedObject::Commit(commit) => Some(commit),
object => {
let message = format!("object {} is a {}, not a commit", oid, object.obj_type());
self.errors.push(HintedError {
message,
hint: vec![],
});
None
}
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/workspace.rs | src/workspace.rs | use crate::database::tree::{TreeEntry, TREE_MODE};
use crate::database::{Database, ParsedObject};
use crate::repository::migration::Action;
use std::collections::{BTreeSet, HashMap};
use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::io::BufReader;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
lazy_static! {
static ref IGNORE_PATHS: Vec<&'static str> = {
let v = vec![".git", "target"];
v
};
}
pub struct Workspace {
path: PathBuf,
}
impl Workspace {
pub fn new(path: &Path) -> Workspace {
Workspace {
path: path.to_path_buf(),
}
}
pub fn abs_path(&self, rel_path: &str) -> PathBuf {
self.path.join(rel_path)
}
pub fn is_dir(&self, rel_path: &str) -> bool {
self.abs_path(rel_path).is_dir()
}
/// List contents of directory. Does NOT list contents of
/// subdirectories
pub fn list_dir(&self, dir: &Path) -> Result<HashMap<String, fs::Metadata>, std::io::Error> {
let path = self.path.join(dir);
let entries = fs::read_dir(&path)?
.map(|f| f.unwrap().path())
.filter(|f| !IGNORE_PATHS.contains(&f.file_name().unwrap().to_str().unwrap()));
let mut stats = HashMap::new();
for name in entries {
let relative = self
.path
.join(&name)
.strip_prefix(&self.path)
.unwrap()
.to_str()
.unwrap()
.to_string();
let stat = self.stat_file(&relative).expect("stat file failed");
stats.insert(relative, stat);
}
Ok(stats)
}
/// Return list of files in dir. Nested files are flattened
/// strings eg. `a/b/c/inner.txt`
pub fn list_files(&self, dir: &Path) -> Result<Vec<String>, std::io::Error> {
if dir.is_file() {
return Ok(vec![dir
.strip_prefix(&self.path)
.unwrap()
.to_str()
.unwrap()
.to_string()]);
}
if IGNORE_PATHS.contains(&dir.file_name().unwrap().to_str().unwrap()) {
return Ok(vec![]);
}
let mut files = vec![];
for file in fs::read_dir(dir)? {
let path = file?.path();
files.extend_from_slice(&self.list_files(&path)?);
}
Ok(files)
}
// TODO: Should return bytes instead?
pub fn read_file(&self, file_name: &str) -> Result<String, std::io::Error> {
let file = File::open(self.path.as_path().join(file_name))?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(contents)
}
pub fn stat_file(&self, file_name: &str) -> Result<fs::Metadata, std::io::Error> {
fs::metadata(self.path.join(file_name))
}
pub fn apply_migration(
&self,
database: &mut Database,
changes: &HashMap<Action, Vec<(PathBuf, Option<TreeEntry>)>>,
rmdirs: &BTreeSet<PathBuf>,
mkdirs: &BTreeSet<PathBuf>,
) -> Result<(), String> {
self.apply_change_list(database, changes, Action::Delete)
.map_err(|e| e.to_string())?;
for dir in rmdirs.iter().rev() {
let dir_path = self.path.join(dir);
self.remove_directory(&dir_path).unwrap_or(());
}
for dir in mkdirs.iter() {
self.make_directory(dir).map_err(|e| e.to_string())?;
}
self.apply_change_list(database, changes, Action::Update)
.map_err(|e| e.to_string())?;
self.apply_change_list(database, changes, Action::Create)
.map_err(|e| e.to_string())
}
fn apply_change_list(
&self,
database: &mut Database,
changes: &HashMap<Action, Vec<(PathBuf, Option<TreeEntry>)>>,
action: Action,
) -> std::io::Result<()> {
let changes = changes.get(&action).unwrap().clone();
for (filename, entry) in changes {
let path = self.path.join(filename);
Self::remove_file_or_dir(&path)?;
if action == Action::Delete {
continue;
}
let mut file = OpenOptions::new()
.write(true)
.create_new(true)
.open(&path)?;
let entry = entry
.expect("entry missing for non-delete");
if entry.mode() != TREE_MODE {
let data = Self::blob_data(database, &entry.get_oid());
file.write_all(&data)?;
// Set mode
let metadata = file.metadata()?;
let mut permissions = metadata.permissions();
permissions.set_mode(entry.mode());
fs::set_permissions(path, permissions)?;
}
}
Ok(())
}
pub fn blob_data(database: &mut Database, oid: &str) -> Vec<u8> {
match database.load(oid) {
ParsedObject::Blob(blob) => blob.data.clone(),
_ => panic!("not a blob oid"),
}
}
fn remove_file_or_dir(path: &Path) -> std::io::Result<()> {
if path.is_dir() {
std::fs::remove_dir_all(path)
} else if path.is_file() {
std::fs::remove_file(path)
} else {
Ok(())
}
}
fn remove_directory(&self, path: &Path) -> std::io::Result<()> {
std::fs::remove_dir(path)?;
Ok(())
}
fn make_directory(&self, dirname: &Path) -> std::io::Result<()> {
let path = self.path.join(dirname);
if let Ok(stat) = self.stat_file(dirname.to_str().expect("conversion to str failed")) {
if stat.is_file() {
std::fs::remove_file(&path)?;
}
if !stat.is_dir() {
std::fs::create_dir(&path)?;
}
} else {
std::fs::create_dir(&path)?;
}
Ok(())
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/index.rs | src/index.rs | use crypto::digest::Digest;
use crypto::sha1::Sha1;
use std::cmp;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::convert::TryInto;
use std::fs::{self, File, OpenOptions};
use std::io::{self, ErrorKind, Read, Write};
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
use std::str;
use crate::lockfile::Lockfile;
use crate::util::*;
const MAX_PATH_SIZE: u16 = 0xfff;
const CHECKSUM_SIZE: u64 = 20;
const HEADER_SIZE: usize = 12; // bytes
const MIN_ENTRY_SIZE: usize = 64;
#[derive(Debug, Clone)]
pub struct Entry {
ctime: i64,
ctime_nsec: i64,
mtime: i64,
mtime_nsec: i64,
dev: u64,
ino: u64,
uid: u32,
gid: u32,
size: u64,
flags: u16,
pub mode: u32,
pub oid: String,
pub path: String,
}
impl Entry {
fn is_executable(mode: u32) -> bool {
(mode >> 6) & 0b1 == 1
}
fn mode(mode: u32) -> u32 {
if Entry::is_executable(mode) {
0o100755u32
} else {
0o100644u32
}
}
fn new(pathname: &str, oid: &str, metadata: &fs::Metadata) -> Entry {
let path = pathname.to_string();
Entry {
ctime: metadata.ctime(),
ctime_nsec: metadata.ctime_nsec(),
mtime: metadata.mtime(),
mtime_nsec: metadata.mtime_nsec(),
dev: metadata.dev(),
ino: metadata.ino(),
mode: Entry::mode(metadata.mode()),
uid: metadata.uid(),
gid: metadata.gid(),
size: metadata.size(),
oid: oid.to_string(),
flags: cmp::min(path.len() as u16, MAX_PATH_SIZE),
path,
}
}
fn parse(bytes: &[u8]) -> Result<Entry, std::io::Error> {
let mut metadata_ints: Vec<u32> = vec![];
for i in 0..10 {
metadata_ints.push(u32::from_be_bytes(
bytes[i * 4..i * 4 + 4].try_into().unwrap(),
));
}
let oid = encode_hex(&bytes[40..60]);
let flags = u16::from_be_bytes(bytes[60..62].try_into().unwrap());
let path_bytes = bytes[62..].split(|b| b == &0u8).next().unwrap();
let path = str::from_utf8(path_bytes).unwrap().to_string();
Ok(Entry {
ctime: i64::from(metadata_ints[0]),
ctime_nsec: i64::from(metadata_ints[1]),
mtime: i64::from(metadata_ints[2]),
mtime_nsec: i64::from(metadata_ints[3]),
dev: u64::from(metadata_ints[4]),
ino: u64::from(metadata_ints[5]),
mode: metadata_ints[6],
uid: metadata_ints[7],
gid: metadata_ints[8],
size: u64::from(metadata_ints[9]),
oid,
flags,
path,
})
}
fn to_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::new();
// 10 32-bit integers
bytes.extend_from_slice(&(self.ctime as u32).to_be_bytes());
bytes.extend_from_slice(&(self.ctime_nsec as u32).to_be_bytes());
bytes.extend_from_slice(&(self.mtime as u32).to_be_bytes());
bytes.extend_from_slice(&(self.mtime_nsec as u32).to_be_bytes());
bytes.extend_from_slice(&(self.dev as u32).to_be_bytes());
bytes.extend_from_slice(&(self.ino as u32).to_be_bytes());
bytes.extend_from_slice(&(self.mode as u32).to_be_bytes());
bytes.extend_from_slice(&(self.uid as u32).to_be_bytes());
bytes.extend_from_slice(&(self.gid as u32).to_be_bytes());
bytes.extend_from_slice(&(self.size as u32).to_be_bytes());
// 20 bytes (40-char hex-string)
bytes.extend_from_slice(&decode_hex(&self.oid).expect("invalid oid"));
// 16-bit
bytes.extend_from_slice(&self.flags.to_be_bytes());
bytes.extend_from_slice(self.path.as_bytes());
bytes.push(0x0);
// add padding
while bytes.len() % 8 != 0 {
bytes.push(0x0)
}
bytes
}
fn parent_dirs(&self) -> Vec<&str> {
let path = Path::new(&self.path);
let mut parent_dirs: Vec<_> = path
.ancestors()
.map(|d| d.to_str().expect("invalid filename"))
.collect();
parent_dirs.pop(); // drop root dir(always "")
parent_dirs.reverse();
parent_dirs.pop(); // drop filename
parent_dirs
}
pub fn stat_match(&self, stat: &fs::Metadata) -> bool {
(self.mode == Entry::mode(stat.mode())) && (self.size == 0 || self.size == stat.size())
}
pub fn times_match(&self, stat: &fs::Metadata) -> bool {
self.ctime == stat.ctime()
&& self.ctime_nsec == stat.ctime_nsec()
&& self.mtime == stat.mtime()
&& self.mtime_nsec == stat.mtime_nsec()
}
pub fn update_stat(&mut self, stat: &fs::Metadata) {
self.ctime = stat.ctime();
self.ctime_nsec = stat.ctime_nsec();
self.mtime = stat.mtime();
self.mtime_nsec = stat.mtime_nsec();
self.dev = stat.dev();
self.ino = stat.ino();
self.mode = Entry::mode(stat.mode());
self.uid = stat.uid();
self.gid = stat.gid();
self.size = stat.size();
}
}
pub struct Checksum<T>
where
T: Read + Write,
{
file: T,
digest: Sha1,
}
impl<T> Checksum<T>
where
T: Read + Write,
{
fn new(file: T) -> Checksum<T> {
Checksum {
file,
digest: Sha1::new(),
}
}
fn read(&mut self, size: usize) -> Result<Vec<u8>, std::io::Error> {
let mut buf = vec![0; size];
self.file.read_exact(&mut buf)?;
self.digest.input(&buf);
Ok(buf)
}
fn write(&mut self, data: &[u8]) -> Result<(), std::io::Error> {
self.file.write_all(data)?;
self.digest.input(data);
Ok(())
}
fn write_checksum(&mut self) -> Result<(), std::io::Error> {
self.file
.write_all(&decode_hex(&self.digest.result_str()).unwrap())?;
Ok(())
}
fn verify_checksum(&mut self) -> Result<(), std::io::Error> {
let hash = self.digest.result_str();
let mut buf = vec![0; CHECKSUM_SIZE as usize];
self.file.read_exact(&mut buf)?;
let sum = encode_hex(&buf);
if sum != hash {
return Err(io::Error::new(
ErrorKind::Other,
"Checksum does not match value stored on disk",
));
}
Ok(())
}
}
pub struct Index {
pathname: PathBuf,
pub entries: BTreeMap<String, Entry>,
parents: HashMap<String, HashSet<String>>,
lockfile: Lockfile,
hasher: Option<Sha1>,
changed: bool,
}
impl Index {
pub fn new(path: &Path) -> Index {
Index {
pathname: path.to_path_buf(),
entries: BTreeMap::new(),
parents: HashMap::new(),
lockfile: Lockfile::new(path),
hasher: None,
changed: false,
}
}
pub fn write_updates(&mut self) -> Result<(), std::io::Error> {
if !self.changed {
return self.lockfile.rollback();
}
let lock = &mut self.lockfile;
let mut writer: Checksum<&Lockfile> = Checksum::new(lock);
let mut header_bytes: Vec<u8> = vec![];
header_bytes.extend_from_slice(b"DIRC");
header_bytes.extend_from_slice(&2u32.to_be_bytes()); // version no.
header_bytes.extend_from_slice(&(self.entries.len() as u32).to_be_bytes());
writer.write(&header_bytes)?;
for (_key, entry) in self.entries.iter() {
writer.write(&entry.to_bytes())?;
}
writer.write_checksum()?;
lock.commit()?;
Ok(())
}
/// Remove any entries whose name matches the name of one of the
/// new entry's parent directories
pub fn discard_conflicts(&mut self, entry: &Entry) {
for parent in entry.parent_dirs() {
self.remove_entry(parent);
}
let to_remove = {
let mut children = vec![];
if let Some(children_set) = self.parents.get(&entry.path) {
for child in children_set {
children.push(child.clone())
}
}
children
};
for child in to_remove {
self.remove_entry(&child);
}
}
pub fn remove(&mut self, pathname: &str) {
if let Some(children) = self.parents.get(pathname).cloned() {
for child in children {
self.remove_entry(&child);
}
}
self.remove_entry(pathname);
self.changed = true;
}
fn remove_entry(&mut self, pathname: &str) {
if let Some(entry) = self.entries.remove(pathname) {
for dirname in entry.parent_dirs() {
if let Some(ref mut children_set) = self.parents.get_mut(dirname) {
children_set.remove(pathname);
if children_set.is_empty() {
self.parents.remove(dirname);
}
}
}
}
}
pub fn add(&mut self, pathname: &str, oid: &str, metadata: &fs::Metadata) {
let entry = Entry::new(pathname, oid, metadata);
self.discard_conflicts(&entry);
self.store_entry(entry);
self.changed = true;
}
pub fn store_entry(&mut self, entry: Entry) {
self.entries.insert(entry.path.clone(), entry.clone());
for dirname in entry.parent_dirs() {
if let Some(ref mut children_set) = self.parents.get_mut(dirname) {
children_set.insert(entry.path.clone());
} else {
let mut h = HashSet::new();
h.insert(entry.path.clone());
self.parents.insert(dirname.to_string(), h);
}
}
}
pub fn load_for_update(&mut self) -> Result<(), std::io::Error> {
self.lockfile.hold_for_update()?;
self.load()?;
Ok(())
}
fn clear(&mut self) {
self.entries = BTreeMap::new();
self.hasher = None;
self.parents = HashMap::new();
self.changed = false;
}
fn open_index_file(&self) -> Option<File> {
if self.pathname.exists() {
OpenOptions::new()
.read(true)
.open(self.pathname.clone())
.ok()
} else {
None
}
}
fn read_header(checksum: &mut Checksum<File>) -> usize {
let data = checksum
.read(HEADER_SIZE)
.expect("could not read checksum header");
let signature = str::from_utf8(&data[0..4]).expect("invalid signature");
let version = u32::from_be_bytes(data[4..8].try_into().unwrap());
let count = u32::from_be_bytes(data[8..12].try_into().unwrap());
if signature != "DIRC" {
panic!("Signature: expected 'DIRC', but found {}", signature);
}
if version != 2 {
panic!("Version: expected '2', but found {}", version);
}
count as usize
}
/// Read `count` index entries from `checksum` and store each one.
///
/// Entries are variable-length: the fixed minimum is read first, then
/// additional 8-byte blocks are pulled until the final byte is NUL
/// (on-disk entries are NUL-terminated and padded to a multiple of 8).
fn read_entries(
&mut self,
checksum: &mut Checksum<File>,
count: usize,
) -> Result<(), std::io::Error> {
for _i in 0..count {
let mut entry = checksum.read(MIN_ENTRY_SIZE)?;
// `entry` is non-empty here (MIN_ENTRY_SIZE bytes were just read),
// so `last().unwrap()` cannot panic.
while entry.last().unwrap() != &0u8 {
entry.extend_from_slice(&checksum.read(8)?);
}
self.store_entry(Entry::parse(&entry)?);
}
Ok(())
}
/// Reset in-memory state and re-read the on-disk index, if one exists.
/// A missing index file is treated as an empty index. The trailing
/// checksum is verified after all entries have been consumed.
pub fn load(&mut self) -> Result<(), std::io::Error> {
    self.clear();
    match self.open_index_file() {
        None => Ok(()),
        Some(file) => {
            let mut checksum = Checksum::new(file);
            let entry_count = Index::read_header(&mut checksum);
            self.read_entries(&mut checksum, entry_count)?;
            checksum.verify_checksum()?;
            Ok(())
        }
    }
}
/// Abandon a pending index update by rolling back the lockfile.
pub fn release_lock(&mut self) -> Result<(), std::io::Error> {
self.lockfile.rollback()
}
/// True if `pathname` is itself a staged file entry.
pub fn is_tracked_file(&self, pathname: &str) -> bool {
self.entries.contains_key(pathname)
}
/// True if `pathname` is a staged file, or a directory that contains
/// at least one staged file (via the `parents` reverse index).
pub fn is_tracked(&self, pathname: &str) -> bool {
self.is_tracked_file(pathname) || self.parents.contains_key(pathname)
}
/// Refresh an entry's cached stat data and mark the index dirty so the
/// updated timestamps get written back.
pub fn update_entry_stat(&mut self, entry: &mut Entry, stat: &fs::Metadata) {
entry.update_stat(stat);
self.changed = true;
}
/// Look up the staged entry for `path`, if any.
pub fn entry_for_path(&self, path: &str) -> Option<&Entry> {
self.entries.get(path)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::database::blob::Blob;
use crate::database::object::Object;
use crate::repository::Repository;
use rand::random;
use std::process::Command;
// Exercises Index::add conflict resolution: adding a path must evict
// any entries that can no longer coexist with it (file vs directory).
#[test]
fn add_files_to_index() -> Result<(), std::io::Error> {
// Add a file to an index and check that it's there
let mut temp_dir = generate_temp_name();
temp_dir.push_str("_jit_test");
let root_path = Path::new("/tmp").join(temp_dir);
let mut repo = Repository::new(&root_path);
fs::create_dir(&root_path)?;
// Random 20-byte oid, hex-encoded; contents don't matter for these tests.
let oid = encode_hex(&(0..20).map(|_n| random::<u8>()).collect::<Vec<u8>>());
let f1_filename = "alice.txt";
let f1_path = root_path.join(f1_filename);
File::create(&f1_path)?.write(b"file 1")?;
let stat = repo.workspace.stat_file(f1_filename)?;
{
repo.index.clear();
repo.index.add(f1_filename, &oid, &stat);
let index_entry_paths: Vec<&String> =
repo.index.entries.iter().map(|(path, _)| path).collect();
assert_eq!(vec![f1_filename], index_entry_paths);
}
// Replace file with directory
{
repo.index.clear();
repo.index.add("alice.txt", &oid, &stat);
repo.index.add("alice.txt/nested.txt", &oid, &stat);
repo.index.add("bob.txt", &oid, &stat);
let index_entry_paths: Vec<&String> =
repo.index.entries.iter().map(|(path, _)| path).collect();
assert_eq!(vec!["alice.txt/nested.txt", "bob.txt"], index_entry_paths);
}
// Replace directory with file
{
repo.index.clear();
repo.index.add("alice.txt", &oid, &stat);
repo.index.add("nested/bob.txt", &oid, &stat);
repo.index.add("nested", &oid, &stat);
let index_entry_paths: Vec<&String> =
repo.index.entries.iter().map(|(path, _)| path).collect();
assert_eq!(vec!["alice.txt", "nested"], index_entry_paths);
}
// Replace directory(with subdirectories) with file
{
repo.index.clear();
repo.index.add("alice.txt", &oid, &stat);
repo.index.add("nested/bob.txt", &oid, &stat);
repo.index.add("nested/inner/claire.txt", &oid, &stat);
repo.index.add("nested", &oid, &stat);
let index_entry_paths: Vec<&String> =
repo.index.entries.iter().map(|(path, _)| path).collect();
assert_eq!(vec!["alice.txt", "nested"], index_entry_paths);
}
// Cleanup
fs::remove_dir_all(&root_path)?;
Ok(())
}
// Golden test against real git: requires a `git` binary on PATH.
#[test]
fn emit_index_file_same_as_stock_git() -> Result<(), std::io::Error> {
// Create index file, using "stock" git and our implementation and
// check that they are byte-for-byte equal
let mut temp_dir = generate_temp_name();
temp_dir.push_str("_jit_test");
let root_path = Path::new("/tmp").join(temp_dir);
let mut repo = Repository::new(&root_path);
fs::create_dir(&root_path)?;
let git_path = root_path.join(".git");
fs::create_dir(&git_path)?;
repo.index.load_for_update()?;
// Create some files
File::create(root_path.join("f1.txt"))?.write(b"file 1")?;
File::create(root_path.join("f2.txt"))?.write(b"file 2")?;
// Create an index out of those files
for pathname in repo.workspace.list_files(&root_path)? {
let data = repo.workspace.read_file(&pathname)?;
let stat = repo.workspace.stat_file(&pathname)?;
let blob = Blob::new(data.as_bytes());
repo.database.store(&blob)?;
repo.index.add(&pathname, &blob.get_oid(), &stat);
}
repo.index.write_updates()?;
// Store contents of our index file
let mut our_index = File::open(&git_path.join("index"))?;
let mut our_index_contents = Vec::new();
our_index.read_to_end(&mut our_index_contents)?;
// Remove .git dir that we created
fs::remove_dir_all(&git_path)?;
// Create index using "stock" git
let _git_init_output = Command::new("git")
.current_dir(&root_path)
.arg("init")
.arg(".")
.output();
let _git_output = Command::new("git")
.current_dir(&root_path)
.arg("add")
.arg(".")
.output();
let mut git_index = File::open(&git_path.join("index"))?;
let mut git_index_contents = Vec::new();
git_index.read_to_end(&mut git_index_contents)?;
assert_eq!(our_index_contents, git_index_contents);
// Cleanup
fs::remove_dir_all(&root_path)?;
Ok(())
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/util.rs | src/util.rs | use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use std::fmt::Write;
use std::num::ParseIntError;
use std::path::Path;
/// Decode a lowercase/uppercase hex string (e.g. "cafe") into raw bytes.
///
/// Returns the first parse error if any 2-character chunk is not valid
/// hex. Assumes `s` has even length and ASCII content; an odd-length or
/// non-ASCII string panics on the slice boundary, as before.
pub fn decode_hex(s: &str) -> Result<Vec<u8>, ParseIntError> {
    let mut bytes = Vec::with_capacity(s.len() / 2);
    let mut i = 0;
    while i < s.len() {
        bytes.push(u8::from_str_radix(&s[i..i + 2], 16)?);
        i += 2;
    }
    Ok(bytes)
}
/// Encode a byte slice as a lowercase hex string ("00".."ff" per byte).
pub fn encode_hex(bytes: &[u8]) -> String {
    bytes
        .iter()
        .fold(String::with_capacity(bytes.len() * 2), |mut out, byte| {
            write!(&mut out, "{:02x}", byte).expect("hex encoding failed");
            out
        })
}
/// Produce a random 6-character alphanumeric name, used by tests to
/// build unique temporary directory names under /tmp.
pub fn generate_temp_name() -> String {
thread_rng().sample_iter(&Alphanumeric).take(6).collect()
}
/// Render `path` relative to the base directory `from`, as a String.
///
/// Panics if `from` is not a prefix of `path`, or if the remainder is
/// not valid UTF-8 — same contract as before.
pub fn relative_path_from(path: &Path, from: &Path) -> String {
    let relative = path.strip_prefix(from).unwrap();
    relative.to_str().unwrap().to_string()
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/main.rs | src/main.rs | extern crate chrono;
extern crate crypto;
extern crate flate2;
extern crate rand;
#[macro_use]
extern crate lazy_static;
extern crate regex;
extern crate clap;
use std::collections::HashMap;
use std::env;
use std::io::{self, Write};
mod lockfile;
mod database;
mod index;
mod refs;
mod repository;
mod util;
mod workspace;
mod diff;
mod pager;
mod revision;
mod commands;
use commands::{execute, get_app, CommandContext};
/// CLI entry point: build a CommandContext from the process environment,
/// parse arguments with clap, and dispatch to the chosen subcommand.
/// On failure the error text is written to stderr and the process exits
/// with status 128 (git's convention for usage/fatal errors).
fn main() {
let ctx = CommandContext {
dir: env::current_dir().unwrap(),
env: &env::vars().collect::<HashMap<String, String>>(),
options: None,
stdin: io::stdin(),
stdout: io::stdout(),
stderr: io::stderr(),
};
let matches = get_app().get_matches();
match execute(matches, ctx) {
Ok(_) => (),
Err(msg) => {
io::stderr().write_all(msg.as_bytes()).unwrap();
std::process::exit(128);
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/pager.rs | src/pager.rs | use std::collections::HashMap;
use std::ffi::OsString;
// Fallback pager command when neither GIT_PAGER nor PAGER is set.
const PAGER_CMD: &str = "less";
lazy_static! {
// Environment variables exported before exec'ing the pager:
// LESS=FRX (quit if one screen, raw control chars, no termcap init)
// and LV=-c, mirroring git's own pager defaults.
static ref PAGER_ENV: HashMap<&'static str, &'static str> = {
let mut m = HashMap::new();
m.insert("LESS", "FRX");
m.insert("LV", "-c");
m
};
}
// Thin wrappers around libc process/fd primitives used to spawn a pager.
mod utils {
use std::ffi::{CString, OsString};
use std::os::unix::ffi::OsStringExt;
use std::ptr;
use errno;
use libc;
// Split a command line on whitespace into argv-style words; a
// non-UTF-8 OsString is passed through unchanged as a single word.
fn split_string(s: &OsString) -> Vec<OsString> {
match s.clone().into_string() {
Ok(cmd) => cmd.split_whitespace().map(OsString::from).collect(),
Err(cmd) => vec![cmd],
}
}
// Create a pipe, returning (read_fd, write_fd). Panics on failure.
pub fn pipe() -> (i32, i32) {
let mut fds = [0; 2];
assert_eq!(unsafe { libc::pipe(fds.as_mut_ptr()) }, 0);
(fds[0], fds[1])
}
// Close a file descriptor; panics on failure.
pub fn close(fd: i32) {
assert_eq!(unsafe { libc::close(fd) }, 0);
}
// Duplicate fd1 onto fd2 (dup2 semantics); panics on failure.
pub fn dup2(fd1: i32, fd2: i32) {
assert!(unsafe { libc::dup2(fd1, fd2) } > -1);
}
fn osstring2cstring(s: OsString) -> CString {
unsafe { CString::from_vec_unchecked(s.into_vec()) }
}
// exec the given command line, replacing the current process image on
// success. On failure execvp simply returns; errno is cleared first so
// a caller could inspect it. NOTE(review): the failure case is not
// reported to the caller here.
pub fn execvp(cmd: &OsString) {
let cstrings = split_string(cmd)
.into_iter()
.map(osstring2cstring)
.collect::<Vec<_>>();
let mut args = cstrings.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
args.push(ptr::null());
errno::set_errno(errno::Errno(0));
unsafe { libc::execvp(args[0], args.as_ptr()) };
}
// Helper wrappers around libc::* API
pub fn fork() -> libc::pid_t {
unsafe { libc::fork() }
}
}
/// Routes subsequent stdout output through an external pager process.
pub struct Pager;
impl Pager {
/// Fork and wire stdout through a pager.
///
/// Pager command resolution: $GIT_PAGER, then $PAGER, then "less".
/// After the fork, the CHILD keeps running this program with its
/// stdout redirected into the pipe, while the PARENT process becomes
/// the pager: it dups the pipe's read end onto stdin and execs the
/// pager command. Statement order here (dup2 before close, exec last)
/// is load-bearing.
pub fn setup_pager() {
let (git_pager, pager) = (std::env::var("GIT_PAGER"), std::env::var("PAGER"));
let cmd = match (git_pager, pager) {
(Ok(git_pager), _) => git_pager,
(_, Ok(pager)) => pager,
_ => PAGER_CMD.to_string(),
};
let pager_cmd = OsString::from(cmd);
// Export pager-friendly defaults (LESS=FRX etc.) before exec.
for (k, v) in PAGER_ENV.iter() {
std::env::set_var(k, v);
}
let (pager_stdin, main_stdout) = utils::pipe();
let pager_pid = utils::fork();
match pager_pid {
-1 => {
// Fork failed
utils::close(pager_stdin);
utils::close(main_stdout);
}
0 => {
// Child
utils::dup2(main_stdout, libc::STDOUT_FILENO);
utils::close(pager_stdin);
}
_ => {
// Parent-- executes pager
utils::dup2(pager_stdin, libc::STDIN_FILENO);
utils::close(main_stdout);
utils::execvp(&pager_cmd);
}
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/branch.rs | src/commands/branch.rs | use crate::commands::CommandContext;
use crate::database::object::Object;
use crate::database::{Database, ParsedObject};
use crate::pager::Pager;
use crate::refs::Ref;
use crate::repository::Repository;
use crate::revision::Revision;
use colored::*;
use std::io::{Read, Write};
/// Implements the `branch` subcommand: list, create, and delete branches.
pub struct Branch<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
// Repository opened at the context's working directory.
repo: Repository,
// CLI options and I/O streams for this invocation.
ctx: CommandContext<'a, I, O, E>,
}
impl<'a, I, O, E> Branch<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
/// Build a `Branch` command bound to the repository rooted at the
/// context's working directory.
pub fn new(ctx: CommandContext<'a, I, O, E>) -> Branch<'a, I, O, E> {
let working_dir = &ctx.dir;
let root_path = working_dir.as_path();
let repo = Repository::new(&root_path);
Branch { repo, ctx }
}
/// Dispatch based on CLI options: delete (-d/-D), list (no args), or
/// create (`branch <name> [start-point]`).
pub fn run(&mut self) -> Result<(), String> {
let options = self.ctx.options.as_ref().unwrap().clone();
let args: Vec<_> = if let Some(args) = options.values_of("args") {
args.collect()
} else {
vec![]
};
if options.is_present("delete") || options.is_present("force_delete") {
self.delete_branches(args)?;
} else if args.is_empty() {
self.list_branches()?;
} else {
let branch_name = args.get(0).expect("no branch name provided");
let start_point = args.get(1);
self.create_branch(branch_name, start_point)?;
}
Ok(())
}
/// Print all branches sorted by short name through the pager, marking
/// the current branch with `*` in green.
fn list_branches(&mut self) -> Result<(), String> {
let current = self.repo.refs.current_ref("HEAD");
let mut branches = self.repo.refs.list_branches();
branches.sort();
// Widest short name, used to align the --verbose columns.
let max_width = branches
.iter()
.map(|b| self.repo.refs.ref_short_name(b).len())
.max()
.unwrap_or(0);
Pager::setup_pager();
for r#ref in branches {
let info = self.format_ref(&r#ref, &current);
let extended_info = self.extended_branch_info(&r#ref, max_width);
println!("{}{}", info, extended_info);
}
Ok(())
}
/// One listing line for a branch: `* name` (green) if current, else
/// two-space indent plus the short name.
fn format_ref(&self, r#ref: &Ref, current: &Ref) -> String {
if r#ref == current {
format!("* {}", self.repo.refs.ref_short_name(r#ref).green())
} else {
format!("  {}", self.repo.refs.ref_short_name(r#ref))
}
}
/// With --verbose, append the abbreviated tip oid and commit title,
/// padded so all branches line up; otherwise the empty string.
fn extended_branch_info(&mut self, r#ref: &Ref, max_width: usize) -> String {
if self
.ctx
.options
.as_ref()
.map(|o| o.is_present("verbose"))
.unwrap_or(false)
{
let oid = self
.repo
.refs
.read_oid(r#ref)
.expect("unable to resolve branch to oid");
let commit = if let ParsedObject::Commit(commit) = self.repo.database.load(&oid) {
commit
} else {
panic!("branch ref was not pointing to commit");
};
let oid = commit.get_oid();
let short = Database::short_oid(&oid);
let ref_short_name = self.repo.refs.ref_short_name(r#ref);
format!(
"{:width$}{} {}",
" ",
short,
commit.title_line(),
width = (max_width - ref_short_name.len() + 1)
)
} else {
"".to_string()
}
}
/// Create a branch at `start_point` (a revision expression) or, when
/// absent, at the current HEAD. Revision-resolution errors are joined
/// into a git-style "error:/hint:" message.
fn create_branch(
&mut self,
branch_name: &str,
start_point: Option<&&str>,
) -> Result<(), String> {
let start_point = if start_point.is_none() {
self.repo.refs.read_head().expect("empty HEAD")
} else {
match Revision::new(&mut self.repo, start_point.unwrap()).resolve() {
Ok(rev) => rev,
Err(errors) => {
let mut v = vec![];
for error in errors {
v.push(format!("error: {}", error.message));
for h in error.hint {
v.push(format!("hint: {}", h));
}
}
v.push("\n".to_string());
return Err(v.join("\n"));
}
}
};
self.repo.refs.create_branch(branch_name, &start_point)?;
Ok(())
}
/// Delete each named branch in turn, stopping at the first error.
fn delete_branches(&mut self, branch_names: Vec<&str>) -> Result<(), String> {
for branch in branch_names {
self.delete_branch(branch)?;
}
Ok(())
}
/// Delete a single branch and report its former tip oid.
///
/// NOTE(review): without -f/-D this silently does nothing and returns
/// Ok — the "is the branch merged?" check a plain -d would need is not
/// implemented here. Confirm this is the intended stopgap.
fn delete_branch(&mut self, branch_name: &str) -> Result<(), String> {
let force = self
.ctx
.options
.as_ref()
.map(|o| o.is_present("force") || o.is_present("force_delete"))
.unwrap_or(false);
if !force {
return Ok(());
}
let oid = self.repo.refs.delete_branch(branch_name)?;
let short = Database::short_oid(&oid);
println!("Deleted branch {} (was {})", branch_name, short);
Ok(())
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/diff.rs | src/commands/diff.rs | use crate::commands::CommandContext;
use crate::database::blob::Blob;
use crate::database::object::Object;
use crate::database::{Database, ParsedObject};
use crate::diff;
use crate::diff::myers::{Edit, EditType};
use crate::pager::Pager;
use crate::repository::{ChangeType, Repository};
use colored::*;
use std::io::{Read, Write};
use std::os::unix::fs::MetadataExt;
// Placeholder oid shown for the missing side of an add/delete diff.
const NULL_OID: &str = "0000000";
// NOTE(review): NULL_PATH appears unused in this file's visible code.
const NULL_PATH: &str = "/dev/null";
/// Implements the `diff` subcommand: workspace-vs-index by default, or
/// index-vs-HEAD with --cached.
pub struct Diff<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
// Repository opened at the context's working directory.
repo: Repository,
// CLI options and I/O streams for this invocation.
ctx: CommandContext<'a, I, O, E>,
}
// One side (old "a/" or new "b/") of a file-level diff.
struct Target {
// Path as displayed; print_diff later prefixes it with a/ or b/.
path: String,
// Object id, or NULL_OID when this side does not exist.
oid: String,
// File mode; None when this side does not exist (add/delete).
mode: Option<u32>,
// Full file text used to compute hunks.
data: String,
}
impl<'a, I, O, E> Diff<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
/// Build a `Diff` command bound to the repository rooted at the
/// context's working directory.
pub fn new(ctx: CommandContext<'a, I, O, E>) -> Diff<'a, I, O, E> {
let working_dir = &ctx.dir;
let root_path = working_dir.as_path();
let repo = Repository::new(&root_path);
Diff { ctx, repo }
}
/// Load the index, compute repository status, start the pager, then
/// print either HEAD-vs-index (--cached) or index-vs-workspace diffs.
pub fn run(&mut self) -> Result<(), String> {
self.repo.index.load().map_err(|e| e.to_string())?;
self.repo.initialize_status()?;
Pager::setup_pager();
if self
.ctx
.options
.as_ref()
.map(|o| o.is_present("cached"))
.unwrap_or(false)
{
self.diff_head_index()
} else {
self.diff_index_workspace()
}
}
/// Print one diff per staged change (HEAD vs index).
fn diff_head_index(&mut self) -> Result<(), String> {
for (path, state) in &self.repo.index_changes.clone() {
match state {
ChangeType::Added => {
let b = self.from_index(path);
self.print_diff(self.from_nothing(path), b)?;
}
ChangeType::Modified => {
let a = self.from_head(path);
let b = self.from_index(path);
self.print_diff(a, b)?;
}
ChangeType::Deleted => {
let a = self.from_head(path);
self.print_diff(a, self.from_nothing(path))?;
}
// Other change types (e.g. renames) are not implemented yet.
state => panic!("NYI: {:?}", state),
}
}
Ok(())
}
/// Print one diff per unstaged change (index vs working tree).
fn diff_index_workspace(&mut self) -> Result<(), String> {
for (path, state) in &self.repo.workspace_changes.clone() {
match state {
ChangeType::Added => {
self.print_diff(self.from_nothing(path), self.from_file(path))?;
}
ChangeType::Modified => {
let a = self.from_index(path);
self.print_diff(a, self.from_file(path))?;
}
ChangeType::Deleted => {
let a = self.from_index(path);
self.print_diff(a, self.from_nothing(path))?;
}
// Other change types (e.g. renames) are not implemented yet.
state => panic!("NYI: {:?}", state),
}
}
Ok(())
}
/// Print a whole file diff: the "diff --git" header, mode lines, then
/// content hunks. Skips entirely when oid and mode are both unchanged.
fn print_diff(&mut self, mut a: Target, mut b: Target) -> Result<(), String> {
if a.oid == b.oid && a.mode == b.mode {
return Ok(());
}
a.path = format!("a/{}", a.path);
b.path = format!("b/{}", b.path);
println!("{}", format!("diff --git {} {}", a.path, b.path).bold());
self.print_diff_mode(&a, &b)?;
self.print_diff_content(&a, &b)
}
/// Print "new file mode"/"deleted file mode"/"old mode"+"new mode"
/// lines as appropriate; nothing when the mode is unchanged.
fn print_diff_mode(&mut self, a: &Target, b: &Target) -> Result<(), String> {
if a.mode == None {
println!(
"{}",
format!("new file mode {:o}", b.mode.expect("missing mode")).bold()
);
} else if b.mode == None {
println!(
"{}",
format!("deleted file mode {:o}", a.mode.expect("missing mode")).bold()
);
} else if a.mode != b.mode {
println!(
"{}",
format!("old mode {:o}", a.mode.expect("missing mode")).bold()
);
println!(
"{}",
format!("new mode {:o}", b.mode.expect("missing mode")).bold()
);
}
Ok(())
}
/// Print the "index a..b", "---", "+++" header lines and then each
/// hunk; skipped entirely when contents (oids) are identical.
fn print_diff_content(&mut self, a: &Target, b: &Target) -> Result<(), String> {
if a.oid == b.oid {
return Ok(());
}
println!(
"{}",
format!(
"index {}..{}{}",
short(&a.oid),
short(&b.oid),
// The mode rides on the index line only when it is unchanged.
if a.mode == b.mode {
format!(" {:o}", a.mode.expect("Missing mode"))
} else {
"".to_string()
}
)
.bold()
);
println!("{}", format!("--- {}", a.path).bold());
println!("{}", format!("+++ {}", b.path).bold());
let hunks = diff::Diff::diff_hunks(&a.data, &b.data);
for h in hunks {
self.print_diff_hunk(h).map_err(|e| e.to_string())?;
}
Ok(())
}
/// Print one edit line, colored by kind: + green, - red, context plain.
fn print_diff_edit(&mut self, edit: Edit) -> Result<(), String> {
let edit_string = match &edit.edit_type {
EditType::Ins => format!("{}", edit).green(),
EditType::Del => format!("{}", edit).red(),
EditType::Eql => format!("{}", edit).normal(),
};
println!("{}", edit_string);
Ok(())
}
/// Print a hunk: its cyan "@@ ... @@" header followed by its edits.
fn print_diff_hunk(&mut self, hunk: diff::Hunk) -> Result<(), String> {
println!("{}", hunk.header().cyan());
for edit in hunk.edits {
self.print_diff_edit(edit).map_err(|e| e.to_string())?;
}
Ok(())
}
/// Build the diff side for `path` as recorded in the index (blob is
/// loaded from the object database). Panics if the path is missing or
/// not a blob.
fn from_index(&mut self, path: &str) -> Target {
let entry = self
.repo
.index
.entry_for_path(path)
.expect("Path not found in index");
let oid = entry.oid.clone();
let blob = match self.repo.database.load(&oid) {
ParsedObject::Blob(blob) => blob,
_ => panic!("path is not a blob"),
};
Target {
path: path.to_string(),
oid,
mode: Some(entry.mode),
data: std::str::from_utf8(&blob.data)
.expect("utf8 conversion failed")
.to_string(),
}
}
/// Build the diff side for `path` from the working tree: hash the file
/// contents for the oid and take the mode from the cached stat.
fn from_file(&self, path: &str) -> Target {
let blob = Blob::new(
self.repo
.workspace
.read_file(path)
.expect("Failed to read file")
.as_bytes(),
);
let oid = blob.get_oid();
let mode = self.repo.stats.get(path).unwrap().mode();
Target {
path: path.to_string(),
oid,
mode: Some(mode),
data: std::str::from_utf8(&blob.data)
.expect("utf8 conversion failed")
.to_string(),
}
}
/// Build the "absent" diff side (used for adds/deletes): null oid, no
/// mode, empty contents.
fn from_nothing(&self, path: &str) -> Target {
Target {
path: path.to_string(),
oid: NULL_OID.to_string(),
mode: None,
data: "".to_string(),
}
}
/// Build the diff side for `path` as recorded in the HEAD tree.
/// Panics if the path is missing from HEAD or not a blob.
fn from_head(&mut self, path: &str) -> Target {
let entry = self
.repo
.head_tree
.get(path)
.expect("Path not found in HEAD");
let oid = entry.get_oid();
let mode = entry.mode();
let blob = match self.repo.database.load(&oid) {
ParsedObject::Blob(blob) => blob,
_ => panic!("path is not a blob"),
};
Target {
path: path.to_string(),
oid,
mode: Some(mode),
data: std::str::from_utf8(&blob.data)
.expect("utf8 conversion failed")
.to_string(),
}
}
}
/// Abbreviate a full object id for display on "index" header lines.
fn short(oid: &str) -> &str {
Database::short_oid(oid)
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/status.rs | src/commands/status.rs | use crate::commands::CommandContext;
use crate::repository::{ChangeType, Repository};
use colored::*;
use std::collections::HashMap;
use std::io::{Read, Write};
// Column width used to align long-format labels ("new file:" etc.).
static LABEL_WIDTH: usize = 12;
lazy_static! {
// Single-letter porcelain codes per change type (A/M/D).
static ref SHORT_STATUS: HashMap<ChangeType, &'static str> = {
let mut m = HashMap::new();
m.insert(ChangeType::Added, "A");
m.insert(ChangeType::Modified, "M");
m.insert(ChangeType::Deleted, "D");
m
};
// Human-readable labels for the long status format.
static ref LONG_STATUS: HashMap<ChangeType, &'static str> = {
let mut m = HashMap::new();
m.insert(ChangeType::Added, "new file:");
m.insert(ChangeType::Modified, "modified:");
m.insert(ChangeType::Deleted, "deleted:");
m
};
}
/// Implements the `status` subcommand in long and --porcelain formats.
pub struct Status<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
// Repository opened at the context's working directory.
repo: Repository,
// CLI options and I/O streams for this invocation.
ctx: CommandContext<'a, I, O, E>,
}
impl<'a, I, O, E> Status<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
/// Build a `Status` command bound to the repository rooted at the
/// context's working directory.
pub fn new(ctx: CommandContext<'a, I, O, E>) -> Status<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
let working_dir = &ctx.dir;
let root_path = working_dir.as_path();
let repo = Repository::new(&root_path);
Status { repo, ctx }
}
/// Two-character porcelain status for a path: left column is the
/// index (staged) state, right column the workspace (unstaged) state.
fn status_for(&self, path: &str) -> String {
let left = if let Some(index_change) = self.repo.index_changes.get(path) {
SHORT_STATUS.get(index_change).unwrap_or(&" ")
} else {
" "
};
let right = if let Some(workspace_change) = self.repo.workspace_changes.get(path) {
SHORT_STATUS.get(workspace_change).unwrap_or(&" ")
} else {
" "
};
format!("{}{}", left, right)
}
/// Machine-readable output: "XY path" per change, "?? path" per
/// untracked file.
fn print_porcelain_format(&mut self) -> Result<(), String> {
for file in &self.repo.changed {
println!("{} {}", self.status_for(file), file);
}
for file in &self.repo.untracked {
println!("?? {}", file);
}
Ok(())
}
/// Human-readable output: staged, unstaged, and untracked sections
/// followed by a one-line summary.
fn print_long_format(&mut self) -> Result<(), String> {
self.print_index_changes("Changes to be committed", "green")?;
self.print_workspace_changes("Changes not staged for commit", "red")?;
self.print_untracked_files("Untracked files", "red")?;
self.print_commit_status()?;
Ok(())
}
/// Print the staged-changes section, one aligned, colored label/path
/// line per change.
fn print_index_changes(&mut self, message: &str, style: &str) -> Result<(), String> {
println!("{}", message);
for (path, change_type) in &self.repo.index_changes {
if let Some(status) = LONG_STATUS.get(change_type) {
println!(
"{}",
format!("\t{:width$}{}", status, path, width = LABEL_WIDTH).color(style)
);
}
}
println!();
Ok(())
}
/// Print the unstaged-changes section, same layout as above.
fn print_workspace_changes(&mut self, message: &str, style: &str) -> Result<(), String> {
println!("{}", message);
for (path, change_type) in &self.repo.workspace_changes {
if let Some(status) = LONG_STATUS.get(change_type) {
println!(
"{}",
format!("\t{:width$}{}", status, path, width = LABEL_WIDTH).color(style)
);
}
}
println!();
Ok(())
}
/// Print the untracked-files section.
fn print_untracked_files(&mut self, message: &str, style: &str) -> Result<(), String> {
println!("{}", message);
for path in &self.repo.untracked {
println!("{}", format!("\t{}", path).color(style));
}
println!();
Ok(())
}
/// Choose porcelain vs long format based on the --porcelain flag.
pub fn print_results(&mut self) -> Result<(), String> {
if self
.ctx
.options
.as_ref()
.map(|o| o.is_present("porcelain"))
.unwrap_or(false)
{
self.print_porcelain_format()?;
} else {
self.print_long_format()?;
}
Ok(())
}
/// Print the trailing summary line for long format, but only when
/// nothing is staged.
fn print_commit_status(&mut self) -> Result<(), String> {
if !self.repo.index_changes.is_empty() {
return Ok(());
}
if !self.repo.workspace_changes.is_empty() {
println!("no changes added to commit");
} else if !self.repo.untracked.is_empty() {
println!("nothing added to commit but untracked files present");
} else {
println!("nothing to commit, working tree clean");
}
Ok(())
}
/// Entry point: the index is loaded under the lock and written back so
/// that stat information refreshed during status gets persisted.
pub fn run(&mut self) -> Result<(), String> {
self.repo
.index
.load_for_update()
.expect("failed to load index");
self.repo.initialize_status()?;
self.repo
.index
.write_updates()
.expect("failed to write index");
self.print_results()
.expect("printing status results failed");
Ok(())
}
}
#[cfg(test)]
mod tests {
use crate::commands::tests::*;
use std::{thread, time};
// These tests drive the real CLI against a temp repository via
// CommandHelper and compare porcelain status output verbatim.
#[test]
fn list_untracked_files_in_name_order() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.write_file("file.txt", b"hello").unwrap();
cmd_helper.write_file("another.txt", b"hello").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(
"?? another.txt
?? file.txt\n",
);
}
#[test]
fn list_files_as_untracked_if_not_in_index() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.write_file("committed.txt", b"").unwrap();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.commit("commit message");
cmd_helper.write_file("file.txt", b"").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("?? file.txt\n");
}
// Untracked directories are reported as a single "dir/" line, not
// expanded into their contents.
#[test]
fn list_untracked_dir_not_contents() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.write_file("file.txt", b"").unwrap();
cmd_helper.write_file("dir/another.txt", b"").unwrap();
cmd_helper.assert_status(
"?? dir/
?? file.txt\n",
);
}
#[test]
fn list_untracked_files_inside_tracked_dir() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.write_file("a/b/inner.txt", b"").unwrap();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.commit("commit message");
cmd_helper.write_file("a/outer.txt", b"").unwrap();
cmd_helper.write_file("a/b/c/file.txt", b"").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(
"?? a/b/c/
?? a/outer.txt\n",
);
}
#[test]
fn does_not_list_empty_untracked_dirs() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.mkdir("outer").unwrap();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn list_untracked_dirs_that_indirectly_contain_files() {
let mut cmd_helper = CommandHelper::new();
cmd_helper.write_file("outer/inner/file.txt", b"").unwrap();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("?? outer/\n");
}
// Shared fixture: a committed tree with a nested directory structure.
fn create_and_commit(cmd_helper: &mut CommandHelper) {
cmd_helper.write_file("1.txt", b"one").unwrap();
cmd_helper.write_file("a/2.txt", b"two").unwrap();
cmd_helper.write_file("a/b/3.txt", b"three").unwrap();
cmd_helper.jit_cmd(&["init"]).unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.commit("commit message");
}
#[test]
fn prints_nothing_when_no_files_changed() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn reports_files_with_changed_contents() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.clear_stdout();
cmd_helper.write_file("1.txt", b"changed").unwrap();
cmd_helper.write_file("a/2.txt", b"modified").unwrap();
cmd_helper.assert_status(
" M 1.txt
M a/2.txt\n",
);
}
#[test]
fn reports_files_with_changed_modes() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.make_executable("a/2.txt").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(" M a/2.txt\n");
}
#[test]
fn reports_modified_files_with_unchanged_size() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
// Sleep so that mtime is slightly different from what is in
// index
let ten_millis = time::Duration::from_millis(2);
thread::sleep(ten_millis);
cmd_helper.write_file("a/b/3.txt", b"hello").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(" M a/b/3.txt\n");
}
// A bare mtime bump with identical contents must NOT show as modified.
#[test]
fn prints_nothing_if_file_is_touched() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.touch("1.txt").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn reports_deleted_files() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.delete("a/2.txt").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(" D a/2.txt\n");
}
#[test]
fn reports_files_in_deleted_dir() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.delete("a").unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(
" D a/2.txt
D a/b/3.txt\n",
);
}
#[test]
fn reports_file_added_to_tracked_dir() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.write_file("a/4.txt", b"four").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("A a/4.txt\n");
}
#[test]
fn reports_file_added_to_untracked_dir() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.write_file("d/e/5.txt", b"five").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("A d/e/5.txt\n");
}
#[test]
fn reports_files_with_modes_modified_between_head_and_index() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.make_executable("1.txt").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("M 1.txt\n");
}
#[test]
fn reports_files_with_contents_modified_between_head_and_index() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.write_file("a/b/3.txt", b"modified").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("M a/b/3.txt\n");
}
// Deleting the index file and re-adding simulates a staged deletion.
#[test]
fn reports_files_deleted_in_index() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.delete("1.txt").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status("D 1.txt\n");
}
#[test]
fn reports_all_deleted_files_in_dir() {
let mut cmd_helper = CommandHelper::new();
create_and_commit(&mut cmd_helper);
cmd_helper.delete("a").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.clear_stdout();
cmd_helper.assert_status(
"D a/2.txt
D a/b/3.txt\n",
);
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/checkout.rs | src/commands/checkout.rs | use crate::commands::CommandContext;
use crate::database::object::Object;
use crate::database::tree::TreeEntry;
use crate::database::tree_diff::TreeDiff;
use crate::database::{Database, ParsedObject};
use crate::refs::Ref;
use crate::repository::Repository;
use crate::revision::Revision;
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
// Advice printed when a checkout leaves HEAD detached; mirrors git's
// own detached-HEAD notice (with the command name swapped to `rug`).
const DETACHED_HEAD_MESSAGE: &str =
"You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by performing another checkout.
If you want to create a new branch to retain commits you create, you may
do so (now or later) by using the branch command. Example:
rug branch <new-branch-name>
";
/// Implements the `checkout` subcommand: move HEAD to a revision and
/// migrate the workspace/index to match.
pub struct Checkout<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
// Repository opened at the context's working directory.
repo: Repository,
// CLI options and I/O streams for this invocation.
ctx: CommandContext<'a, I, O, E>,
}
impl<'a, I, O, E> Checkout<'a, I, O, E>
where
I: Read,
O: Write,
E: Write,
{
/// Build a `Checkout` command bound to the repository rooted at the
/// context's working directory.
pub fn new(ctx: CommandContext<'a, I, O, E>) -> Checkout<'a, I, O, E> {
let working_dir = &ctx.dir;
let root_path = working_dir.as_path();
let repo = Repository::new(&root_path);
Checkout { repo, ctx }
}
/// Print "<message> <short-oid> <commit title>" for the commit at
/// `oid`. Panics if `oid` does not name a commit.
fn print_head_position(&mut self, message: &str, oid: &str) -> Result<(), String> {
let commit = match self.repo.database.load(oid) {
ParsedObject::Commit(commit) => commit,
_ => panic!("oid not a commit"),
};
let oid = commit.get_oid();
let short = Database::short_oid(&oid);
println!(
"{}",
format!("{} {} {}", message, short, commit.title_line())
);
Ok(())
}
/// When moving away from a detached HEAD, report where HEAD used to be.
fn print_previous_head(
&mut self,
current_ref: &Ref,
current_oid: &str,
target_oid: &str,
) -> Result<(), String> {
if current_ref.is_head() && current_oid != target_oid {
return self.print_head_position("Previous HEAD position was", current_oid);
}
Ok(())
}
/// When the checkout newly detaches HEAD, print the advisory notice.
fn print_detachment_notice(
&mut self,
current_ref: &Ref,
target: &str,
new_ref: &Ref,
) -> Result<(), String> {
if new_ref.is_head() && !current_ref.is_head() {
println!(
"{}
{}
",
format!("Note: checking out '{}'.", target),
DETACHED_HEAD_MESSAGE
);
}
Ok(())
}
/// Report the final HEAD position: detached position on stdout, or
/// "Already on"/"Switched to branch" on stderr (matching git).
fn print_new_head(
&mut self,
current_ref: &Ref,
new_ref: &Ref,
target: &str,
target_oid: &str,
) -> Result<(), String> {
if new_ref.is_head() {
self.print_head_position("HEAD is now at", target_oid)?;
} else if new_ref == current_ref {
eprintln!("{}", format!("Already on {}", target));
} else {
eprintln!("{}", format!("Switched to branch {}", target));
}
Ok(())
}
/// Entry point: resolve the target revision, diff the current and
/// target trees, migrate workspace+index, update HEAD, then print the
/// same notices git would.
pub fn run(&mut self) -> Result<(), String> {
let options = self.ctx.options.as_ref().unwrap().clone();
let args: Vec<_> = if let Some(args) = options.values_of("args") {
args.collect()
} else {
vec![]
};
let target = args.get(0).expect("no target provided");
self.repo
.index
.load_for_update()
.map_err(|e| e.to_string())?;
let current_ref = self.repo.refs.current_ref("HEAD");
let current_oid = self
.repo
.refs
.read_oid(&current_ref)
.unwrap_or_else(|| panic!("failed to read ref: {:?}", current_ref));
let mut revision = Revision::new(&mut self.repo, target);
// Join revision-resolution errors into git-style error:/hint: lines.
let target_oid = match revision.resolve() {
Ok(oid) => oid,
Err(errors) => {
let mut v = vec![];
for error in errors {
v.push(format!("error: {}", error.message));
for h in error.hint {
v.push(format!("hint: {}", h));
}
}
v.push("\n".to_string());
return Err(v.join("\n"));
}
};
let tree_diff = self.tree_diff(&current_oid, &target_oid);
let mut migration = self.repo.migration(tree_diff);
migration.apply_changes()?;
self.repo.index.write_updates().map_err(|e| e.to_string())?;
self.repo
.refs
.set_head(&target, &target_oid)
.map_err(|e| e.to_string())?;
let new_ref = self.repo.refs.current_ref("HEAD");
self.print_previous_head(&current_ref, &current_oid, &target_oid)?;
self.print_detachment_notice(&current_ref, &target, &new_ref)?;
self.print_new_head(&current_ref, &new_ref, &target, &target_oid)?;
Ok(())
}
/// Compute the per-path (old, new) tree-entry changes between two
/// commit oids, starting at the repository root.
fn tree_diff(
&mut self,
a: &str,
b: &str,
) -> HashMap<PathBuf, (Option<TreeEntry>, Option<TreeEntry>)> {
let mut td = TreeDiff::new(&mut self.repo.database);
td.compare_oids(
Some(a.to_string()),
Some(b.to_string()),
std::path::Path::new(""),
);
td.changes
}
}
#[cfg(test)]
mod tests {
    use crate::commands::tests::*;
    use std::collections::HashMap;
    // Baseline working tree: every test commits these three files first
    // (one top-level file, one nested, one doubly nested).
    lazy_static! {
        static ref BASE_FILES: HashMap<&'static str, &'static str> = {
            let mut m = HashMap::new();
            m.insert("1.txt", "1");
            m.insert("outer/2.txt", "2");
            m.insert("outer/inner/3.txt", "3");
            m
        };
    }
    // Rebuild the index from scratch, stage everything, and commit as "change".
    fn commit_all(cmd_helper: &mut CommandHelper) {
        cmd_helper.delete(".git/index").unwrap();
        cmd_helper.jit_cmd(&["add", "."]).unwrap();
        cmd_helper.commit("change");
    }
    // Commit the current tree, then check out `revision` (usually "@^").
    fn commit_and_checkout(cmd_helper: &mut CommandHelper, revision: &str) {
        commit_all(cmd_helper);
        cmd_helper.jit_cmd(&["checkout", revision]).unwrap();
    }
    // Shared setup: init a repo, write BASE_FILES, and commit them as "first".
    fn before(cmd_helper: &mut CommandHelper) {
        cmd_helper.jit_cmd(&["init"]).unwrap();
        for (filename, contents) in BASE_FILES.iter() {
            cmd_helper
                .write_file(filename, contents.as_bytes())
                .unwrap();
        }
        cmd_helper.jit_cmd(&["add", "."]).unwrap();
        cmd_helper.commit("first");
    }
fn assert_stale_file(error: Result<(String, String), String>, filename: &str) {
if let Err(error) = error {
assert_eq!(error,
format!("Your local changes to the following files would be overwritten by checkout:\n\t{}\nPlease commit your changes to stash them before you switch branches\n\n", filename));
} else {
assert!(false, format!("Expected Err but got {:?}", error));
}
}
fn assert_stale_directory(error: Result<(String, String), String>, filename: &str) {
if let Err(error) = error {
assert_eq!(error,
format!("Updating the following directories would lose untracked files in them:\n\t{}\n\n\n\n", filename));
} else {
assert!(false, format!("Expected Err but got {:?}", error));
}
}
fn assert_remove_conflict(error: Result<(String, String), String>, filename: &str) {
if let Err(error) = error {
assert_eq!(error,
format!("The following untracked working tree files would be removed by checkout:\n\t{}\nPlease commit your changes to stash them before you switch branches\n\n", filename));
} else {
assert!(false, format!("Expected Err but got {:?}", error));
}
}
fn assert_overwrite_conflict(error: Result<(String, String), String>, filename: &str) {
if let Err(error) = error {
assert_eq!(error,
format!("The following untracked working tree files would be overwritten by checkout:\n\t{}\nPlease move or remove them before you switch branches\n\n", filename));
} else {
assert!(false, format!("Expected Err but got {:?}", error));
}
}
#[test]
fn updates_a_changed_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
}
#[test]
fn fails_to_update_a_modified_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"conflict").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn fails_to_update_a_modified_equal_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"1").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn fails_to_update_a_changed_mode_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.make_executable("1.txt").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn restores_a_deleted_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("1.txt").unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
}
#[test]
fn restores_files_from_a_deleted_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper
.write_file("outer/inner/3.txt", b"changed")
.unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer").unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = BASE_FILES.clone();
expected_workspace.remove("outer/2.txt");
cmd_helper.assert_workspace(expected_workspace);
cmd_helper.clear_stdout();
cmd_helper.assert_status(" D outer/2.txt\n");
}
#[test]
fn fails_to_update_a_staged_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn updates_a_staged_equal_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"1").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
}
#[test]
fn fails_to_update_a_staged_changed_mode_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.make_executable("1.txt").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn fails_to_update_an_unindexed_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("1.txt").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn fails_to_update_an_unindexed_and_untracked_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("1.txt").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.write_file("1.txt", b"conflict").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "1.txt");
}
#[test]
fn fails_to_update_an_unindexed_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper
.write_file("outer/inner/3.txt", b"changed")
.unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner/3.txt");
}
#[test]
fn fails_to_update_with_a_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper
.write_file("outer/inner/3.txt", b"changed")
.unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner/3.txt");
}
#[test]
fn fails_to_update_with_a_staged_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper
.write_file("outer/inner/3.txt", b"changed")
.unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner/3.txt");
}
#[test]
fn fails_to_update_with_an_unstaged_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper
.write_file("outer/inner/3.txt", b"changed")
.unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner/3.txt");
}
#[test]
fn fails_to_update_with_a_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
cmd_helper
.write_file("outer/2.txt/extra.log", b"conflict")
.unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/2.txt");
}
#[test]
fn fails_to_update_with_a_staged_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
cmd_helper
.write_file("outer/2.txt/extra.log", b"conflict")
.unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/2.txt");
}
#[test]
fn removes_a_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("94.txt", b"94").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn removes_a_file_from_an_existing_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn removes_a_file_from_a_new_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("new/94.txt", b"94").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.assert_noent("new");
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn removes_a_file_from_a_new_nested_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("new/inner/94.txt", b"94").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.assert_noent("new");
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn fails_to_remove_a_modified_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"conflict").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn fails_to_remove_a_changed_mode_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.make_executable("outer/94.txt").unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn leaves_a_deleted_file_deleted() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/94.txt").unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn leaves_a_deleted_directory_deleted() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/inner/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = BASE_FILES.clone();
expected_workspace.remove("outer/inner/3.txt").unwrap();
cmd_helper.assert_workspace(expected_workspace);
cmd_helper.clear_stdout();
cmd_helper.assert_status(" D outer/inner/3.txt\n");
}
#[test]
fn fails_to_remove_a_staged_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn fails_to_remove_a_staged_changed_mode_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.make_executable("outer/94.txt").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn leaves_an_unindexed_file_deleted() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/94.txt").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn fails_to_remove_an_unindexed_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/94.txt").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.write_file("outer/94.txt", b"conflict").unwrap();
assert_remove_conflict(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn leaves_an_unindexed_directory_deleted() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/inner/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = BASE_FILES.clone();
expected_workspace.remove("outer/inner/3.txt").unwrap();
cmd_helper.assert_workspace(expected_workspace);
cmd_helper.clear_stdout();
cmd_helper.assert_status("D outer/inner/3.txt\n");
}
#[test]
fn fails_to_remove_with_a_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/inner/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
assert_stale_file(
cmd_helper.jit_cmd(&["checkout", "@^"]),
"outer/inner/94.txt",
);
}
#[test]
fn removes_a_file_with_a_staged_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/inner/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = BASE_FILES.clone();
expected_workspace.remove("outer/inner/3.txt").unwrap();
expected_workspace.insert("outer/inner", "conflict");
cmd_helper.assert_workspace(expected_workspace);
cmd_helper.clear_stdout();
cmd_helper.assert_status(
"A outer/inner
D outer/inner/3.txt\n",
);
}
#[test]
fn fails_to_remove_with_an_unstaged_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/inner/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.delete(".git/index").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
assert_remove_conflict(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner");
}
#[test]
fn fails_to_remove_with_a_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/94.txt").unwrap();
cmd_helper
.write_file("outer/94.txt/extra.log", b"conflict")
.unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/94.txt");
}
#[test]
fn removes_a_file_with_a_staged_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("outer/94.txt", b"94").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/94.txt").unwrap();
cmd_helper
.write_file("outer/94.txt/extra.log", b"conflict")
.unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn adds_a_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("1.txt").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn adds_a_file_to_a_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn adds_a_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn fails_to_add_an_untracked_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"conflict").unwrap();
assert_overwrite_conflict(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/2.txt");
}
#[test]
fn fails_to_add_a_staged_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
assert_stale_file(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/2.txt");
}
#[test]
fn adds_a_staged_equal_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"2").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn fails_to_add_with_an_untracked_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/inner/3.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
assert_overwrite_conflict(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/inner");
}
#[test]
fn adds_a_file_with_a_staged_file_at_a_parent_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/inner/3.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"conflict").unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn fails_to_add_with_an_untracked_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper
.write_file("outer/2.txt/extra.log", b"conflict")
.unwrap();
assert_stale_directory(cmd_helper.jit_cmd(&["checkout", "@^"]), "outer/2.txt");
}
#[test]
fn adds_a_file_with_a_staged_file_at_a_child_path() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
commit_all(&mut cmd_helper);
cmd_helper
.write_file("outer/2.txt/extra.log", b"conflict")
.unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn replaces_a_file_with_a_directory() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.write_file("outer/inner", b"in").unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn replaces_a_directory_with_a_file() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.delete("outer/2.txt").unwrap();
cmd_helper
.write_file("outer/2.txt/nested.log", b"nested")
.unwrap();
commit_and_checkout(&mut cmd_helper, "@^");
cmd_helper.assert_workspace(BASE_FILES.clone());
cmd_helper.clear_stdout();
cmd_helper.assert_status("");
}
#[test]
fn maintains_workspace_modifications() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"hello").unwrap();
cmd_helper.delete("outer/inner").unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = HashMap::new();
expected_workspace.insert("1.txt", "1");
expected_workspace.insert("outer/2.txt", "hello");
cmd_helper.assert_workspace(expected_workspace);
cmd_helper.clear_stdout();
cmd_helper.assert_status(
" M outer/2.txt
D outer/inner/3.txt\n",
);
}
#[test]
fn maintains_index_modifications() {
let mut cmd_helper = CommandHelper::new();
before(&mut cmd_helper);
cmd_helper.write_file("1.txt", b"changed").unwrap();
commit_all(&mut cmd_helper);
cmd_helper.write_file("outer/2.txt", b"hello").unwrap();
cmd_helper
.write_file("outer/inner/4.txt", b"world")
.unwrap();
cmd_helper.jit_cmd(&["add", "."]).unwrap();
cmd_helper.jit_cmd(&["checkout", "@^"]).unwrap();
let mut expected_workspace = BASE_FILES.clone();
expected_workspace.insert("outer/2.txt", "hello");
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | true |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/log.rs | src/commands/log.rs | use crate::commands::CommandContext;
use crate::database::commit::Commit;
use crate::database::object::Object;
use crate::database::{Database, ParsedObject};
use crate::pager::Pager;
use crate::refs::Ref;
use crate::repository::Repository;
use colored::*;
use std::collections::HashMap;
use std::io::{Read, Write};
/// Commit output layout, mirroring `git log --format`.
#[derive(Clone, Copy)]
enum FormatOption {
    /// Full header: oid, author, date, indented message body.
    Medium,
    /// One line per commit: oid, decorations, title.
    OneLine,
}
/// How refs pointing at a commit are rendered, mirroring `git log --decorate`.
#[derive(Clone, Copy)]
enum DecorateOption {
    /// Decorate only for terminal output (tty detection still a TODO here).
    Auto,
    /// Short ref names (e.g. `main`).
    Short,
    /// Full ref paths (e.g. `refs/heads/main`).
    Full,
    /// No decorations at all.
    No,
}
/// Resolved `log` options after flag precedence has been applied
/// (see `Log::define_options`).
struct Options {
    /// Abbreviate commit oids in the output.
    abbrev: bool,
    /// Per-commit layout.
    format: FormatOption,
    /// Ref decoration mode.
    decorate: DecorateOption,
}
/// The `log` subcommand: walks the first-parent chain from HEAD and prints
/// each commit. Also implements `Iterator<Item = Commit>` for the walk.
pub struct Log<'a, I, O, E>
where
    I: Read,
    O: Write,
    E: Write,
{
    /// Oid of the next commit to yield; `None` once the walk is exhausted.
    current_oid: Option<String>,
    repo: Repository,
    ctx: CommandContext<'a, I, O, E>,
    options: Options,
    /// Map of oid -> refs pointing at it; populated lazily in `run`.
    reverse_refs: Option<HashMap<String, Vec<Ref>>>,
    /// The ref HEAD currently resolves to; populated lazily in `run`.
    current_ref: Option<Ref>,
}
impl<'a, I, O, E> Log<'a, I, O, E>
where
    I: Read,
    O: Write,
    E: Write,
{
    /// Build a `Log` command rooted at the context's working directory,
    /// starting the walk from the current HEAD commit.
    pub fn new(ctx: CommandContext<'a, I, O, E>) -> Log<'a, I, O, E> {
        let working_dir = &ctx.dir;
        let root_path = working_dir.as_path();
        let repo = Repository::new(&root_path);
        let current_oid = repo.refs.read_head();
        let ctx_options = ctx.options.as_ref().unwrap().clone();
        let options = Self::define_options(ctx_options);
        Log {
            ctx,
            repo,
            current_oid,
            options,
            reverse_refs: None,
            current_ref: None,
        }
    }

    /// Translate clap matches into `Options`, mirroring git's precedence:
    /// `--oneline` implies abbreviation unless overridden, `--no-decorate`
    /// wins over `--decorate`, and `--pretty` is an alias for `--format`.
    fn define_options(options: clap::ArgMatches) -> Options {
        let mut abbrev = None;
        if options.is_present("abbrev-commit") {
            abbrev = Some(true);
        }
        if options.is_present("no-abbrev-commit") {
            abbrev = Some(false);
        }
        let mut format = FormatOption::Medium;
        // Fix: the original read only value_of("format") and unwrapped, which
        // panicked when the user passed `--pretty=<fmt>` alone.
        if let Some(fmt) = options
            .value_of("format")
            .or_else(|| options.value_of("pretty"))
        {
            match fmt {
                "oneline" => {
                    format = FormatOption::OneLine;
                }
                "medium" => {
                    format = FormatOption::Medium;
                }
                _ => (),
            };
        }
        if options.is_present("oneline") {
            format = FormatOption::OneLine;
            if abbrev.is_none() {
                abbrev = Some(true);
            }
        }
        let mut decorate = DecorateOption::Short;
        if options.is_present("decorate") {
            // Bare `--decorate` (no value) means short, matching git; the
            // original unwrap panicked in that case.
            decorate = match options.value_of("decorate").unwrap_or("short") {
                "full" => DecorateOption::Full,
                "short" => DecorateOption::Short,
                "no" => DecorateOption::No,
                _ => unimplemented!(),
            }
        }
        if options.is_present("no-decorate") {
            decorate = DecorateOption::No;
        }
        Options {
            abbrev: abbrev.unwrap_or(false),
            format,
            decorate,
        }
    }

    /// Entry point: walk history from HEAD, printing every commit through the
    /// pager in the configured format.
    pub fn run(&mut self) -> Result<(), String> {
        Pager::setup_pager();
        self.reverse_refs = Some(self.repo.refs.reverse_refs());
        self.current_ref = Some(self.repo.refs.current_ref("HEAD"));
        // FIXME: Print commits as they are returned by the iterator
        // instead of collecting into a Vec.
        let commits: Vec<Commit> = self.by_ref().collect();
        for commit in &commits {
            self.show_commit(commit)?;
        }
        Ok(())
    }

    /// Dispatch a single commit to the configured format renderer.
    fn show_commit(&self, commit: &Commit) -> Result<(), String> {
        match self.options.format {
            FormatOption::Medium => {
                self.show_commit_medium(commit)?;
            }
            FormatOption::OneLine => {
                self.show_commit_oneline(commit)?;
            }
        }
        Ok(())
    }

    /// Return the commit oid, shortened when `--abbrev-commit` is in effect.
    fn abbrev(&self, commit: &Commit) -> String {
        if self.options.abbrev {
            let oid = commit.get_oid();
            Database::short_oid(&oid).to_string()
        } else {
            commit.get_oid()
        }
    }

    /// Print a commit in git's default "medium" format: header, author, date,
    /// blank line, indented message.
    fn show_commit_medium(&self, commit: &Commit) -> Result<(), String> {
        let author = &commit.author;
        println!();
        println!(
            "commit {} {}",
            self.abbrev(commit).yellow(),
            self.decorate(commit)
        );
        println!("Author: {} <{}>", author.name, author.email);
        println!("Date: {}", author.readable_time());
        println!();
        for line in commit.message.lines() {
            println!("  {}", line);
        }
        Ok(())
    }

    /// Print a commit as a single line: oid, decorations, title.
    fn show_commit_oneline(&self, commit: &Commit) -> Result<(), String> {
        println!(
            "{} {} {}",
            self.abbrev(commit).yellow(),
            self.decorate(commit),
            commit.title_line()
        );
        Ok(())
    }

    /// Render the ` (ref, ref, ...)` decoration suffix for a commit, or an
    /// empty string when decoration is off or no refs point at it.
    fn decorate(&self, commit: &Commit) -> String {
        match self.options.decorate {
            // TODO: `Auto` should decorate only when stdout is a tty.
            DecorateOption::No | DecorateOption::Auto => return "".to_string(),
            _ => (),
        }
        let refs = self.reverse_refs.as_ref().unwrap().get(&commit.get_oid());
        if let Some(refs) = refs {
            // Split out the HEAD ref (when HEAD is attached to a branch) so it
            // can be rendered as `HEAD -> branch`.
            let (head, refs): (Vec<&Ref>, Vec<&Ref>) = refs.iter().partition(|r#ref| {
                r#ref.is_head() && !self.current_ref.as_ref().unwrap().is_head()
            });
            let names: Vec<_> = refs
                .iter()
                .map(|r#ref| self.decoration_name(head.first(), r#ref))
                .collect();
            format!(
                " {}{}{}",
                "(".yellow(),
                names.join(&", ".yellow()),
                ")".yellow()
            )
        } else {
            "".to_string()
        }
    }

    /// Color and (when applicable) prefix a single ref name with `HEAD -> `.
    fn decoration_name(&self, head: Option<&&Ref>, r#ref: &Ref) -> String {
        let mut name = match self.options.decorate {
            DecorateOption::Short | DecorateOption::Auto => self.repo.refs.ref_short_name(r#ref),
            DecorateOption::Full => r#ref.path().to_string(),
            _ => unimplemented!(),
        };
        name = name.bold().color(Self::ref_color(&r#ref)).to_string();
        if let Some(head) = head {
            if r#ref == self.current_ref.as_ref().unwrap() {
                name = format!("{} -> {}", "HEAD", name)
                    .color(Self::ref_color(head))
                    .to_string();
            }
        }
        name
    }

    /// HEAD decorations are cyan, branch decorations green (as in git).
    fn ref_color(r#ref: &Ref) -> &str {
        if r#ref.is_head() {
            "cyan"
        } else {
            "green"
        }
    }
}
impl<'a, I, O, E> Iterator for Log<'a, I, O, E>
where
    I: Read,
    O: Write,
    E: Write,
{
    type Item = Commit;

    /// Yield the commit at `current_oid` and advance to its first parent.
    /// The walk ends at a root commit (no parent) or when the oid does not
    /// resolve to a commit object.
    fn next(&mut self) -> Option<Commit> {
        // Clone the oid up front so the borrow of `self.current_oid` does not
        // overlap the mutable use of the database below. (Also repairs the
        // entity-mangled `&current_oid` from the original source.)
        let current_oid = self.current_oid.clone()?;
        if let ParsedObject::Commit(commit) = self.repo.database.load(&current_oid) {
            self.current_oid = commit.parent.clone();
            Some(commit.clone())
        } else {
            None
        }
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/commit.rs | src/commands/commit.rs | use std::io::{Read, Write};
use chrono::prelude::*;
use crate::commands::CommandContext;
use crate::database::commit::{Author, Commit};
use crate::database::object::Object;
use crate::database::tree::Tree;
use crate::database::Entry;
use crate::repository::Repository;
/// Record the current index as a new commit: write the tree objects, build a
/// commit object from GIT_AUTHOR_NAME/EMAIL and the message read from stdin,
/// store it, and advance HEAD.
pub fn commit_command<I, O, E>(mut ctx: CommandContext<I, O, E>) -> Result<(), String>
where
    I: Read,
    O: Write,
    E: Write,
{
    // Open the repository rooted at the context's working directory and load
    // the staged index.
    let working_dir = ctx.dir;
    let root_path = working_dir.as_path();
    let mut repo = Repository::new(&root_path);
    repo.index.load().expect("loading .git/index failed");

    // Snapshot the index as database entries and assemble the tree hierarchy.
    let mut entries: Vec<Entry> = Vec::new();
    for (_path, idx_entry) in repo.index.entries.iter() {
        entries.push(Entry::from(idx_entry));
    }
    let root = Tree::build(&entries);

    // Persist every subtree (and the root tree) to the object database.
    root.traverse(&|tree| {
        repo.database
            .store(tree)
            .expect("Traversing tree to write to database failed")
    });

    // Author identity comes from the environment; timestamp is "now" in UTC.
    let parent = repo.refs.read_head();
    let name = ctx
        .env
        .get("GIT_AUTHOR_NAME")
        .expect("GIT_AUTHOR_NAME not set");
    let email = ctx
        .env
        .get("GIT_AUTHOR_EMAIL")
        .expect("GIT_AUTHOR_EMAIL not set");
    let author = Author {
        name: name.to_string(),
        email: email.to_string(),
        time: Utc::now().with_timezone(&FixedOffset::east(0)),
    };

    // The commit message is read verbatim from stdin.
    let mut commit_message = String::new();
    ctx.stdin
        .read_to_string(&mut commit_message)
        .expect("reading commit from STDIN failed");

    // Store the commit and point HEAD at it.
    let commit = Commit::new(&parent, root.get_oid(), author, commit_message);
    repo.database.store(&commit).expect("writing commit failed");
    repo.refs
        .update_head(&commit.get_oid())
        .expect("updating HEAD failed");

    // Mark the very first commit (no parent) as the root commit, like git.
    let commit_prefix = match parent {
        Some(_) => "",
        None => "(root-commit) ",
    };
    println!("[{}{}] {}", commit_prefix, commit.get_oid(), commit.message);
    Ok(())
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/mod.rs | src/commands/mod.rs | use clap::{App, Arg, ArgMatches, SubCommand};
use std::collections::HashMap;
use std::io::{Read, Write};
use std::path::PathBuf;
mod add;
use add::add_command;
mod init;
use init::init_command;
mod commit;
use commit::commit_command;
mod status;
use status::Status;
mod diff;
use diff::Diff;
mod branch;
use branch::Branch;
mod checkout;
use checkout::Checkout;
mod log;
use log::Log;
/// Shared execution context threaded through every subcommand.
///
/// Bundles the working directory, the process environment, the parsed CLI
/// options for the chosen subcommand, and the three standard streams
/// (abstracted behind `Read`/`Write` so tests can substitute buffers).
#[derive(Debug)]
pub struct CommandContext<'a, I, O, E>
where
    I: Read,
    O: Write,
    E: Write,
{
    /// Directory the command operates in (the repository root for most commands).
    pub dir: PathBuf,
    /// Environment variables, e.g. GIT_AUTHOR_NAME / GIT_AUTHOR_EMAIL.
    pub env: &'a HashMap<String, String>,
    /// Matches for the active subcommand; populated by `execute` before dispatch.
    pub options: Option<ArgMatches<'a>>,
    /// Standard input; commit messages are read from here.
    pub stdin: I,
    pub stdout: O,
    pub stderr: E,
}
/// Build the top-level clap application with every supported subcommand.
///
/// Each subcommand accepts trailing positional `args`; the flag sets mirror
/// the corresponding Git porcelain options.
pub fn get_app() -> App<'static, 'static> {
    App::new("rug")
        .subcommand(
            SubCommand::with_name("init")
                .about("Create an empty Git repository or reinitialize an existing one")
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("status")
                .about("Show the working tree status")
                // --porcelain selects the machine-readable output format.
                .arg(Arg::with_name("porcelain").long("porcelain"))
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("commit")
                .about("Record changes to the repository")
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("add")
                .about("Add file contents to the index")
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("diff")
                .about("Show changes between commits, commit and working tree, etc")
                .arg(Arg::with_name("cached").long("cached"))
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("branch")
                .about("List, create, or delete branches")
                .arg(Arg::with_name("verbose").short("v").long("verbose"))
                .arg(Arg::with_name("delete").short("d").long("delete"))
                .arg(Arg::with_name("force").long("force"))
                .arg(Arg::with_name("force_delete").short("D"))
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("checkout")
                .about("Switch branches or restore working tree files")
                .arg(Arg::with_name("args").multiple(true)),
        )
        .subcommand(
            SubCommand::with_name("log")
                .about("Show commit logs")
                .arg(Arg::with_name("abbrev-commit").long("abbrev-commit"))
                .arg(Arg::with_name("no-abbrev-commit").long("no-abbrev-commit"))
                // --pretty and --format are aliases in Git; both are accepted.
                .arg(
                    Arg::with_name("pretty")
                        .long("pretty")
                        .takes_value(true)
                        .value_name("format"),
                )
                .arg(
                    Arg::with_name("format")
                        .long("format")
                        .takes_value(true)
                        .value_name("format"),
                )
                .arg(Arg::with_name("oneline").long("oneline"))
                .arg(
                    Arg::with_name("decorate")
                        .long("decorate")
                        .takes_value(true)
                        .value_name("format"),
                )
                .arg(Arg::with_name("no-decorate").long("no-decorate"))
                .arg(Arg::with_name("args").multiple(true)),
        )
}
/// Dispatch a parsed command line to the matching subcommand handler.
///
/// The active subcommand's own `ArgMatches` are stored on the context
/// before dispatch; an unknown (or absent) subcommand is a no-op.
pub fn execute<'a, I, O, E>(
    matches: ArgMatches<'a>,
    mut ctx: CommandContext<'a, I, O, E>,
) -> Result<(), String>
where
    I: Read,
    O: Write,
    E: Write,
{
    let (name, sub_matches) = matches.subcommand();
    ctx.options = sub_matches.cloned();
    match name {
        "init" => init_command(ctx),
        "commit" => commit_command(ctx),
        "add" => add_command(ctx),
        "status" => Status::new(ctx).run(),
        "diff" => Diff::new(ctx).run(),
        "branch" => Branch::new(ctx).run(),
        "checkout" => Checkout::new(ctx).run(),
        "log" => Log::new(ctx).run(),
        _ => Ok(()),
    }
}
/// Shared test harness: drives the compiled `rug` binary against a
/// throwaway repository created under the system temp directory.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::repository::Repository;
    use crate::util::*;
    use assert_cmd::prelude::*;
    use filetime::FileTime;
    use std::env;
    use std::fs::{self, File, OpenOptions};
    use std::io::Cursor;
    use std::io::Write;
    use std::os::unix::fs::PermissionsExt;
    use std::path::Path;
    use std::process::{Command, Stdio};
    use std::str;
    use std::time::{SystemTime, UNIX_EPOCH};
    extern crate assert_cmd;

    /// Unique scratch path under the system temp dir for one test repo.
    pub fn gen_repo_path() -> PathBuf {
        let mut temp_dir = generate_temp_name();
        temp_dir.push_str("_rug_test");
        env::temp_dir()
            .canonicalize()
            .expect("canonicalization failed")
            .join(temp_dir)
    }

    /// Open a `Repository` rooted at `repo_path`.
    pub fn repo(repo_path: &Path) -> Repository {
        Repository::new(&repo_path)
    }

    pub struct CommandHelper {
        repo_path: PathBuf,
        stdin: String,
        stdout: Cursor<Vec<u8>>,
        env: HashMap<String, String>,
    }

    impl CommandHelper {
        pub fn new() -> CommandHelper {
            let repo_path = gen_repo_path();
            fs::create_dir_all(&repo_path).unwrap();
            CommandHelper {
                repo_path,
                stdin: String::new(),
                stdout: Cursor::new(vec![]),
                env: HashMap::new(),
            }
        }

        /// Set an environment variable for subsequent `jit_cmd` invocations.
        fn set_env(&mut self, key: &str, value: &str) {
            self.env.insert(key.to_string(), value.to_string());
        }

        /// Set the text piped to the child process's stdin by `jit_cmd`.
        fn set_stdin(&mut self, s: &str) {
            self.stdin = s.to_string();
        }

        /// Run the compiled binary with `args` inside the test repository.
        /// Returns (stdout, stderr) on success, stderr on a non-zero exit.
        pub fn jit_cmd(&mut self, args: &[&str]) -> Result<(String, String), String> {
            let mut cmd = Command::cargo_bin(env!("CARGO_PKG_NAME"))
                .unwrap()
                .args(args)
                .current_dir(&self.repo_path)
                .envs(&self.env)
                .stdout(Stdio::piped())
                .stderr(Stdio::piped())
                .stdin(Stdio::piped())
                .spawn()
                .expect("Failed to spawn child process");

            cmd.stdin
                .as_mut()
                .unwrap()
                .write_all(self.stdin.as_bytes())
                .unwrap();

            let output = cmd.wait_with_output().expect("failed to run executable");
            let (stdout, stderr) = (
                String::from_utf8_lossy(&output.stdout).to_string(),
                String::from_utf8_lossy(&output.stderr).to_string(),
            );
            if output.status.success() {
                Ok((stdout, stderr))
            } else {
                Err(stderr)
            }
        }

        /// Create a commit with a fixed test author and `msg` as the message.
        pub fn commit(&mut self, msg: &str) {
            self.set_env("GIT_AUTHOR_NAME", "A. U. Thor");
            self.set_env("GIT_AUTHOR_EMAIL", "author@example.com");
            self.set_stdin(msg);
            self.jit_cmd(&["commit"]).unwrap();
        }

        /// Write `contents` to `file_name` (relative to the repo root),
        /// creating parent directories and truncating any existing file.
        pub fn write_file(&self, file_name: &str, contents: &[u8]) -> Result<(), std::io::Error> {
            let path = Path::new(&self.repo_path).join(file_name);
            fs::create_dir_all(path.parent().unwrap())?;

            let mut file = OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(true)
                .open(&path)?;
            file.write_all(contents)?;
            Ok(())
        }

        pub fn mkdir(&self, dir_name: &str) -> Result<(), std::io::Error> {
            fs::create_dir_all(self.repo_path.join(dir_name))
        }

        /// Bump a file's atime/mtime to "now" without changing its contents.
        pub fn touch(&self, file_name: &str) -> Result<(), std::io::Error> {
            let path = Path::new(&self.repo_path).join(file_name);
            let now = FileTime::from_unix_time(
                SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .expect("time is broken")
                    .as_secs() as i64,
                0,
            );
            filetime::set_file_times(path, now, now)
        }

        /// Remove a file or directory tree inside the repo.
        pub fn delete(&self, pathname: &str) -> Result<(), std::io::Error> {
            let path = Path::new(&self.repo_path).join(pathname);

            if path.is_dir() {
                fs::remove_dir_all(path)
            } else {
                fs::remove_file(path)
            }
        }

        pub fn make_executable(&self, file_name: &str) -> Result<(), std::io::Error> {
            let path = self.repo_path.join(file_name);
            let file = File::open(&path)?;
            let metadata = file.metadata()?;
            let mut permissions = metadata.permissions();
            permissions.set_mode(0o744);
            fs::set_permissions(path, permissions)?;
            Ok(())
        }

        pub fn make_unreadable(&self, file_name: &str) -> Result<(), std::io::Error> {
            let path = self.repo_path.join(file_name);
            let file = File::open(&path)?;
            let metadata = file.metadata()?;
            let mut permissions = metadata.permissions();
            permissions.set_mode(0o044);
            fs::set_permissions(path, permissions)?;
            Ok(())
        }

        /// Assert the index contains exactly `expected` (mode, path) pairs, in order.
        pub fn assert_index(&self, expected: Vec<(u32, String)>) -> Result<(), std::io::Error> {
            let mut repo = repo(&self.repo_path);
            repo.index.load()?;

            let actual: Vec<(u32, String)> = repo
                .index
                .entries
                .iter()
                .map(|(_, entry)| (entry.mode, entry.path.clone()))
                .collect();
            assert_eq!(expected, actual);
            Ok(())
        }

        pub fn clear_stdout(&mut self) {
            self.stdout = Cursor::new(vec![]);
        }

        /// Assert `status --porcelain` prints exactly `expected`.
        pub fn assert_status(&mut self, expected: &str) {
            let (stdout, _stderr) = self
                .jit_cmd(&["status", "--porcelain"])
                .expect("status command failed");
            assert_output(&stdout, expected)
        }

        /// Assert the working tree holds exactly the given path -> contents map.
        pub fn assert_workspace(&self, expected_contents: HashMap<&str, &str>) {
            let mut files = HashMap::new();

            for file in repo(&self.repo_path)
                .workspace
                .list_files(&self.repo_path)
                .unwrap()
            {
                let file_contents = repo(&self.repo_path).workspace.read_file(&file).unwrap();
                files.insert(file, file_contents);
            }

            assert_maps_equal(expected_contents, files);
        }

        /// Assert `filename` does not exist inside the test repository.
        pub fn assert_noent(&self, filename: &str) {
            // Resolve relative to the repo root: the previous check against
            // the process CWD passed vacuously whenever CWD != repo_path.
            assert!(!self.repo_path.join(filename).exists())
        }
    }

    impl Drop for CommandHelper {
        fn drop(&mut self) {
            // Best-effort cleanup; ignore failures during teardown.
            let _ = fs::remove_dir_all(&self.repo_path);
        }
    }

    pub fn assert_output(stream: &str, expected: &str) {
        assert_eq!(stream, expected);
    }

    /// Assert `a` and `b` contain exactly the same key/value pairs.
    fn assert_maps_equal(a: HashMap<&str, &str>, b: HashMap<String, String>) {
        assert_eq!(a.len(), b.len());
        for (k, v) in a {
            match b.get(k) {
                Some(bv) => assert_eq!(v, *bv),
                // Previously a key missing from `b` was silently accepted.
                None => panic!("missing key in actual map: {}", k),
            }
        }
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/init.rs | src/commands/init.rs | use crate::refs::Refs;
use std::fs;
use std::io::{Read, Write};
use std::path::Path;
use crate::commands::CommandContext;
const DEFAULT_BRANCH: &str = "master";
/// Create (or re-create) the `.git` directory skeleton and point HEAD at
/// the default branch.
///
/// An optional positional argument selects the target directory; otherwise
/// the context's working directory is used.
pub fn init_command<I, O, E>(ctx: CommandContext<I, O, E>) -> Result<(), String>
where
    I: Read,
    O: Write,
    E: Write,
{
    let working_dir = ctx.dir;
    let options = ctx.options.as_ref().unwrap();

    // An explicit positional argument overrides the current directory.
    let args: Vec<_> = options
        .values_of("args")
        .map(|vals| vals.collect())
        .unwrap_or_else(Vec::new);
    let root_path = args
        .first()
        .map(Path::new)
        .unwrap_or_else(|| working_dir.as_path());

    // Lay down the minimal object / ref directory structure.
    let git_path = root_path.join(".git");
    for dir in &["objects", "refs/heads"] {
        fs::create_dir_all(git_path.join(dir)).expect("failed to create dir");
    }

    // HEAD starts out as a symref to the (as yet unborn) default branch.
    let refs = Refs::new(&git_path);
    let branch_ref = Path::new("refs/heads").join(DEFAULT_BRANCH);
    refs.update_head(&format!(
        "ref: {}",
        branch_ref.to_str().expect("failed to convert path to str")
    ))
    .map_err(|e| e.to_string())?;

    println!("Initialized empty Jit repository in {:?}\n", git_path);
    Ok(())
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/commands/add.rs | src/commands/add.rs | use std::io::{self, Read, Write};
use crate::commands::CommandContext;
use crate::database::blob::Blob;
use crate::database::object::Object;
use crate::repository::Repository;
/// Fallback error shown when the index can be neither read nor created.
static INDEX_LOAD_OR_CREATE_FAILED: &'static str = "fatal: could not create/load .git/index\n";
/// Error text shown when `.git/index.lock` already exists, explaining
/// how to recover from a stale lock left by a crashed process.
fn locked_index_message(e: &std::io::Error) -> String {
    let mut message = format!("fatal: {}\n", e);
    message.push_str("Another jit process seems to be running in this repository. Please make sure all processes are terminated then try again.\n");
    message.push_str("If it still fails, a jit process may have crashed in this repository earlier: remove the .git/index.lock file manually to continue.\n");
    message
}
/// Error text shown when a file could not be read while staging it.
fn add_failed_message(e: &std::io::Error) -> String {
    let mut message = e.to_string();
    message.push_str("\nfatal: adding files failed\n");
    message
}
/// Stage a single file: store its contents as a blob in the database and
/// record the path, blob oid and file metadata in the index.
///
/// On a permission error the index lock is released before returning an
/// error so a later `add` is not wedged; any other read failure panics.
fn add_to_index(repo: &mut Repository, pathname: &str) -> Result<(), String> {
    let data = match repo.workspace.read_file(&pathname) {
        Ok(data) => data,
        Err(ref err) if err.kind() == io::ErrorKind::PermissionDenied => {
            // Drop .git/index.lock before bailing out.
            repo.index.release_lock().unwrap();
            return Err(add_failed_message(&err));
        }
        _ => {
            panic!("fatal: adding files failed");
        }
    };
    let stat = repo
        .workspace
        .stat_file(&pathname)
        .expect("could not stat file");
    let blob = Blob::new(data.as_bytes());
    repo.database.store(&blob).expect("storing blob failed");
    repo.index.add(&pathname, &blob.get_oid(), &stat);
    Ok(())
}
/// Stage the given paths (files or directories) into the index.
///
/// Takes the index lock, expands each argument to the files beneath it,
/// stores a blob for each file, and writes the updated index back.
pub fn add_command<I, O, E>(ctx: CommandContext<I, O, E>) -> Result<(), String>
where
    I: Read,
    O: Write,
    E: Write,
{
    let working_dir = ctx.dir;
    let root_path = working_dir.as_path();
    let mut repo = Repository::new(&root_path);

    let options = ctx.options.as_ref().unwrap();
    let args: Vec<_> = if let Some(args) = options.values_of("args") {
        args.collect()
    } else {
        vec![]
    };

    // Acquire .git/index.lock; a pre-existing lock file means another
    // process (or a crashed one) holds the index.
    match repo.index.load_for_update() {
        Ok(_) => (),
        Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
            return Err(locked_index_message(e));
        }
        Err(_) => {
            return Err(INDEX_LOAD_OR_CREATE_FAILED.to_string());
        }
    }

    // Expand every argument into the list of files it contains.
    let mut paths = vec![];
    for arg in args {
        let path = match working_dir.join(arg).canonicalize() {
            Ok(canon_path) => canon_path,
            Err(_) => {
                // Unknown pathspec: release the lock before bailing out.
                repo.index.release_lock().unwrap();
                return Err(format!(
                    "fatal: pathspec '{:}' did not match any files\n",
                    arg
                ));
            }
        };

        for pathname in repo.workspace.list_files(&path).unwrap() {
            paths.push(pathname);
        }
    }

    for pathname in paths {
        add_to_index(&mut repo, &pathname)?;
    }

    repo.index
        .write_updates()
        .expect("writing updates to index failed");
    Ok(())
}
/// End-to-end tests for `add`, driving the compiled binary via `CommandHelper`.
#[cfg(test)]
mod tests {
    use crate::commands::tests::*;

    #[test]
    fn add_regular_file_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper.jit_cmd(&["add", "hello.txt"]).unwrap();
        cmd_helper
            .assert_index(vec![(0o100644, "hello.txt".to_string())])
            .unwrap();
    }

    #[test]
    fn add_executable_file_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        cmd_helper.make_executable("hello.txt").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper.jit_cmd(&["add", "hello.txt"]).unwrap();
        // Executables are recorded with mode 100755 instead of 100644.
        cmd_helper
            .assert_index(vec![(0o100755, "hello.txt".to_string())])
            .unwrap();
    }

    #[test]
    fn add_multiple_files_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        cmd_helper.write_file("world.txt", b"world").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper
            .jit_cmd(&["add", "hello.txt", "world.txt"])
            .unwrap();
        cmd_helper
            .assert_index(vec![
                (0o100644, "hello.txt".to_string()),
                (0o100644, "world.txt".to_string()),
            ])
            .unwrap();
    }

    #[test]
    fn incrementally_add_files_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        cmd_helper.write_file("world.txt", b"world").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper.jit_cmd(&["add", "hello.txt"]).unwrap();
        cmd_helper
            .assert_index(vec![(0o100644, "hello.txt".to_string())])
            .unwrap();
        // A second `add` extends the existing index rather than replacing it.
        cmd_helper.jit_cmd(&["add", "world.txt"]).unwrap();
        cmd_helper
            .assert_index(vec![
                (0o100644, "hello.txt".to_string()),
                (0o100644, "world.txt".to_string()),
            ])
            .unwrap();
    }

    #[test]
    fn add_a_directory_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("a-dir/nested.txt", b"hello").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper.jit_cmd(&["add", "a-dir"]).unwrap();
        cmd_helper
            .assert_index(vec![(0o100644, "a-dir/nested.txt".to_string())])
            .unwrap();
    }

    #[test]
    fn add_repository_root_to_index() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("a/b/c/hello.txt", b"hello").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        cmd_helper.jit_cmd(&["add", "."]).unwrap();
        cmd_helper
            .assert_index(vec![(0o100644, "a/b/c/hello.txt".to_string())])
            .unwrap();
    }

    #[test]
    fn add_fails_for_non_existent_files() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        assert!(cmd_helper.jit_cmd(&["add", "hello.txt"]).is_err());
    }

    #[test]
    fn add_fails_for_unreadable_files() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        cmd_helper.make_unreadable("hello.txt").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        assert!(cmd_helper.jit_cmd(&["add", "hello.txt"]).is_err());
    }

    #[test]
    fn add_fails_if_index_is_locked() {
        let mut cmd_helper = CommandHelper::new();
        cmd_helper.write_file("hello.txt", b"hello").unwrap();
        // A pre-existing lock file simulates a concurrent/crashed process.
        cmd_helper.write_file(".git/index.lock", b"hello").unwrap();
        cmd_helper.jit_cmd(&["init"]).unwrap();
        assert!(cmd_helper.jit_cmd(&["add", "hello.txt"]).is_err());
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/diff/myers.rs | src/diff/myers.rs | use crate::diff::Line;
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fmt;
/// Kind of change one diff edit represents.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum EditType {
    /// Line unchanged between the two versions.
    Eql,
    /// Line inserted in the new version.
    Ins,
    /// Line deleted from the old version.
    Del,
}

impl EditType {
    /// One-character gutter marker used when rendering a diff line.
    fn to_string(&self) -> &str {
        match self {
            EditType::Eql => " ",
            EditType::Ins => "+",
            EditType::Del => "-",
        }
    }
}
/// One step of an edit script, carrying the affected line from either side.
#[derive(Debug, Clone)]
pub struct Edit {
    pub edit_type: EditType,
    /// Line in the old text (None for insertions).
    pub a_line: Option<Line>,
    /// Line in the new text (None for deletions).
    pub b_line: Option<Line>,
}

impl Edit {
    fn new(edit_type: EditType, a_line: Option<Line>, b_line: Option<Line>) -> Edit {
        Edit {
            edit_type,
            a_line,
            b_line,
        }
    }
}

impl fmt::Display for Edit {
    /// Renders as the gutter marker followed by the line text, preferring
    /// the old-side line when both are present. Panics if both are None.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let line = if let Some(a) = &self.a_line {
            a
        } else if let Some(b) = &self.b_line {
            b
        } else {
            panic!("both lines None")
        };
        write!(f, "{}{}", self.edit_type.to_string(), line)
    }
}
/// Myers shortest-edit-script diff between two line sequences
/// (`a` = old text, `b` = new text).
pub struct Myers {
    a: Vec<Line>,
    b: Vec<Line>,
}
/// Convert a diff coordinate to `usize` for slice indexing.
///
/// # Panics
/// Panics if `i` is negative; the diff algorithm only passes
/// non-negative positions here, so a failure indicates a bug.
fn to_usize(i: isize) -> usize {
    usize::try_from(i).expect("negative diff coordinate")
}
impl Myers {
    pub fn new(a: Vec<Line>, b: Vec<Line>) -> Myers {
        Myers { a, b }
    }

    /// Turn the backtracked (prev_x, prev_y, x, y) moves into an edit
    /// script in forward order.
    pub fn diff(&self) -> Vec<Edit> {
        let mut diff = vec![];
        for (prev_x, prev_y, x, y) in self.backtrack().iter() {
            let a_line = if to_usize(*prev_x) >= self.a.len() {
                None
            } else {
                Some(self.a[to_usize(*prev_x)].clone())
            };
            let b_line = if to_usize(*prev_y) >= self.b.len() {
                None
            } else {
                Some(self.b[to_usize(*prev_y)].clone())
            };
            // A horizontal move consumes a line of `b` (insertion), a
            // vertical move a line of `a` (deletion), a diagonal move one
            // of each (match).
            if x == prev_x {
                diff.push(Edit::new(EditType::Ins, None, b_line));
            } else if y == prev_y {
                diff.push(Edit::new(EditType::Del, a_line, None));
            } else {
                diff.push(Edit::new(EditType::Eql, a_line, b_line));
            }
        }
        // Backtracking walks end -> start, so restore forward order.
        diff.reverse();
        diff
    }

    /// Forward pass of the Myers algorithm: for each edit distance `d`,
    /// record the furthest-reaching x on every diagonal `k = x - y`.
    /// Returns one snapshot of the diagonal map per value of `d`.
    fn shortest_edit(&self) -> Vec<BTreeMap<isize, isize>> {
        let n = self.a.len() as isize;
        let m = self.b.len() as isize;
        let max: isize = n + m;

        let mut v = BTreeMap::new();
        v.insert(1, 0);
        let mut trace = vec![];

        for d in 0..=max {
            trace.push(v.clone());
            for k in (-d..=d).step_by(2) {
                let mut x: isize =
                    if k == -d || (k != d && v.get(&(k - 1)).unwrap() < v.get(&(k + 1)).unwrap()) {
                        // v[k+1] has the farthest x- position along line
                        // k+1
                        // move downward
                        *v.get(&(k + 1)).unwrap()
                    } else {
                        // move rightward
                        v.get(&(k - 1)).unwrap() + 1
                    };

                let mut y: isize = x - k;

                // Follow the diagonal through any run of matching lines.
                while x < n && y < m && self.a[to_usize(x)].text == self.b[to_usize(y)].text {
                    x = x + 1;
                    y = y + 1;
                }

                v.insert(k, x);
                // Reached the bottom-right corner: a shortest script exists.
                if x >= n && y >= m {
                    return trace;
                }
            }
        }
        vec![]
    }

    /// Walk the trace from (n, m) back to (0, 0), yielding each move as
    /// (prev_x, prev_y, x, y) in reverse order.
    fn backtrack(&self) -> Vec<(isize, isize, isize, isize)> {
        let mut x = self.a.len() as isize;
        let mut y = self.b.len() as isize;

        let mut seq = vec![];

        for (d, v) in self.shortest_edit().iter().enumerate().rev() {
            let d = d as isize;
            let k = x - y;
            // The same tie-break as the forward pass decides whether we
            // came from the diagonal above or below.
            let prev_k =
                if k == -d || (k != d && v.get(&(k - 1)).unwrap() < v.get(&(k + 1)).unwrap()) {
                    k + 1
                } else {
                    k - 1
                };
            let prev_x = *v.get(&prev_k).unwrap();
            let prev_y = prev_x - prev_k;

            // Emit the diagonal (matching) moves first.
            while x > prev_x && y > prev_y {
                seq.push((x - 1, y - 1, x, y));
                x = x - 1;
                y = y - 1;
            }

            if d > 0 {
                seq.push((prev_x, prev_y, x, y));
            }

            x = prev_x;
            y = prev_y;
        }
        seq
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/diff/mod.rs | src/diff/mod.rs | pub mod myers;
use myers::{Edit, EditType, Myers};
use std::fmt;
/// Namespace for computing line-based diffs between two texts.
pub struct Diff {}
/// A single line of text tagged with its 1-based line number.
#[derive(Clone, Debug)]
pub struct Line {
    number: usize,
    text: String,
}

impl Line {
    fn new(number: usize, text: &str) -> Line {
        Line {
            number,
            text: text.to_string(),
        }
    }
}

impl fmt::Display for Line {
    /// Displays only the text; the number is positional metadata.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.text)
    }
}
/// Split `a` on '\n' into numbered `Line`s (numbering starts at 1).
fn lines(a: &str) -> Vec<Line> {
    a.split('\n')
        .enumerate()
        .map(|(idx, text)| Line::new(idx + 1, text))
        .collect()
}
impl Diff {
    /// Full edit script between two texts, split on '\n'.
    pub fn diff(a: &str, b: &str) -> Vec<Edit> {
        let a_lines = lines(a);
        let b_lines = lines(b);
        Myers::new(a_lines, b_lines).diff()
    }

    /// Edit script grouped into context-bearing hunks.
    pub fn diff_hunks(a: &str, b: &str) -> Vec<Hunk> {
        Hunk::filter(Self::diff(a, b))
    }
}
/// Bounds-checked lookup into `edits`, tolerating negative offsets
/// (which arise when stepping back for leading hunk context).
fn get_edit(edits: &[Edit], offset: isize) -> Option<&Edit> {
    if offset < 0 {
        None
    } else {
        edits.get(offset as usize)
    }
}
/// Number of unchanged context lines kept on each side of a change.
const HUNK_CONTEXT: usize = 3;

/// Placeholder edit used when peeking outside the edit list.
const EMPTY_EDIT: Edit = Edit {
    edit_type: EditType::Eql,
    a_line: None,
    b_line: None,
};
/// A contiguous run of edits plus surrounding context, as rendered
/// under one `@@` header.
pub struct Hunk {
    pub a_start: usize,
    pub b_start: usize,
    pub edits: Vec<Edit>,
}

/// Selects which side of an edit (old file or new file) to inspect.
enum LineType {
    A,
    B,
}
impl Hunk {
    fn new(a_start: usize, b_start: usize, edits: Vec<Edit>) -> Hunk {
        Hunk {
            a_start,
            b_start,
            edits,
        }
    }

    /// The "@@ -start,len +start,len @@" header for this hunk.
    pub fn header(&self) -> String {
        let (a_start, a_lines) = self.offsets_for(LineType::A, self.a_start);
        let (b_start, b_lines) = self.offsets_for(LineType::B, self.b_start);

        format!("@@ -{},{} +{},{} @@", a_start, a_lines, b_start, b_lines)
    }

    /// Start line number and line count for one side of the hunk;
    /// `default` supplies the start when that side has no lines at all.
    fn offsets_for(&self, line_type: LineType, default: usize) -> (usize, usize) {
        let lines: Vec<_> = self
            .edits
            .iter()
            .map(|e| match line_type {
                LineType::A => &e.a_line,
                LineType::B => &e.b_line,
            })
            .filter_map(|l| l.as_ref())
            .collect();

        let start = if lines.len() > 0 {
            lines[0].number
        } else {
            default
        };
        (start, lines.len())
    }

    /// Group a flat edit script into hunks, keeping `HUNK_CONTEXT`
    /// unchanged lines around each run of changes.
    pub fn filter(edits: Vec<Edit>) -> Vec<Hunk> {
        let mut hunks = vec![];
        let mut offset: isize = 0;
        let empty_line = Line::new(0, "");
        loop {
            // Skip over Eql edits
            while let Some(edit) = get_edit(&edits, offset) {
                if edit.edit_type == EditType::Eql {
                    offset += 1;
                } else {
                    break;
                }
            }
            if offset >= (edits.len() as isize) {
                return hunks;
            }

            // Step back to include leading context; the extra +1 is
            // consumed by build_hunk's first advance.
            offset -= (HUNK_CONTEXT + 1) as isize;

            let a_start = if offset < 0 {
                0
            } else {
                get_edit(&edits, offset)
                    .unwrap_or(&EMPTY_EDIT)
                    .a_line
                    .clone()
                    .unwrap_or(empty_line.clone())
                    .number
            };
            let b_start = if offset < 0 {
                0
            } else {
                get_edit(&edits, offset)
                    .unwrap_or(&EMPTY_EDIT)
                    .b_line
                    .clone()
                    .unwrap_or(empty_line.clone())
                    .number
            };

            let (hunk, new_offset) = Self::build_hunk(a_start, b_start, &edits, offset);
            hunks.push(hunk);
            offset = new_offset;
        }
    }

    /// Collect edits into one hunk starting at `offset`. `counter` tracks
    /// how many more edits to take and is topped back up whenever another
    /// change is visible within `HUNK_CONTEXT` edits ahead, so nearby
    /// changes merge into a single hunk. Returns the hunk and the offset
    /// at which scanning should resume.
    fn build_hunk(
        a_start: usize,
        b_start: usize,
        edits: &[Edit],
        mut offset: isize,
    ) -> (Hunk, isize) {
        let mut counter: isize = -1;
        let mut hunk = Hunk::new(a_start, b_start, vec![]);
        while counter != 0 {
            if offset >= 0 && counter > 0 {
                hunk.edits.push(
                    get_edit(edits, offset)
                        .expect("offset out of bounds")
                        .clone(),
                )
            }

            offset += 1;
            if offset >= edits.len() as isize {
                break;
            }

            if let Some(edit) = get_edit(edits, offset + HUNK_CONTEXT as isize) {
                match edit.edit_type {
                    EditType::Ins | EditType::Del => {
                        counter = (2 * HUNK_CONTEXT + 1) as isize;
                    }
                    _ => {
                        counter -= 1;
                    }
                }
            } else {
                counter -= 1;
            }
        }
        (hunk, offset)
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/repository/mod.rs | src/repository/mod.rs | use crate::database::blob::Blob;
use crate::database::commit::Commit;
use crate::database::object::Object;
use crate::database::tree::TreeEntry;
use crate::database::Database;
use crate::database::ParsedObject;
use crate::index;
use crate::index::Index;
use crate::refs::Refs;
use crate::workspace::Workspace;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::fs;
use std::path::{Path, PathBuf};
pub mod migration;
use migration::Migration;
/// How a path differs between two of the three trees (HEAD, index, workspace).
#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)]
pub enum ChangeType {
    Added,
    Modified,
    Deleted,
    Untracked,
    NoChange,
}

/// Which comparison a recorded change belongs to.
#[derive(Clone, Copy, Hash, Eq, PartialEq)]
enum ChangeKind {
    /// Index vs working tree.
    Workspace,
    /// HEAD vs index.
    Index,
}
/// Central handle bundling the object database, index, refs and working
/// tree, plus the state accumulated while computing `status`.
pub struct Repository {
    pub database: Database,
    pub index: Index,
    pub refs: Refs,
    pub workspace: Workspace,
    // status fields
    pub root_path: PathBuf,
    /// stat() results for tracked files found during the workspace scan.
    pub stats: HashMap<String, fs::Metadata>,
    /// Paths in the workspace that are not in the index (dirs get a trailing '/').
    pub untracked: BTreeSet<String>,
    /// Union of all paths with any recorded change.
    pub changed: BTreeSet<String>,
    /// Index-vs-workspace differences.
    pub workspace_changes: BTreeMap<String, ChangeType>,
    /// HEAD-vs-index differences.
    pub index_changes: BTreeMap<String, ChangeType>,
    /// Flattened path -> entry map of the tree at HEAD.
    pub head_tree: HashMap<String, TreeEntry>,
}
impl Repository {
    pub fn new(root_path: &Path) -> Repository {
        let git_path = root_path.join(".git");
        let db_path = git_path.join("objects");
        Repository {
            database: Database::new(&db_path),
            index: Index::new(&git_path.join("index")),
            refs: Refs::new(&git_path),
            workspace: Workspace::new(git_path.parent().unwrap()),

            root_path: root_path.to_path_buf(),
            stats: HashMap::new(),
            untracked: BTreeSet::new(),
            changed: BTreeSet::new(),
            workspace_changes: BTreeMap::new(),
            index_changes: BTreeMap::new(),
            head_tree: HashMap::new(),
        }
    }

    /// Populate the status fields: scan the workspace, load the tree at
    /// HEAD, then diff HEAD vs index vs working tree.
    pub fn initialize_status(&mut self) -> Result<(), String> {
        self.scan_workspace(&self.root_path.clone()).unwrap();
        self.load_head_tree();
        self.check_index_entries().map_err(|e| e.to_string())?;
        self.collect_deleted_head_files();

        Ok(())
    }

    /// Record paths that exist in HEAD but are no longer tracked in the
    /// index as staged deletions.
    fn collect_deleted_head_files(&mut self) {
        let paths: Vec<String> = {
            self.head_tree
                .iter()
                .map(|(path, _)| path.clone())
                .collect()
        };
        for path in paths {
            if !self.index.is_tracked_file(&path) {
                self.record_change(&path, ChangeKind::Index, ChangeType::Deleted);
            }
        }
    }

    /// Load the tree of the commit HEAD points at into `head_tree`.
    /// No-op when HEAD is unborn.
    fn load_head_tree(&mut self) {
        let head_oid = self.refs.read_head();

        if let Some(head_oid) = head_oid {
            let commit: Commit = {
                if let ParsedObject::Commit(commit) = self.database.load(&head_oid) {
                    commit.clone()
                } else {
                    panic!("HEAD points to a non-commit");
                }
            };
            self.read_tree(&commit.tree_oid, Path::new(""));
        }
    }

    /// Recursively flatten the tree at `tree_oid` into `head_tree`,
    /// keyed by path relative to the repository root.
    fn read_tree(&mut self, tree_oid: &str, prefix: &Path) {
        let entries = {
            if let ParsedObject::Tree(tree) = self.database.load(tree_oid) {
                tree.entries.clone()
            } else {
                BTreeMap::new()
            }
        };

        for (name, entry) in entries {
            let path = prefix.join(name);

            if entry.is_tree() {
                self.read_tree(&entry.get_oid(), &path);
            } else {
                self.head_tree
                    .insert(path.to_str().unwrap().to_string(), entry);
            }
        }
    }

    /// Walk the working tree under `prefix`: stat tracked files into
    /// `stats` and collect untracked-but-trackable paths into `untracked`
    /// (directories gain a trailing '/').
    fn scan_workspace(&mut self, prefix: &Path) -> Result<(), std::io::Error> {
        for (mut path, stat) in self.workspace.list_dir(prefix)? {
            if self.index.is_tracked(&path) {
                if self.workspace.is_dir(&path) {
                    self.scan_workspace(&self.workspace.abs_path(&path))?;
                } else {
                    // path is file
                    self.stats.insert(path.to_string(), stat);
                }
            } else if self.is_trackable_path(&path, &stat)? {
                if self.workspace.is_dir(&path) {
                    path.push('/');
                }
                self.untracked.insert(path);
            }
        }
        Ok(())
    }

    /// Compare every index entry against both the workspace and HEAD.
    fn check_index_entries(&mut self) -> Result<(), std::io::Error> {
        let entries: Vec<index::Entry> = self
            .index
            .entries
            .iter()
            .map(|(_, entry)| entry.clone())
            .collect();
        for mut entry in entries {
            self.check_index_against_workspace(&mut entry);
            self.check_index_against_head_tree(&mut entry);
        }
        Ok(())
    }

    /// Note that `path` changed, both in the global `changed` set and in
    /// the per-kind change map.
    fn record_change(&mut self, path: &str, change_kind: ChangeKind, change_type: ChangeType) {
        self.changed.insert(path.to_string());

        let changes_map = match change_kind {
            ChangeKind::Index => &mut self.index_changes,
            ChangeKind::Workspace => &mut self.workspace_changes,
        };

        changes_map.insert(path.to_string(), change_type);
    }

    /// Classify an index entry against the working-tree file: cheap stat
    /// comparisons first, falling back to hashing the file contents.
    fn compare_index_to_workspace(
        &self,
        entry: Option<&index::Entry>,
        stat: Option<&fs::Metadata>,
    ) -> ChangeType {
        if entry.is_none() {
            return ChangeType::Untracked;
        }
        if stat.is_none() {
            return ChangeType::Deleted;
        }

        // Checks above ensure `entry` and `stat` are not None below
        // this
        let entry = entry.unwrap();
        let stat = stat.unwrap();

        if !entry.stat_match(&stat) {
            return ChangeType::Modified;
        }

        // Timestamps agree: trust the stat and skip re-hashing.
        if entry.times_match(&stat) {
            return ChangeType::NoChange;
        }

        let data = self
            .workspace
            .read_file(&entry.path)
            .expect("failed to read file");
        let blob = Blob::new(data.as_bytes());
        let oid = blob.get_oid();

        if entry.oid != oid {
            return ChangeType::Modified;
        }
        ChangeType::NoChange
    }

    /// Classify a HEAD tree item against the corresponding index entry.
    fn compare_tree_to_index(
        &self,
        item: Option<&TreeEntry>,
        entry: Option<&index::Entry>,
    ) -> ChangeType {
        if item.is_none() && entry.is_none() {
            return ChangeType::NoChange;
        }

        if item.is_none() {
            return ChangeType::Added;
        }
        if entry.is_none() {
            return ChangeType::Deleted;
        }

        // Checks above ensure `entry` and `stat` are not None below
        // this
        let entry = entry.unwrap();
        let item = item.unwrap();

        if !(entry.mode == item.mode() && entry.oid == item.get_oid()) {
            return ChangeType::Modified;
        }

        ChangeType::NoChange
    }

    /// Adds modified entries to self.changed
    fn check_index_against_workspace(&mut self, mut entry: &mut index::Entry) {
        let stat = self.stats.get(&entry.path);
        let status = self.compare_index_to_workspace(Some(entry), stat);

        if status == ChangeType::NoChange {
            // Unchanged: refresh the cached stat so later runs can take
            // the fast path.
            let stat = stat.expect("empty stat");
            self.index.update_entry_stat(&mut entry, &stat);
        } else {
            self.record_change(&entry.path, ChangeKind::Workspace, status);
        }
    }

    /// Record any HEAD-vs-index difference for this entry.
    fn check_index_against_head_tree(&mut self, entry: &mut index::Entry) {
        let item = self.head_tree.get(&entry.path);
        let status = self.compare_tree_to_index(item, Some(entry));

        if status != ChangeType::NoChange {
            self.record_change(&entry.path, ChangeKind::Index, status);
        }
    }

    /// Check if path is trackable but not currently tracked
    fn is_trackable_path(&self, path: &str, stat: &fs::Metadata) -> Result<bool, std::io::Error> {
        if stat.is_file() {
            return Ok(!self.index.is_tracked_file(path));
        }

        // For a directory: trackable if any file anywhere beneath it is.
        // Files are checked before subdirectories.
        let items = self.workspace.list_dir(&self.workspace.abs_path(path))?;
        let (files, dirs): (Vec<(&String, &fs::Metadata)>, Vec<(&String, &fs::Metadata)>) =
            items.iter().partition(|(_path, stat)| stat.is_file());

        for (file_path, file_stat) in files.iter() {
            if self.is_trackable_path(file_path, file_stat)? {
                return Ok(true);
            }
        }

        for (dir_path, dir_stat) in dirs.iter() {
            if self.is_trackable_path(dir_path, dir_stat)? {
                return Ok(true);
            }
        }

        Ok(false)
    }

    /// Build a `Migration` that would transform the workspace/index
    /// according to `tree_diff`.
    pub fn migration(
        &mut self,
        tree_diff: HashMap<PathBuf, (Option<TreeEntry>, Option<TreeEntry>)>,
    ) -> Migration {
        Migration::new(self, tree_diff)
    }
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/repository/migration.rs | src/repository/migration.rs | use crate::database::tree::TreeEntry;
use crate::index::Entry;
use crate::repository::{ChangeType, Repository};
use std::collections::{BTreeSet, HashMap, HashSet};
use std::fs;
use std::path::{Path, PathBuf};
// Human-readable (header, footer) message pairs for each checkout
// conflict class, mirroring Git's checkout error output.
lazy_static! {
    static ref MESSAGES: HashMap<ConflictType, (&'static str, &'static str)> = {
        let mut m = HashMap::new();
        m.insert(
            ConflictType::StaleFile,
            (
                "Your local changes to the following files would be overwritten by checkout:",
                "Please commit your changes to stash them before you switch branches",
            ),
        );
        m.insert(
            ConflictType::StaleDirectory,
            (
                "Updating the following directories would lose untracked files in them:",
                "\n",
            ),
        );
        m.insert(
            ConflictType::UntrackedOverwritten,
            (
                "The following untracked working tree files would be overwritten by checkout:",
                "Please move or remove them before you switch branches",
            ),
        );
        m.insert(
            ConflictType::UntrackedRemoved,
            (
                "The following untracked working tree files would be removed by checkout:",
                "Please commit your changes to stash them before you switch branches",
            ),
        );
        m
    };
}
/// A planned transition of the index and workspace from one tree to
/// another (e.g. during checkout).
pub struct Migration<'a> {
    repo: &'a mut Repository,
    /// path -> (old entry, new entry), produced by a tree diff.
    diff: HashMap<PathBuf, (Option<TreeEntry>, Option<TreeEntry>)>,
    /// Planned file operations grouped by action.
    pub changes: HashMap<Action, Vec<(PathBuf, Option<TreeEntry>)>>,
    // NOTE(review): presumably directories to create / remove when the
    // plan is applied — confirm against apply_changes/update_workspace.
    pub mkdirs: BTreeSet<PathBuf>,
    pub rmdirs: BTreeSet<PathBuf>,
    pub errors: Vec<String>,
    /// Paths that block the migration, grouped by conflict class.
    pub conflicts: HashMap<ConflictType, HashSet<PathBuf>>,
}
/// Classes of checkout conflict; each maps to a message pair in `MESSAGES`.
#[derive(Hash, PartialEq, Eq)]
pub enum ConflictType {
    StaleFile,
    StaleDirectory,
    UntrackedOverwritten,
    UntrackedRemoved,
}

/// File operation a migration plans for a path.
#[derive(Hash, PartialEq, Eq, Debug)]
pub enum Action {
    Create,
    Delete,
    Update,
}
impl<'a> Migration<'a> {
pub fn new(
repo: &'a mut Repository,
tree_diff: HashMap<PathBuf, (Option<TreeEntry>, Option<TreeEntry>)>,
) -> Migration<'a> {
// TODO: can be a struct instead(?)
let mut changes = HashMap::new();
changes.insert(Action::Create, vec![]);
changes.insert(Action::Delete, vec![]);
changes.insert(Action::Update, vec![]);
let conflicts = {
let mut m = HashMap::new();
m.insert(ConflictType::StaleFile, HashSet::new());
m.insert(ConflictType::StaleDirectory, HashSet::new());
m.insert(ConflictType::UntrackedOverwritten, HashSet::new());
m.insert(ConflictType::UntrackedRemoved, HashSet::new());
m
};
Migration {
repo,
diff: tree_diff,
changes,
mkdirs: BTreeSet::new(),
rmdirs: BTreeSet::new(),
errors: vec![],
conflicts,
}
}
pub fn apply_changes(&mut self) -> Result<(), String> {
match self.plan_changes() {
Ok(_) => (),
Err(errors) => return Err(errors.join("\n")),
}
self.update_workspace()?;
self.update_index();
Ok(())
}
fn plan_changes(&mut self) -> Result<(), Vec<String>> {
for (path, (old_item, new_item)) in self.diff.clone() {
self.check_for_conflict(&path, &old_item, &new_item);
self.record_change(&path, old_item, new_item);
}
self.collect_errors()
}
fn insert_conflict(&mut self, conflict_type: &ConflictType, path: &Path) {
if let Some(conflicts) = self.conflicts.get_mut(conflict_type) {
conflicts.insert(path.to_path_buf());
}
}
fn check_for_conflict(
&mut self,
path: &Path,
old_item: &Option<TreeEntry>,
new_item: &Option<TreeEntry>,
) {
let path_str = path.to_str().unwrap();
let entry = self.repo.index.entry_for_path(path_str).cloned();
if self.index_differs_from_trees(entry.as_ref(), old_item.as_ref(), new_item.as_ref()) {
self.insert_conflict(&ConflictType::StaleFile, &path);
return;
}
let stat = self.repo.workspace.stat_file(path_str).ok();
let error_type = self.get_error_type(&stat, &entry.as_ref(), new_item);
if stat.is_none() {
let parent = self.untracked_parent(path);
if parent.is_some() {
let parent = parent.unwrap();
let conflict_path = if entry.is_some() { path } else { &parent };
self.insert_conflict(&error_type, conflict_path);
}
} else if Self::stat_is_file(&stat) {
let changed = self
.repo
.compare_index_to_workspace(entry.as_ref(), stat.as_ref());
if changed != ChangeType::NoChange {
self.insert_conflict(&error_type, path);
}
} else if Self::stat_is_dir(&stat) {
let trackable = self
.repo
.is_trackable_path(path_str, &stat.unwrap())
.ok()
.unwrap_or(false);
if trackable {
self.insert_conflict(&error_type, path);
}
}
}
fn untracked_parent(&self, path: &'a Path) -> Option<PathBuf> {
let dirname = path.parent().expect("failed to get dirname");
for parent in dirname.ancestors() {
let parent_path_str = parent.to_str().unwrap();
if parent_path_str == "" {
continue;
}
if let Ok(parent_stat) = self.repo.workspace.stat_file(parent_path_str) {
if parent_stat.is_dir() {
continue;
}
if self
.repo
.is_trackable_path(parent_path_str, &parent_stat)
.unwrap_or(false)
{
return Some(parent.to_path_buf());
}
}
}
None
}
fn stat_is_dir(stat: &Option<fs::Metadata>) -> bool {
match stat {
None => false,
Some(stat) => stat.is_dir(),
}
}
fn stat_is_file(stat: &Option<fs::Metadata>) -> bool {
match stat {
None => false,
Some(stat) => stat.is_file(),
}
}
fn get_error_type(
&self,
stat: &Option<fs::Metadata>,
entry: &Option<&Entry>,
item: &Option<TreeEntry>,
) -> ConflictType {
if entry.is_some() {
ConflictType::StaleFile
} else if Self::stat_is_dir(&stat) {
ConflictType::StaleDirectory
} else if item.is_some() {
ConflictType::UntrackedOverwritten
} else {
ConflictType::UntrackedRemoved
}
}
fn index_differs_from_trees(
&self,
entry: Option<&Entry>,
old_item: Option<&TreeEntry>,
new_item: Option<&TreeEntry>,
) -> bool {
self.repo.compare_tree_to_index(old_item, entry) != ChangeType::NoChange
&& self.repo.compare_tree_to_index(new_item, entry) != ChangeType::NoChange
}
fn collect_errors(&mut self) -> Result<(), Vec<String>> {
for (conflict_type, paths) in &self.conflicts {
if paths.is_empty() {
continue;
}
let (header, footer) = MESSAGES.get(&conflict_type).unwrap();
let mut error = vec![header.to_string()];
for p in paths {
error.push(format!("\t{}", p.to_str().unwrap()));
}
error.push(footer.to_string());
error.push("\n".to_string());
self.errors.push(error[..].join("\n"));
}
if !self.errors.is_empty() {
return Err(self.errors.clone());
}
Ok(())
}
fn record_change(
&mut self,
path: &Path,
old_item: Option<TreeEntry>,
new_item: Option<TreeEntry>,
) {
let path_ancestors: BTreeSet<_> = path
.parent()
.expect("could not find parent")
.ancestors()
.map(|p| p.to_path_buf())
.filter(|p| p.parent().is_some()) // filter out root path
.collect();
let action = if old_item.is_none() {
self.mkdirs = self.mkdirs.union(&path_ancestors).cloned().collect();
Action::Create
} else if new_item.is_none() {
self.rmdirs = self.rmdirs.union(&path_ancestors).cloned().collect();
Action::Delete
} else {
self.mkdirs = self.mkdirs.union(&path_ancestors).cloned().collect();
Action::Update
};
if let Some(action_changes) = self.changes.get_mut(&action) {
action_changes.push((path.to_path_buf(), new_item));
}
}
fn update_workspace(&mut self) -> Result<(), String> {
self.repo.workspace.apply_migration(
&mut self.repo.database,
&self.changes,
&self.rmdirs,
&self.mkdirs,
)
}
fn update_index(&mut self) {
for (path, _) in self.changes.get(&Action::Delete).unwrap() {
self.repo
.index
.remove(path.to_str().expect("failed to convert path to str"));
}
for action in &[Action::Create, Action::Update] {
for (path, entry) in self.changes.get(action).unwrap() {
let path = path.to_str().expect("failed to convert path to str");
let entry_oid = entry.clone().unwrap().get_oid();
let stat = self
.repo
.workspace
.stat_file(path)
.expect("failed to stat file");
self.repo.index.add(path, &entry_oid, &stat);
}
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/object.rs | src/database/object.rs | use crate::database::ParsedObject;
use crypto::digest::Digest;
use crypto::sha1::Sha1;
pub trait Object {
fn r#type(&self) -> String;
fn to_string(&self) -> Vec<u8>;
fn parse(s: &[u8]) -> ParsedObject;
fn get_oid(&self) -> String {
let mut hasher = Sha1::new();
hasher.input(&self.get_content());
hasher.result_str()
}
fn get_content(&self) -> Vec<u8> {
// TODO: need to do something to force ASCII encoding?
let string = self.to_string();
let mut content: Vec<u8> = self.r#type().as_bytes().to_vec();
content.push(0x20);
content.extend_from_slice(format!("{}", string.len()).as_bytes());
content.push(0x0);
content.extend_from_slice(&string);
content
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/tree.rs | src/database/tree.rs | use crate::database::object::Object;
use crate::database::{Entry, ParsedObject};
use crate::util::*;
use std::collections::{BTreeMap};
use std::path::{Path};
use std::str;
pub const TREE_MODE: u32 = 0o40000;
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum TreeEntry {
Entry(Entry),
Tree(Tree),
}
impl TreeEntry {
pub fn mode(&self) -> u32 {
match self {
TreeEntry::Entry(e) => e.mode(),
_ => TREE_MODE,
}
}
pub fn get_oid(&self) -> String {
match self {
TreeEntry::Entry(e) => e.oid.clone(),
TreeEntry::Tree(t) => t.get_oid(),
}
}
pub fn is_tree(&self) -> bool {
match self {
TreeEntry::Entry(e) => e.mode() == TREE_MODE,
_ => false,
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Tree {
pub entries: BTreeMap<String, TreeEntry>,
}
impl Tree {
pub fn new() -> Tree {
Tree {
entries: BTreeMap::new(),
}
}
pub fn build(entries: &[Entry]) -> Tree {
let mut sorted_entries = entries.to_vec();
sorted_entries.sort();
let mut root = Tree::new();
for entry in sorted_entries.iter() {
let mut path: Vec<String> = Path::new(&entry.name)
.iter()
.map(|c| c.to_str().unwrap().to_string())
.collect();
let name = path.pop().expect("file path has zero components");
root.add_entry(&path, name, entry.clone());
}
root
}
pub fn add_entry(&mut self, path: &[String], name: String, entry: Entry) {
if path.is_empty() {
self.entries.insert(name, TreeEntry::Entry(entry));
} else if let Some(TreeEntry::Tree(tree)) = self.entries.get_mut(&path[0]) {
tree.add_entry(&path[1..], name, entry);
} else {
let mut tree = Tree::new();
tree.add_entry(&path[1..], name, entry);
self.entries.insert(path[0].clone(), TreeEntry::Tree(tree));
};
}
pub fn traverse<F>(&self, f: &F)
where
F: Fn(&Tree) -> (),
{
// Do a postorder traversal(visit all children first, then
// process `self`
for (_name, entry) in self.entries.clone() {
if let TreeEntry::Tree(tree) = entry {
tree.traverse(f);
}
}
f(self);
}
}
impl Object for Tree {
fn r#type(&self) -> String {
"tree".to_string()
}
fn to_string(&self) -> Vec<u8> {
let mut tree_vec = Vec::new();
for (name, entry) in self.entries.iter() {
let mut entry_vec: Vec<u8> =
format!("{:o} {}\0", entry.mode(), name).as_bytes().to_vec();
entry_vec.extend_from_slice(&decode_hex(&entry.get_oid()).expect("invalid oid"));
tree_vec.extend_from_slice(&entry_vec);
}
tree_vec
}
fn parse(v: &[u8]) -> ParsedObject {
let mut entries: Vec<Entry> = vec![];
let mut vs = v;
while !vs.is_empty() {
let (mode, rest): (u32, &[u8]) = match vs
.splitn(2, |c| *c as char == ' ')
.collect::<Vec<&[u8]>>()
.as_slice()
{
&[mode, rest] => (
u32::from_str_radix(str::from_utf8(mode).expect("invalid utf8"), 8)
.expect("parsing mode failed"),
rest,
),
_ => panic!("EOF while parsing mode"),
};
vs = rest;
let (name, rest) = match *vs
.splitn(2, |c| *c as char == '\u{0}')
.collect::<Vec<&[u8]>>()
.as_slice()
{
[name_bytes, rest] => (str::from_utf8(name_bytes).expect("invalid utf8"), rest),
_ => panic!("EOF while parsing name"),
};
vs = rest;
let (oid_bytes, rest) = vs.split_at(20);
vs = rest;
let oid = encode_hex(&oid_bytes);
entries.push(Entry::new(name, &oid, mode));
}
ParsedObject::Tree(Tree::build(&entries))
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/commit.rs | src/database/commit.rs | use chrono::prelude::*;
use std::collections::HashMap;
use std::str;
use crate::database::{Object, ParsedObject};
#[derive(Debug, Clone)]
pub struct Author {
pub name: String,
pub email: String,
pub time: DateTime<FixedOffset>,
}
impl Author {
fn to_string(&self) -> String {
format!(
"{} <{}> {}",
self.name,
self.email,
self.time.format("%s %z")
)
}
pub fn short_date(&self) -> String {
self.time.format("%Y-%m-%d").to_string()
}
pub fn readable_time(&self) -> String {
self.time.format("%a %b %-d %H:%M:%S %Y %Z").to_string()
}
pub fn parse(s: &str) -> Author {
let split_author_str = s
.split(&['<', '>'][..])
.map(|s| s.trim())
.collect::<Vec<_>>();
let name = split_author_str[0].to_string();
let email = split_author_str[1].to_string();
let time = DateTime::parse_from_str(split_author_str[2], "%s %z")
.expect("could not parse datetime");
Author { name, email, time }
}
}
#[derive(Debug, Clone)]
pub struct Commit {
pub parent: Option<String>,
pub tree_oid: String,
pub author: Author,
pub message: String,
}
impl Commit {
pub fn new(
parent: &Option<String>,
tree_oid: String,
author: Author,
message: String,
) -> Commit {
Commit {
parent: parent.clone(),
tree_oid,
author,
message,
}
}
pub fn title_line(&self) -> String {
self.message
.lines()
.next()
.expect("could not get first line of commit")
.to_string()
}
}
impl Object for Commit {
fn r#type(&self) -> String {
"commit".to_string()
}
fn to_string(&self) -> Vec<u8> {
let author_str = self.author.to_string();
let mut lines = String::new();
lines.push_str(&format!("tree {}\n", self.tree_oid));
if let Some(parent_oid) = &self.parent {
lines.push_str(&format!("parent {}\n", parent_oid));
}
lines.push_str(&format!("author {}\n", author_str));
lines.push_str(&format!("committer {}\n", author_str));
lines.push_str("\n");
lines.push_str(&self.message);
lines.as_bytes().to_vec()
}
fn parse(s: &[u8]) -> ParsedObject {
let mut s = str::from_utf8(s).expect("invalid utf-8");
let mut headers = HashMap::new();
// Parse headers
loop {
if let Some(newline) = s.find('\n') {
let line = &s[..newline];
s = &s[newline + 1..];
// Headers and commit message is separated by empty
// line
if line == "" {
break;
}
let v: Vec<&str> = line.splitn(2, ' ').collect();
headers.insert(v[0], v[1]);
} else {
panic!("no body in commit");
}
}
ParsedObject::Commit(Commit::new(
&headers.get("parent").map(|s| s.to_string()),
headers.get("tree").expect("no tree header").to_string(),
Author::parse(headers.get("author").expect("no author found in commit")),
s.to_string(),
))
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/mod.rs | src/database/mod.rs | use std::collections::HashMap;
use std::fs::{self, OpenOptions};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::str;
use flate2::read::ZlibDecoder;
use flate2::write::ZlibEncoder;
use flate2::Compression;
use crate::index;
use crate::util::*;
pub mod blob;
pub mod commit;
pub mod object;
pub mod tree;
pub mod tree_diff;
use blob::Blob;
use commit::Commit;
use object::Object;
use tree::{Tree, TREE_MODE};
#[derive(Debug)]
pub enum ParsedObject {
Commit(Commit),
Blob(Blob),
Tree(Tree),
}
impl ParsedObject {
pub fn obj_type(&self) -> &str {
match *self {
ParsedObject::Commit(_) => "commit",
ParsedObject::Blob(_) => "blob",
ParsedObject::Tree(_) => "tree",
}
}
pub fn get_oid(&self) -> String {
match self {
ParsedObject::Commit(obj) => obj.get_oid(),
ParsedObject::Blob(obj) => obj.get_oid(),
ParsedObject::Tree(obj) => obj.get_oid(),
}
}
}
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
pub struct Entry {
name: String,
oid: String,
mode: u32,
}
impl From<&index::Entry> for Entry {
fn from(entry: &index::Entry) -> Entry {
Entry {
name: entry.path.clone(),
oid: entry.oid.clone(),
mode: entry.mode,
}
}
}
impl Entry {
pub fn new(name: &str, oid: &str, mode: u32) -> Entry {
Entry {
name: name.to_string(),
oid: oid.to_string(),
mode,
}
}
// if user is allowed to executable, set mode to Executable,
// else Regular
fn is_executable(&self) -> bool {
(self.mode >> 6) & 0b1 == 1
}
fn mode(&self) -> u32 {
if self.mode == TREE_MODE {
return TREE_MODE;
}
if self.is_executable() {
return 0o100755;
} else {
return 0o100644;
}
}
}
pub struct Database {
path: PathBuf,
objects: HashMap<String, ParsedObject>,
}
impl Database {
pub fn new(path: &Path) -> Database {
Database {
path: path.to_path_buf(),
objects: HashMap::new(),
}
}
pub fn read_object(&self, oid: &str) -> Option<ParsedObject> {
let mut contents = vec![];
let mut file = OpenOptions::new()
.read(true)
.create(false)
.open(self.object_path(oid))
.unwrap_or_else(|_| panic!("failed to open file: {:?}", self.object_path(oid)));
file.read_to_end(&mut contents)
.expect("reading file failed");
let mut z = ZlibDecoder::new(&contents[..]);
let mut v = vec![];
z.read_to_end(&mut v).unwrap();
let mut vs = &v[..];
let (obj_type, rest) = match vs
.splitn(2, |c| *c as char == ' ')
.collect::<Vec<&[u8]>>()
.as_slice()
{
&[type_bytes, rest] => (
str::from_utf8(type_bytes).expect("failed to parse type"),
rest,
),
_ => panic!("EOF while parsing type"),
};
vs = rest;
let (_size, rest) = match *vs
.splitn(2, |c| *c as char == '\u{0}')
.collect::<Vec<&[u8]>>()
.as_slice()
{
[size_bytes, rest] => (
str::from_utf8(size_bytes).expect("failed to parse size"),
rest,
),
_ => panic!("EOF while parsing size"),
};
match obj_type {
"commit" => Some(Commit::parse(&rest)),
"blob" => Some(Blob::parse(&rest)),
"tree" => Some(Tree::parse(&rest)),
_ => unimplemented!(),
}
}
pub fn load(&mut self, oid: &str) -> &ParsedObject {
let o = self.read_object(oid);
self.objects.insert(oid.to_string(), o.unwrap());
self.objects.get(oid).unwrap()
}
pub fn store<T>(&self, obj: &T) -> Result<(), std::io::Error>
where
T: Object,
{
let oid = obj.get_oid();
let content = obj.get_content();
self.write_object(oid, content)
}
fn object_path(&self, oid: &str) -> PathBuf {
let dir: &str = &oid[0..2];
let filename: &str = &oid[2..];
self.path.as_path().join(dir).join(filename)
}
fn write_object(&self, oid: String, content: Vec<u8>) -> Result<(), std::io::Error> {
let object_path = self.object_path(&oid);
// If object already exists, we are certain that the contents
// have not changed. So there is no need to write it again.
if object_path.exists() {
return Ok(());
}
let dir_path = object_path.parent().expect("invalid parent path");
fs::create_dir_all(dir_path)?;
let mut temp_file_name = String::from("tmp_obj_");
temp_file_name.push_str(&generate_temp_name());
let temp_path = dir_path.join(temp_file_name);
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create_new(true)
.open(&temp_path)?;
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(&content)?;
let compressed_bytes = e.finish()?;
file.write_all(&compressed_bytes)?;
fs::rename(temp_path, object_path)?;
Ok(())
}
pub fn short_oid(oid: &str) -> &str {
&oid[0..6]
}
pub fn prefix_match(&self, name: &str) -> Vec<String> {
let object_path = self.object_path(name);
let dirname = object_path
.parent()
.expect("Could not get parent from object_path");
let oids: Vec<_> = fs::read_dir(&dirname)
.expect("read_dir call failed")
.map(|f| {
format!(
"{}{}",
dirname
.file_name()
.expect("could not get filename")
.to_str()
.expect("conversion from OsStr to str failed"),
f.unwrap()
.file_name()
.to_str()
.expect("conversion from OsStr to str failed")
)
})
.filter(|o| o.starts_with(name))
.collect();
oids
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/blob.rs | src/database/blob.rs | use crate::database::object::Object;
use crate::database::ParsedObject;
#[derive(Debug)]
pub struct Blob {
pub data: Vec<u8>,
}
impl Blob {
pub fn new(data: &[u8]) -> Blob {
Blob {
data: data.to_vec(),
}
}
}
impl Object for Blob {
fn r#type(&self) -> String {
"blob".to_string()
}
fn to_string(&self) -> Vec<u8> {
self.data.clone()
}
fn parse(s: &[u8]) -> ParsedObject {
ParsedObject::Blob(Blob::new(s))
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
samrat/rug | https://github.com/samrat/rug/blob/40f81c7ba5da19890dd234dba4ca26634bc64d78/src/database/tree_diff.rs | src/database/tree_diff.rs | use crate::database::tree::TreeEntry;
use crate::database::{Database, ParsedObject, Tree};
use std::collections::{BTreeMap, HashMap};
use std::path::{Path, PathBuf};
pub struct TreeDiff<'a> {
database: &'a mut Database,
pub changes: HashMap<PathBuf, (Option<TreeEntry>, Option<TreeEntry>)>,
}
impl<'a> TreeDiff<'a> {
pub fn new(database: &mut Database) -> TreeDiff {
TreeDiff {
database,
changes: HashMap::new(),
}
}
pub fn compare_oids(&mut self, a: Option<String>, b: Option<String>, prefix: &Path) {
if a == b {
return;
}
let a_entries = if let Some(a_oid) = a {
self.oid_to_tree(&a_oid).entries
} else {
BTreeMap::new()
};
let b_entries = if let Some(b_oid) = b {
self.oid_to_tree(&b_oid).entries
} else {
BTreeMap::new()
};
self.detect_deletions(&a_entries, &b_entries, prefix);
self.detect_additions(&a_entries, &b_entries, prefix);
}
fn detect_deletions(
&mut self,
a_entries: &BTreeMap<String, TreeEntry>,
b_entries: &BTreeMap<String, TreeEntry>,
prefix: &Path,
) {
for (name, entry) in a_entries {
let path = prefix.join(name);
let other = b_entries.get(name);
let tree_b = if let Some(b_entry) = other {
if b_entry == entry {
continue;
}
if b_entry.is_tree() {
Some(b_entry.get_oid())
} else {
None
}
} else {
None
};
let tree_a = if entry.is_tree() {
Some(entry.get_oid())
} else {
None
};
self.compare_oids(tree_a, tree_b, &path);
let blobs = match (!entry.is_tree(), other.map(|e| !e.is_tree()).unwrap_or(false)) {
(true, true) => (Some(entry.clone()), other.cloned()),
(true, false) => (Some(entry.clone()), None),
(false, true) => (None, other.cloned()),
(false, false) => continue,
};
self.changes.insert(path, blobs);
}
}
fn detect_additions(
&mut self,
a_entries: &BTreeMap<String, TreeEntry>,
b_entries: &BTreeMap<String, TreeEntry>,
prefix: &Path,
) {
for (name, entry) in b_entries {
let path = prefix.join(name);
let other = a_entries.get(name);
if other.is_some() {
continue;
}
if entry.is_tree() {
self.compare_oids(None, Some(entry.get_oid()), &path);
} else {
self.changes.insert(path, (None, Some(entry.clone())));
}
}
}
fn oid_to_tree(&mut self, oid: &str) -> Tree {
let tree_oid = match self.database.load(oid) {
ParsedObject::Tree(tree) => return tree.clone(),
ParsedObject::Commit(commit) => commit.tree_oid.clone(),
_ => panic!("oid not a commit or tree"),
};
match self.database.load(&tree_oid) {
ParsedObject::Tree(tree) => tree.clone(),
_ => panic!("oid not a tree"),
}
}
}
| rust | MIT | 40f81c7ba5da19890dd234dba4ca26634bc64d78 | 2026-01-04T20:18:11.106865Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/src/unix.rs | src/unix.rs | use std::alloc::{alloc, dealloc, Layout};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use libc::{
getifaddrs, strlen, c_char, ifaddrs, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, IFF_LOOPBACK,
};
use crate::Error;
/// `ifaddrs` struct raw pointer alias
type IfAddrsPtr = *mut *mut ifaddrs;
/// Perform a search over the system's network interfaces using `getifaddrs`,
/// retrieved network interfaces belonging to both socket address families
/// `AF_INET` and `AF_INET6` are retrieved along with the interface address name.
///
/// # Example
///
/// ```
/// use std::net::IpAddr;
/// use local_ip_address::list_afinet_netifas;
///
/// let ifas = list_afinet_netifas().unwrap();
///
/// if let Some((_, ipaddr)) = ifas
/// .iter()
/// .find(|(name, ipaddr)| (*name == "en0" || *name == "epair0b") && matches!(ipaddr, IpAddr::V4(_))) {
/// // This is your local IP address: 192.168.1.111
/// println!("This is your local IP address: {:?}", ipaddr);
/// }
/// ```
pub fn list_afinet_netifas() -> Result<Vec<(String, IpAddr)>, Error> {
match list_afinet_netifas_info() {
Ok(interfaces) => Ok(interfaces
.iter()
.map(|i| (i.iname.clone(), i.addr))
.collect()),
Err(e) => Err(e),
}
}
pub(crate) struct AfInetInfo {
pub addr: IpAddr,
pub iname: String,
pub is_loopback: bool,
}
impl AfInetInfo {
/// Determines if an interface is used for mobile_data
/// https://chromium.googlesource.com/external/webrtc/+/branch-heads/m73/rtc_base/network.cc#205
#[cfg(target_os = "android")]
pub(crate) fn is_mobile_data(&self) -> bool {
self.iname.contains("rmnet_data")
}
#[cfg(target_os = "ios")]
pub(crate) fn is_mobile_data(&self) -> bool {
self.iname.contains("pdp_ip")
}
#[cfg(any(
target_os = "linux",
target_os = "windows",
target_os = "macos",
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
))]
pub(crate) fn is_mobile_data(&self) -> bool {
false
}
#[allow(dead_code)]
pub(crate) fn is_local_network(&self) -> bool {
self.iname.contains("eth") || self.iname.contains("wlan") || self.iname.contains("en")
}
}
// Internal method to list AF_INET info in a struct. This method is used by
// list_afiinet_netifas and local_ip,
pub(crate) fn list_afinet_netifas_info() -> Result<Vec<AfInetInfo>, Error> {
unsafe {
let layout = Layout::new::<IfAddrsPtr>();
let ptr = alloc(layout);
let myaddr = ptr as IfAddrsPtr;
let getifaddrs_result = getifaddrs(myaddr);
if getifaddrs_result != 0 {
// an error occurred on getifaddrs
return Err(Error::StrategyError(format!(
"GetIfAddrs returned error: {}",
getifaddrs_result
)));
}
let mut interfaces: Vec<AfInetInfo> = Vec::new();
let ifa = myaddr;
// An instance of `ifaddrs` is build on top of a linked list where
// `ifaddrs.ifa_next` represent the next node in the list.
//
// To find the relevant interface address walk over the nodes of the
// linked list looking for interface address which belong to the socket
// address families AF_INET (IPv4) and AF_INET6 (IPv6)
loop {
let ifa_addr = (**ifa).ifa_addr;
if !ifa_addr.is_null() {
match (*ifa_addr).sa_family as i32 {
// AF_INET IPv4 protocol implementation
AF_INET => {
let interface_address = ifa_addr;
let socket_addr_v4: *mut sockaddr_in =
interface_address as *mut sockaddr_in;
let in_addr = (*socket_addr_v4).sin_addr;
let mut ip_addr = Ipv4Addr::from(in_addr.s_addr);
if cfg!(target_endian = "little") {
// due to a difference on how bytes are arranged on a
// single word of memory by the CPU, swap bytes based
// on CPU endianness to avoid having twisted IP addresses
//
// refer: https://github.com/rust-lang/rust/issues/48819
ip_addr = Ipv4Addr::from(in_addr.s_addr.swap_bytes());
}
interfaces.push(AfInetInfo {
addr: IpAddr::V4(ip_addr),
iname: get_ifa_name(ifa)?,
is_loopback: is_loopback_addr(ifa),
});
}
// AF_INET6 IPv6 protocol implementation
AF_INET6 => {
let interface_address = ifa_addr;
let socket_addr_v6: *mut sockaddr_in6 =
interface_address as *mut sockaddr_in6;
let in6_addr = (*socket_addr_v6).sin6_addr;
let ip_addr = Ipv6Addr::from(in6_addr.s6_addr);
interfaces.push(AfInetInfo {
addr: IpAddr::V6(ip_addr),
iname: get_ifa_name(ifa)?,
is_loopback: is_loopback_addr(ifa),
});
}
_ => {}
}
}
// Check if we are at the end of our network interface list
*ifa = (**ifa).ifa_next;
if (*ifa).is_null() {
break;
}
}
dealloc(ptr, layout);
Ok(interfaces)
}
}
/// Retrieves the name of a interface address
unsafe fn get_ifa_name(ifa: *mut *mut ifaddrs) -> Result<String, Error> {
unsafe {
let str = (*(*ifa)).ifa_name;
let len = strlen(str as *const c_char);
let slice = std::slice::from_raw_parts(str as *mut u8, len);
match String::from_utf8(slice.to_vec()) {
Ok(s) => Ok(s),
Err(e) => Err(Error::StrategyError(format!(
"Failed to retrieve interface name. The name is not a valid UTF-8 string. {}",
e
))),
}
}
}
/// Determines if an interface address is a loopback address
unsafe fn is_loopback_addr(ifa: *mut *mut ifaddrs) -> bool {
unsafe {
let iflags = (*(*ifa)).ifa_flags as i32;
(iflags & IFF_LOOPBACK) != 0
}
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/src/lib.rs | src/lib.rs | /*!
# Local IP Address
Retrieve system's local IP address and Network Interfaces/Adapters on
Linux, Windows, and macOS (and other BSD-based systems).
## Usage
Get the local IP address of your system by executing the `local_ip` function:
```rust
use local_ip_address::local_ip;
let my_local_ip = local_ip();
if let Ok(my_local_ip) = my_local_ip {
println!("This is my local IP address: {:?}", my_local_ip);
} else {
println!("Error getting local IP: {:?}", my_local_ip);
}
```
Retrieve all the available network interfaces from both, the `AF_INET` and
the `AF_INET6` family by executing the `list_afinet_netifas` function:
```rust
use local_ip_address::list_afinet_netifas;
let network_interfaces = list_afinet_netifas();
if let Ok(network_interfaces) = network_interfaces {
for (name, ip) in network_interfaces.iter() {
println!("{}:\t{:?}", name, ip);
}
} else {
println!("Error getting network interfaces: {:?}", network_interfaces);
}
```
Underlying approach on retrieving network interfaces or the local IP address
may differ based on the running operative system.
OS | Approach
--- | ---
Linux | Establishes a Netlink socket interchange to retrieve network interfaces
BSD-based & Android | Uses of `getifaddrs` to retrieve network interfaces
Windows | Consumes Win32 API's to retrieve the network adapters table
Supported BSD-based systems include:
- macOS
- FreeBSD
- OpenBSD
- NetBSD
- DragonFly
*/
use std::net::IpAddr;
mod error;
pub use error::Error;
#[cfg(target_os = "linux")]
pub mod linux;
#[cfg(target_os = "linux")]
pub use crate::linux::*;
#[cfg(any(
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "macos",
target_os = "android",
target_os = "ios",
))]
pub mod unix;
#[cfg(any(
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "macos",
target_os = "android",
target_os = "ios",
))]
pub use crate::unix::*;
#[cfg(target_family = "windows")]
pub mod windows;
#[cfg(target_family = "windows")]
pub use crate::windows::*;
/// Retrieves the local IPv4 address of the machine in the local network from
/// the `AF_INET` family.
///
/// A different approach is taken based on the operative system.
///
/// For linux based systems the Netlink socket communication is used to
/// retrieve the local network interface.
///
/// For BSD-based systems the `getifaddrs` approach is taken using `libc`
///
/// For Windows systems Win32's IP Helper is used to gather the Local IP
/// address
pub fn local_ip() -> Result<IpAddr, Error> {
#[cfg(target_os = "linux")]
{
crate::linux::local_ip()
}
#[cfg(any(
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "macos",
target_os = "android",
target_os = "ios",
))]
{
let ifas = crate::unix::list_afinet_netifas_info()?;
ifas.into_iter()
.find_map(|ifa| {
if !ifa.is_loopback && ifa.addr.is_ipv4() && !ifa.is_mobile_data() {
Some(ifa.addr)
} else {
None
}
})
.ok_or(Error::LocalIpAddressNotFound)
}
#[cfg(target_os = "windows")]
{
use windows_sys::Win32::Networking::WinSock::AF_INET;
let ip_addresses = crate::windows::list_local_ip_addresses(AF_INET)?;
ip_addresses
.into_iter()
.find(|ip_address| matches!(ip_address, IpAddr::V4(_)))
.ok_or(Error::LocalIpAddressNotFound)
}
// A catch-all case to error if not implemented for OS
#[cfg(not(any(
target_os = "linux",
target_os = "windows",
target_os = "macos",
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "android",
target_os = "ios",
)))]
{
Err(Error::PlatformNotSupported(
std::env::consts::OS.to_string(),
))
}
}
/// Retrieves the local IPv6 address of the machine in the local network from
/// the `AF_INET6` family.
///
/// A different approach is taken based on the operative system.
///
/// For linux based systems the Netlink socket communication is used to
/// retrieve the local network interface.
///
/// For BSD-based systems the `getifaddrs` approach is taken using `libc`
///
/// For Windows systems Win32's IP Helper is used to gather the Local IP
/// address
pub fn local_ipv6() -> Result<IpAddr, Error> {
#[cfg(target_os = "linux")]
{
crate::linux::local_ipv6()
}
#[cfg(any(
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "macos",
target_os = "android",
target_os = "ios",
))]
{
let ifas = crate::unix::list_afinet_netifas_info()?;
ifas.into_iter()
.find_map(|ifa| {
if !ifa.is_loopback && ifa.addr.is_ipv6() && !ifa.is_mobile_data() {
Some(ifa.addr)
} else {
None
}
})
.ok_or(Error::LocalIpAddressNotFound)
}
#[cfg(target_os = "windows")]
{
use windows_sys::Win32::Networking::WinSock::AF_INET6;
let ip_addresses = crate::windows::list_local_ip_addresses(AF_INET6)?;
ip_addresses
.into_iter()
.find(|ip_address| matches!(ip_address, IpAddr::V6(_)))
.ok_or(Error::LocalIpAddressNotFound)
}
// A catch-all case to error if not implemented for OS
#[cfg(not(any(
target_os = "linux",
target_os = "windows",
target_os = "macos",
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "android",
target_os = "ios",
)))]
{
Err(Error::PlatformNotSupported(
std::env::consts::OS.to_string(),
))
}
}
// A catch-all function to error if not implemented for OS
#[cfg(not(any(
target_os = "linux",
target_os = "windows",
target_os = "macos",
target_os = "freebsd",
target_os = "openbsd",
target_os = "netbsd",
target_os = "dragonfly",
target_os = "android",
target_os = "ios",
)))]
pub fn list_afinet_netifas() -> Result<Vec<(String, IpAddr)>, Error> {
Err(Error::PlatformNotSupported(
std::env::consts::OS.to_string(),
))
}
// Unit tests. Gated behind `cfg(test)` so test-only code is not compiled
// into regular builds (consistent with the `tests` module in `linux.rs`).
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;

    #[test]
    #[cfg(target_os = "linux")]
    fn find_local_ip() {
        let my_local_ip = local_ip();
        println!("Linux 'local_ip': {:?}", my_local_ip);
        // Expect an IPv4 address on a normally-configured host.
        assert!(matches!(my_local_ip, Ok(IpAddr::V4(_))));
    }

    #[test]
    #[cfg(any(
        target_os = "freebsd",
        target_os = "openbsd",
        target_os = "netbsd",
        target_os = "dragonfly",
        target_os = "macos",
        target_os = "android",
        target_os = "ios",
    ))]
    fn find_local_ip() {
        let my_local_ip = local_ip();
        println!("Unix 'local_ip': {:?}", my_local_ip);
        assert!(matches!(my_local_ip, Ok(IpAddr::V4(_))));
    }

    #[test]
    #[cfg(target_os = "windows")]
    fn find_local_ip() {
        let my_local_ip = local_ip();
        println!("Windows 'local_ip': {:?}", my_local_ip);
        assert!(matches!(my_local_ip, Ok(IpAddr::V4(_))));
    }

    #[test]
    #[cfg(target_os = "linux")]
    fn find_network_interfaces() {
        let network_interfaces = list_afinet_netifas();
        println!("Linux 'list_afinet_netifas': {:?}", network_interfaces);
        assert!(network_interfaces.is_ok());
        // Every host is expected to expose at least one interface (loopback).
        assert!(!network_interfaces.unwrap().is_empty());
    }

    #[test]
    #[cfg(any(
        target_os = "freebsd",
        target_os = "openbsd",
        target_os = "netbsd",
        target_os = "dragonfly",
        target_os = "macos",
        target_os = "android",
        target_os = "ios",
    ))]
    fn find_network_interfaces() {
        let network_interfaces = list_afinet_netifas();
        assert!(network_interfaces.is_ok());
        assert!(!network_interfaces.unwrap().is_empty());
    }

    #[test]
    #[cfg(target_os = "windows")]
    fn find_network_interfaces() {
        let network_interfaces = list_afinet_netifas();
        assert!(network_interfaces.is_ok());
        assert!(!network_interfaces.unwrap().is_empty());
    }
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/src/linux.rs | src/linux.rs | use std::collections::HashMap;
use std::ffi::{CStr, c_int};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use neli::attr::Attribute;
use neli::consts::nl::{NlmF};
use neli::consts::socket::NlFamily;
use neli::consts::rtnl::{Ifa, Ifla, RtAddrFamily, RtScope, RtTable, Rta, Rtm, RtmF, Rtn, Rtprot};
use neli::err::RouterError;
use neli::nl::{NlPayload, Nlmsghdr};
use neli::router::synchronous::NlRouter;
use neli::rtnl::{
Ifaddrmsg, IfaddrmsgBuilder, Ifinfomsg, IfinfomsgBuilder, RtattrBuilder, Rtmsg, RtmsgBuilder,
};
use neli::types::RtBuffer;
use neli::consts::rtnl::RtAddrFamily::{Inet, Inet6};
use neli::utils::Groups;
use crate::Error;
/// Route-request flags attached to the `RTM_GETROUTE` lookup performed by
/// `local_ip_impl_route`.
///
/// NOTE(review): the lookup-table flag is only enabled on glibc targets;
/// presumably it is unavailable or unreliable on other libc flavors —
/// confirm the rationale before changing.
#[cfg(target_env = "gnu")]
const RTM_FLAGS_LOOKUP: &[RtmF] = &[RtmF::LOOKUPTABLE];
/// On non-glibc targets no extra route-request flags are used.
#[cfg(not(target_env = "gnu"))]
const RTM_FLAGS_LOOKUP: &[RtmF] = &[];
/// Retrieves the local IPv4 address for this system
pub fn local_ip() -> Result<IpAddr, Error> {
local_ip_impl(Inet)
}
/// Retrieves the local IPv6 address for this system
pub fn local_ipv6() -> Result<IpAddr, Error> {
local_ip_impl(Inet6)
}
/// Retrieves the local broadcast IPv4 address for this system
pub fn local_broadcast_ip() -> Result<IpAddr, Error> {
local_broadcast_impl(Inet)
}
/// Shared implementation for resolving the local broadcast address.
///
/// Dumps all interface addresses with an `RTM_GETADDR` netlink request and
/// scans each message's attributes. If a message's `IFA_LOCAL` matches the
/// preferred local IP (from [`local_ip`]), that message's `IFA_BROADCAST`
/// is returned immediately; otherwise the last broadcast address seen is
/// used as a fallback.
///
/// NOTE(review): the early-return path assumes `IFA_LOCAL` appears before
/// `IFA_BROADCAST` within a message's attribute list — confirm this holds
/// across kernels. Also, the preferred IP is always the IPv4 `local_ip()`
/// result, so this function is only meaningful for `family == Inet`.
fn local_broadcast_impl(family: RtAddrFamily) -> Result<IpAddr, Error> {
    let (netlink_socket, _) = NlRouter::connect(NlFamily::Route, None, Groups::empty())
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    // Preferred local address to match the broadcast entry against.
    let pref_ip = local_ip()?;
    let ifaddrmsg = IfaddrmsgBuilder::default()
        .ifa_family(family)
        .build()
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    // Request a full dump of interface addresses for the given family.
    let recv = netlink_socket
        .send(
            Rtm::Getaddr,
            NlmF::REQUEST | NlmF::ROOT,
            NlPayload::Payload(ifaddrmsg),
        )
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    // Fallback: last broadcast address seen on any universe-scope address.
    let mut broadcast_ip = None;
    for response in recv {
        let header: Nlmsghdr<Rtm, Ifaddrmsg> = response.map_err(|_| {
            Error::StrategyError(String::from(
                "An error occurred retrieving Netlink's socket response",
            ))
        })?;
        if let NlPayload::Empty = header.nl_payload() {
            continue;
        }
        if *header.nl_type() != Rtm::Newaddr {
            return Err(Error::StrategyError(String::from(
                "The Netlink header type is not the expected",
            )));
        }
        let p = header.get_payload().ok_or_else(|| {
            Error::StrategyError(String::from(
                "An error occurred getting Netlink's header payload",
            ))
        })?;
        // Only global-scope addresses are candidates (skips loopback etc.).
        if *p.ifa_scope() != RtScope::Universe {
            continue;
        }
        if *p.ifa_family() != family {
            Err(Error::StrategyError(format!(
                "Invalid family in Netlink payload: {:?}",
                p.ifa_family()
            )))?
        }
        // Whether this message's IFA_LOCAL equals the preferred local IP.
        let mut is_match = false;
        for rtattr in p.rtattrs().iter() {
            if *rtattr.rta_type() == Ifa::Local {
                if *p.ifa_family() == Inet {
                    let addr = Ipv4Addr::from(u32::from_be(
                        rtattr.get_payload_as::<u32>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    is_match = pref_ip == IpAddr::V4(addr);
                } else {
                    let addr = Ipv6Addr::from(u128::from_be(
                        rtattr.get_payload_as::<u128>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    is_match = pref_ip == IpAddr::V6(addr);
                }
            }
            // Broadcast attribute of the interface owning the preferred IP:
            // this is the answer — return immediately.
            if is_match && *rtattr.rta_type() == Ifa::Broadcast && *p.ifa_family() == Inet {
                let addr = Ipv4Addr::from(u32::from_be(
                    rtattr.get_payload_as::<u32>().map_err(|_| {
                        Error::StrategyError(String::from(
                            "An error occurred retrieving Netlink's route payload broadcast attribute",
                        ))
                    })?,
                ));
                return Ok(IpAddr::V4(addr));
            }
            // Otherwise remember any broadcast attribute as a fallback.
            if *rtattr.rta_type() == Ifa::Broadcast && *p.ifa_family() == Inet {
                let addr = Ipv4Addr::from(u32::from_be(rtattr.get_payload_as::<u32>().map_err(
                    |_| {
                        Error::StrategyError(String::from(
                            "An error occurred retrieving Netlink's route payload attribute",
                        ))
                    },
                )?));
                broadcast_ip = Some(IpAddr::V4(addr));
            }
        }
    }
    if let Some(broadcast_ip) = broadcast_ip {
        return Ok(broadcast_ip);
    }
    Err(Error::LocalIpAddressNotFound)
}
fn local_ip_impl(family: RtAddrFamily) -> Result<IpAddr, Error> {
let (netlink_socket, _) = NlRouter::connect(NlFamily::Route, None, Groups::empty())
.map_err(|err| Error::StrategyError(err.to_string()))?;
match local_ip_impl_route(family, &netlink_socket) {
Ok(ip_addr) => Ok(ip_addr),
Err(Error::LocalIpAddressNotFound) => local_ip_impl_addr(family, &netlink_socket),
Err(e) => Err(e),
}
}
/// Resolves the preferred local address by asking the kernel which source
/// address it would pick to reach a fixed external destination.
///
/// Sends an `RTM_GETROUTE` request towards a reserved address
/// (192.0.2.0, RFC 5737 TEST-NET-1, or 2001:db8::, RFC 3849) and reads the
/// `RTA_PREFSRC` attribute of the returned route. Returns
/// [`Error::LocalIpAddressNotFound`] when the kernel reports the network
/// as unreachable (no matching route).
fn local_ip_impl_route(family: RtAddrFamily, netlink_socket: &NlRouter) -> Result<IpAddr, Error> {
    let route_attr = match family {
        Inet => {
            let dstip = Ipv4Addr::new(192, 0, 2, 0); // reserved external IP (TEST-NET-1)
            let raw_dstip = u32::from(dstip).to_be();
            RtattrBuilder::default()
                .rta_type(Rta::Dst)
                .rta_payload(raw_dstip)
                .build()
        }
        Inet6 => {
            let dstip = Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 0); // reserved external IP (documentation prefix)
            let raw_dstip = u128::from(dstip).to_be();
            RtattrBuilder::default()
                .rta_type(Rta::Dst)
                .rta_payload(raw_dstip)
                .build()
        }
        _ => Err(Error::StrategyError(format!(
            "Invalid address family given: {:#?}",
            family
        )))?,
    };
    // Wildcard route message: everything unspecified except the family and
    // the destination attribute, letting the kernel do the route lookup.
    let mut ifroutemsg = RtmsgBuilder::default()
        .rtm_family(family)
        .rtm_src_len(0)
        .rtm_dst_len(0)
        .rtm_tos(0)
        .rtm_table(RtTable::Unspec)
        .rtm_protocol(Rtprot::Unspec)
        .rtm_scope(RtScope::Universe)
        .rtm_type(Rtn::Unspec);
    let route_attr = route_attr.map_err(|err| Error::StrategyError(err.to_string()))?;
    let mut route_payload = RtBuffer::new();
    route_payload.push(route_attr);
    ifroutemsg = ifroutemsg.rtattrs(route_payload);
    // Fold the platform-dependent flag list (see RTM_FLAGS_LOOKUP) into a
    // single bitmask; `None` means no flags at all.
    let rtm_flags = RTM_FLAGS_LOOKUP.iter().cloned().reduce(|a, b| a | b);
    if let Some(flags) = rtm_flags {
        ifroutemsg = ifroutemsg.rtm_flags(flags);
    }
    let ifroutemsg = ifroutemsg
        .build()
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let recv = netlink_socket
        .send(Rtm::Getroute, NlmF::REQUEST, NlPayload::Payload(ifroutemsg))
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    for response in recv {
        let header: Nlmsghdr<Rtm, Rtmsg> = response.map_err(|err| {
            // ENETUNREACH means there simply is no route (e.g. no IPv6
            // connectivity) — translate it to "not found" so the caller can
            // fall back to address enumeration.
            if let RouterError::Nlmsgerr(ref err) = err {
                if *err.error() == -libc::ENETUNREACH {
                    return Error::LocalIpAddressNotFound;
                }
            }
            Error::StrategyError(format!(
                "An error occurred retrieving Netlink's socket response: {err}",
            ))
        })?;
        if let NlPayload::Empty = *header.nl_payload() {
            continue;
        }
        if *header.nl_type() != Rtm::Newroute {
            return Err(Error::StrategyError(String::from(
                "The Netlink header type is not the expected",
            )));
        }
        let p = header.get_payload().ok_or_else(|| {
            Error::StrategyError(String::from(
                "An error occurred getting Netlink's header payload",
            ))
        })?;
        if *p.rtm_scope() != RtScope::Universe {
            continue;
        }
        if *p.rtm_family() != family {
            Err(Error::StrategyError(format!(
                "Invalid address family in Netlink payload: {:?}",
                p.rtm_family()
            )))?
        }
        // RTA_PREFSRC carries the source address the kernel would use —
        // that is the "local IP" we are after.
        for rtattr in p.rtattrs().iter() {
            if *rtattr.rta_type() == Rta::Prefsrc {
                if *p.rtm_family() == Inet {
                    let addr = Ipv4Addr::from(u32::from_be(
                        rtattr.get_payload_as::<u32>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    return Ok(IpAddr::V4(addr));
                } else {
                    let addr = Ipv6Addr::from(u128::from_be(
                        rtattr.get_payload_as::<u128>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    return Ok(IpAddr::V6(addr));
                }
            }
        }
    }
    Err(Error::LocalIpAddressNotFound)
}
/// Fallback local-address resolution: enumerate interface addresses.
///
/// Dumps all addresses of the given family with `RTM_GETADDR` and returns
/// the first universe-scope `IFA_LOCAL` address found. Used when the
/// route-based probe ([`local_ip_impl_route`]) finds no route.
fn local_ip_impl_addr(family: RtAddrFamily, netlink_socket: &NlRouter) -> Result<IpAddr, Error> {
    let ifaddrmsg = IfaddrmsgBuilder::default()
        .ifa_family(family)
        .build()
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let recv = netlink_socket
        .send(
            Rtm::Getaddr,
            NlmF::REQUEST | NlmF::ROOT,
            NlPayload::Payload(ifaddrmsg),
        )
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    for response in recv {
        let header: Nlmsghdr<Rtm, Ifaddrmsg> = response.map_err(|_| {
            Error::StrategyError(String::from(
                "An error occurred retrieving Netlink's socket response",
            ))
        })?;
        if let NlPayload::Empty = *header.nl_payload() {
            continue;
        }
        if *header.nl_type() != Rtm::Newaddr {
            return Err(Error::StrategyError(String::from(
                "The Netlink header type is not the expected",
            )));
        }
        let p = header.get_payload().ok_or_else(|| {
            Error::StrategyError(String::from(
                "An error occurred getting Netlink's header payload",
            ))
        })?;
        // Skip non-global addresses (loopback, link-local, ...).
        if *p.ifa_scope() != RtScope::Universe {
            continue;
        }
        if *p.ifa_family() != family {
            Err(Error::StrategyError(format!(
                "Invalid family in Netlink payload: {:?}",
                p.ifa_family()
            )))?
        }
        // First IFA_LOCAL attribute wins.
        for rtattr in p.rtattrs().iter() {
            if *rtattr.rta_type() == Ifa::Local {
                if *p.ifa_family() == Inet {
                    let addr = Ipv4Addr::from(u32::from_be(
                        rtattr.get_payload_as::<u32>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    return Ok(IpAddr::V4(addr));
                } else {
                    let addr = Ipv6Addr::from(u128::from_be(
                        rtattr.get_payload_as::<u128>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    return Ok(IpAddr::V6(addr));
                }
            }
        }
    }
    Err(Error::LocalIpAddressNotFound)
}
/// Perform a search over the system's network interfaces using Netlink Route information,
/// retrieved network interfaces belonging to both socket address families
/// `AF_INET` and `AF_INET6` are retrieved along with the interface address name.
///
/// # Example
///
/// ```
/// use std::net::IpAddr;
/// use local_ip_address::list_afinet_netifas;
///
/// let ifas = list_afinet_netifas().unwrap();
///
/// if let Some((_, ipaddr)) = ifas
/// .iter()
/// .find(|(name, ipaddr)| *name == "en0" && matches!(ipaddr, IpAddr::V4(_))) {
/// // This is your local IP address: 192.168.1.111
/// println!("This is your local IP address: {:?}", ipaddr);
/// }
/// ```
pub fn list_afinet_netifas() -> Result<Vec<(String, IpAddr)>, Error> {
    let (netlink_socket, _) = NlRouter::connect(NlFamily::Route, None, Groups::empty())
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    // First get list of interfaces via RTM_GETLINK, building an
    // index -> interface-name map used later when an address message
    // carries no IFA_LABEL attribute.
    let ifinfomsg = IfinfomsgBuilder::default()
        .ifi_family(RtAddrFamily::Unspecified)
        .build()
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let recv = netlink_socket
        .send(
            Rtm::Getlink,
            NlmF::REQUEST | NlmF::DUMP,
            NlPayload::Payload(ifinfomsg),
        )
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let mut if_indexes = HashMap::new();
    for response in recv {
        let header: Nlmsghdr<Rtm, Ifinfomsg> = response.map_err(|_| {
            Error::StrategyError(String::from(
                "An error occurred retrieving Netlink's socket response",
            ))
        })?;
        if let NlPayload::Empty = *header.nl_payload() {
            continue;
        }
        // Silently skip anything that is not a link announcement.
        if *header.nl_type() != Rtm::Newlink {
            continue;
        }
        let p = header.get_payload().ok_or_else(|| {
            Error::StrategyError(String::from(
                "An error occurred getting Netlink's header payload",
            ))
        })?;
        // Record the first IFLA_IFNAME attribute for this interface index.
        for rtattr in p.rtattrs().iter() {
            if *rtattr.rta_type() == Ifla::Ifname {
                let ifname = parse_ifname(rtattr.payload().as_ref())?;
                if_indexes.insert(*p.ifi_index(), ifname);
                break;
            }
        }
    }
    // Secondly get addresses of interfaces via RTM_GETADDR (both families).
    let ifaddrmsg = IfaddrmsgBuilder::default()
        .ifa_family(RtAddrFamily::Unspecified)
        .ifa_prefixlen(0)
        .ifa_scope(RtScope::Universe)
        .ifa_index(0)
        .build()
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let recv = netlink_socket
        .send(
            Rtm::Getaddr,
            NlmF::REQUEST | NlmF::DUMP,
            NlPayload::Payload(ifaddrmsg),
        )
        .map_err(|err| Error::StrategyError(err.to_string()))?;
    let mut interfaces = Vec::new();
    for response in recv {
        let header: Nlmsghdr<Rtm, Ifaddrmsg> = response.map_err(|err| {
            Error::StrategyError(format!(
                "An error occurred retrieving Netlink's socket response: {err}"
            ))
        })?;
        if let NlPayload::Empty = header.nl_payload() {
            continue;
        }
        if *header.nl_type() != Rtm::Newaddr {
            continue;
        }
        let p = header.get_payload().ok_or_else(|| {
            Error::StrategyError(String::from(
                "An error occurred getting Netlink's header payload",
            ))
        })?;
        if *p.ifa_family() != Inet6 && *p.ifa_family() != Inet {
            Err(Error::StrategyError(format!(
                "Netlink payload has unsupported family: {:?}",
                p.ifa_family()
            )))?
        }
        // IFA_LOCAL is preferred over IFA_ADDRESS; IFA_LABEL (if present)
        // names the interface directly.
        let mut ipaddr = None;
        let mut label = None;
        for rtattr in p.rtattrs().iter() {
            if *rtattr.rta_type() == Ifa::Label {
                let ifname = parse_ifname(rtattr.payload().as_ref())?;
                label = Some(ifname);
            } else if *rtattr.rta_type() == Ifa::Address {
                if ipaddr.is_some() {
                    // do not override IFA_LOCAL
                    continue;
                }
                if *p.ifa_family() == Inet6 {
                    let rtaddr = Ipv6Addr::from(u128::from_be(
                        rtattr.get_payload_as::<u128>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    ipaddr = Some(IpAddr::V6(rtaddr));
                } else {
                    let rtaddr = Ipv4Addr::from(u32::from_be(
                        rtattr.get_payload_as::<u32>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    ipaddr = Some(IpAddr::V4(rtaddr));
                }
            } else if *rtattr.rta_type() == Ifa::Local {
                if *p.ifa_family() == Inet6 {
                    let rtlocal = Ipv6Addr::from(u128::from_be(
                        rtattr.get_payload_as::<u128>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    ipaddr = Some(IpAddr::V6(rtlocal));
                } else {
                    let rtlocal = Ipv4Addr::from(u32::from_be(
                        rtattr.get_payload_as::<u32>().map_err(|_| {
                            Error::StrategyError(String::from(
                                "An error occurred retrieving Netlink's route payload attribute",
                            ))
                        })?,
                    ));
                    ipaddr = Some(IpAddr::V4(rtlocal));
                }
            }
        }
        // Pair the address with a name: the message's own label if present,
        // otherwise the name recorded for the interface index in pass one.
        if let Some(ipaddr) = ipaddr {
            if let Some(ifname) = label {
                interfaces.push((ifname, ipaddr));
            } else if let Some(ifname) = if_indexes.get(&(*p.ifa_index() as c_int)) {
                interfaces.push((ifname.clone(), ipaddr));
            }
        }
    }
    Ok(interfaces)
}
/// Parse a network interface name from raw bytes into a `String`.
///
/// A trailing `'\0'` terminator, if present, is stripped; interior nul
/// bytes in a nul-terminated slice are rejected as an error. Invalid
/// UTF-8 sequences are replaced lossily in either case.
fn parse_ifname(bytes: &[u8]) -> Result<String, Error> {
    if let [.., 0] = bytes {
        // Nul-terminated: validate through CStr (which rejects interior
        // nul bytes) before the lossy UTF-8 conversion.
        let cstr = CStr::from_bytes_with_nul(bytes).map_err(|err| {
            Error::StrategyError(format!(
                "An error occurred converting interface name to string: {err}",
            ))
        })?;
        Ok(cstr.to_string_lossy().into_owned())
    } else {
        Ok(String::from_utf8_lossy(bytes).into_owned())
    }
}
#[cfg(test)]
mod tests {
    use crate::linux::parse_ifname;

    #[test]
    fn parse_ifname_without_nul() {
        // No trailing nul: bytes convert as-is.
        let parsed = parse_ifname(b"hello, world");
        assert!(parsed.is_ok());
        assert_eq!(parsed.unwrap(), "hello, world");
    }

    #[test]
    fn parse_ifname_with_nul() {
        // Trailing nul terminator is stripped during parsing.
        let parsed = parse_ifname(b"hello, world\0");
        assert!(parsed.is_ok());
        assert_eq!(parsed.unwrap(), "hello, world");
    }

    #[test]
    fn parse_ifname_only_nul() {
        // A lone nul byte parses to the empty string.
        let parsed = parse_ifname(b"\0");
        assert!(parsed.is_ok());
        assert_eq!(parsed.unwrap(), "");
    }
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/src/error.rs | src/error.rs | #[derive(thiserror::Error, Debug, PartialEq)]
/// The error type returned by every address-resolution function in this
/// crate. Error messages are generated by the `thiserror` derive on each
/// variant's `#[error(...)]` attribute.
pub enum Error {
    /// Returned when `local_ip` is unable to find the system's local IP address
    /// in the collection of network interfaces
    #[error("The Local IP Address wasn't available in the network interfaces list/table")]
    LocalIpAddressNotFound,

    /// Returned when an error occurs in the strategy level.
    /// The error message may include any internal strategy error if available
    #[error("An error occurred executing the underlying strategy error.\n{0}")]
    StrategyError(String),

    /// Returned when the current platform is not yet supported
    #[error("The current platform: `{0}`, is not supported")]
    PlatformNotSupported(String),
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/src/windows.rs | src/windows.rs | use std::{
alloc::{alloc, dealloc, Layout},
net::IpAddr,
ptr::{NonNull, self},
slice,
marker::PhantomData,
ops::Deref,
mem,
};
use windows_sys::Win32::{
Foundation::{
GetLastError, LocalFree, ERROR_ADDRESS_NOT_ASSOCIATED, ERROR_BUFFER_OVERFLOW,
ERROR_INSUFFICIENT_BUFFER, ERROR_INVALID_PARAMETER, ERROR_NOT_ENOUGH_MEMORY,
ERROR_NOT_SUPPORTED, ERROR_NO_DATA, ERROR_SUCCESS, WIN32_ERROR,
},
NetworkManagement::IpHelper::{
GetAdaptersAddresses, GetIpForwardTable, GET_ADAPTERS_ADDRESSES_FLAGS,
IP_ADAPTER_ADDRESSES_LH, IP_ADAPTER_UNICAST_ADDRESS_LH, MIB_IPFORWARDTABLE,
},
Networking::WinSock::{
ADDRESS_FAMILY, AF_INET, AF_INET6, AF_UNSPEC, SOCKADDR_IN, SOCKADDR_IN6, SOCKADDR,
},
System::Diagnostics::Debug::{
FormatMessageW, FORMAT_MESSAGE_ALLOCATE_BUFFER, FORMAT_MESSAGE_FROM_SYSTEM,
},
};
use windows_sys::core::BOOL;
use crate::error::Error;
/// Retrieves the local ip addresses for this system.
///
/// Finds the interface indices of all default routes in the IPv4 routing
/// table, then collects every unicast address (of the requested `family`)
/// configured on those interfaces.
pub(crate) fn list_local_ip_addresses(family: ADDRESS_FAMILY) -> Result<Vec<IpAddr>, Error> {
    /// An IPv4 address of 0.0.0.0 in the dwForwardDest member of the MIB_IPFORWARDROW structure is considered a
    /// default route.
    const DEFAULT_ROUTE: u32 = 0;
    // There can be multiple default routes (e.g. wifi and ethernet).
    let default_route_interface_indices: Vec<u32> = {
        let ip_forward_table = get_ip_forward_table(0).map_err(|error| match error {
            ERROR_NO_DATA | ERROR_NOT_SUPPORTED => Error::LocalIpAddressNotFound,
            error_code => Error::StrategyError(format_error_code(error_code)),
        })?;
        // SAFETY: the OS filled the buffer with `dwNumEntries` valid rows
        // starting at `table` (contract of GetIpForwardTable).
        let table = unsafe {
            slice::from_raw_parts(
                ip_forward_table.table.as_ptr(),
                ip_forward_table.dwNumEntries.try_into().unwrap(),
            )
        };
        table
            .iter()
            .filter_map(|row| {
                if row.dwForwardDest == DEFAULT_ROUTE {
                    Some(row.dwForwardIfIndex)
                } else {
                    None
                }
            })
            .collect()
    };
    let adapter_addresses = get_adapter_addresses(family, 0).map_err(|error| match error {
        ERROR_ADDRESS_NOT_ASSOCIATED | ERROR_NO_DATA => Error::LocalIpAddressNotFound,
        error_code => Error::StrategyError(format_error_code(error_code)),
    })?;
    let adapter_addresses_iter = LinkedListIter::new(Some(adapter_addresses.ptr));
    // Keep only adapters that carry a default route, then flatten their
    // per-adapter unicast address lists into plain IpAddr values.
    let local_ip_address = adapter_addresses_iter
        .filter(|adapter_address| {
            let interface_index = unsafe { adapter_address.Anonymous1.Anonymous.IfIndex };
            default_route_interface_indices.contains(&interface_index)
        })
        .flat_map(|default_adapter_address| {
            let unicast_addresses_iter =
                LinkedListIter::new(NonNull::new(default_adapter_address.FirstUnicastAddress));
            unicast_addresses_iter.filter_map(|unicast_address| {
                let socket_address = NonNull::new(unicast_address.Address.lpSockaddr)?;
                get_ip_address_from_socket_address(socket_address)
            })
        })
        .collect();
    Ok(local_ip_address)
}
/// Perform a search over the system's network interfaces using `GetAdaptersAddresses`,
/// retrieved network interfaces belonging to both socket address families
/// `AF_INET` and `AF_INET6` are retrieved along with the interface address name.
///
/// # Example
///
/// ```
/// use std::net::IpAddr;
/// use local_ip_address::list_afinet_netifas;
///
/// let ifas = list_afinet_netifas().unwrap();
///
/// if let Some((_, ipaddr)) = ifas
/// .iter()
/// .find(|(name, ipaddr)| *name == "en0" && matches!(ipaddr, IpAddr::V4(_))) {
/// // This is your local IP address: 192.168.1.111
/// println!("This is your local IP address: {:?}", ipaddr);
/// }
/// ```
pub fn list_afinet_netifas() -> Result<Vec<(String, IpAddr)>, Error> {
    // AF_UNSPEC requests adapters of every address family.
    let adapter_addresses = get_adapter_addresses(AF_UNSPEC, 0)
        .map_err(|error_code| Error::StrategyError(format_error_code(error_code)))?;
    let adapter_addresses_iter = LinkedListIter::new(Some(adapter_addresses.ptr));
    let network_interfaces = adapter_addresses_iter
        .flat_map(|adapter_address| {
            let unicast_addresses_iter =
                LinkedListIter::new(NonNull::new(adapter_address.FirstUnicastAddress));
            // Measure the adapter's UTF-16 friendly name and view it as a
            // slice (without copying it yet).
            let friendly_name = unsafe {
                #[allow(unused_unsafe)]
                // SAFETY: This is basically how `wcslen` works under the hood. `wcslen` is unsafe because the pointer
                // is not checked for null and if there is no null-terminating character, it will run forever.
                // Therefore, safety relies on the operating system always returning a valid string.
                let len = unsafe {
                    let mut ptr = adapter_address.FriendlyName;
                    while *ptr != 0 {
                        ptr = ptr.offset(1);
                    }
                    ptr.offset_from(adapter_address.FriendlyName)
                        .try_into()
                        .unwrap()
                };
                slice::from_raw_parts(adapter_address.FriendlyName, len)
            };
            // Pair each resolvable unicast address with an owned copy of
            // the adapter name.
            unicast_addresses_iter.filter_map(|unicast_address| {
                let socket_address = NonNull::new(unicast_address.Address.lpSockaddr)?;
                get_ip_address_from_socket_address(socket_address)
                    .map(|ip_address| (String::from_utf16_lossy(friendly_name), ip_address))
            })
        })
        .collect();
    Ok(network_interfaces)
}
/// The [GetIpForwardTable][GetIpForwardTable] function retrieves the IPv4 routing table.
///
/// [GetIpForwardTable]: https://docs.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getipforwardtable
fn get_ip_forward_table(order: BOOL) -> Result<ReadonlyResource<MIB_IPFORWARDTABLE>, WIN32_ERROR> {
    // The minimum size of a routing table.
    const INITIAL_BUFFER_SIZE: u32 = mem::size_of::<MIB_IPFORWARDTABLE>() as u32;
    let mut size = INITIAL_BUFFER_SIZE;
    loop {
        // (Re-)allocate a buffer of the currently requested size. On
        // ERROR_INSUFFICIENT_BUFFER the API has written the required byte
        // count into `size`, so the next iteration retries with it.
        let ip_forward_table =
            ReadonlyResource::new(size.try_into().unwrap()).ok_or(ERROR_NOT_ENOUGH_MEMORY)?;
        let result = unsafe { GetIpForwardTable(ip_forward_table.ptr.as_ptr(), &mut size, order) };
        break match result {
            ERROR_SUCCESS => Ok(ip_forward_table),
            ERROR_INSUFFICIENT_BUFFER => continue,
            // Invalid parameters would be a programming error in this crate;
            // in release builds the catch-all arm below surfaces it instead.
            #[cfg(debug_assertions)]
            ERROR_INVALID_PARAMETER => unreachable!(),
            ERROR_NO_DATA => Err(ERROR_NO_DATA),
            ERROR_NOT_SUPPORTED => Err(ERROR_NOT_SUPPORTED),
            error => Err(error),
        };
    }
}
/// The [GetAdaptersAddresses][GetAdaptersAddresses] function retrieves the addresses associated with the adapters on
/// the local computer.
///
/// [GetAdaptersAddresses]: https://docs.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses
fn get_adapter_addresses(
    family: ADDRESS_FAMILY,
    flags: GET_ADAPTERS_ADDRESSES_FLAGS,
) -> Result<ReadonlyResource<IP_ADAPTER_ADDRESSES_LH>, WIN32_ERROR> {
    // The recommended buffer size is 15kb.
    const INITIAL_BUFFER_SIZE: u32 = 15000;
    let mut size: u32 = INITIAL_BUFFER_SIZE;
    loop {
        // (Re-)allocate; on ERROR_BUFFER_OVERFLOW the API has written the
        // required byte count into `size` and the loop retries with it.
        let adapter_addresses =
            ReadonlyResource::new(size.try_into().unwrap()).ok_or(ERROR_NOT_ENOUGH_MEMORY)?;
        let result = unsafe {
            GetAdaptersAddresses(
                family as u32,
                flags,
                ptr::null_mut(),
                adapter_addresses.ptr.as_ptr(),
                &mut size,
            )
        };
        break match result {
            ERROR_SUCCESS => Ok(adapter_addresses),
            ERROR_BUFFER_OVERFLOW => continue,
            // Invalid parameters would be a programming error in this crate;
            // in release builds the catch-all arm below surfaces it instead.
            #[cfg(debug_assertions)]
            ERROR_INVALID_PARAMETER => unreachable!(),
            ERROR_ADDRESS_NOT_ASSOCIATED => Err(ERROR_ADDRESS_NOT_ASSOCIATED),
            ERROR_NOT_ENOUGH_MEMORY => Err(ERROR_NOT_ENOUGH_MEMORY),
            ERROR_NO_DATA => Err(ERROR_NO_DATA),
            error => Err(error),
        };
    }
}
/// Converts a Windows socket address to an ip address.
fn get_ip_address_from_socket_address(socket_address: NonNull<SOCKADDR>) -> Option<IpAddr> {
let socket_address_family = u32::from(unsafe { socket_address.as_ref().sa_family }) as u16;
if socket_address_family == AF_INET {
let socket_address = unsafe { socket_address.cast::<SOCKADDR_IN>().as_ref() };
let address = unsafe { socket_address.sin_addr.S_un.S_addr };
let ipv4_address = IpAddr::from(address.to_ne_bytes());
Some(ipv4_address)
} else if socket_address_family == AF_INET6 {
let socket_address = unsafe { socket_address.cast::<SOCKADDR_IN6>().as_ref() };
let address = unsafe { socket_address.sin6_addr.u.Byte };
let ipv6_address = IpAddr::from(address);
Some(ipv6_address)
} else {
None
}
}
/// Formats a Windows API error code to a localized error message.
///
/// Returns a generic fallback message if the OS cannot produce one (e.g.
/// for an unrecognized code) instead of dereferencing a null buffer.
// Based on the example in https://docs.microsoft.com/en-us/globalization/localizability/win32-formatmessage.
fn format_error_code(error_code: WIN32_ERROR) -> String {
    let mut wide_ptr = ptr::null_mut::<u16>();
    // FORMAT_MESSAGE_ALLOCATE_BUFFER makes the OS allocate the message
    // buffer itself and write its address through the pointer-to-pointer
    // passed as `lpBuffer`.
    let len = unsafe {
        FormatMessageW(
            FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
            ptr::null(),
            error_code,
            0,
            &mut wide_ptr as *mut _ as *mut _,
            0,
            ptr::null(),
        )
    };
    if len == 0 || wide_ptr.is_null() {
        // FormatMessageW failed. The previous implementation built a slice
        // from a null pointer here, which is undefined behavior.
        debug_assert!(
            false,
            "Retrieving static error message from the OS for error code {} failed with error code {}.",
            error_code,
            unsafe { GetLastError() }
        );
        return format!("Unknown OS error code {error_code}");
    }
    // SAFETY: `wide_ptr` is non-null and points at `len` UTF-16 code units
    // allocated by the OS (checked above).
    let slice = unsafe { slice::from_raw_parts(wide_ptr, len.try_into().unwrap()) };
    let error_message = String::from_utf16_lossy(slice);
    // Free the OS-allocated buffer itself. Note: the buffer pointer must be
    // passed by value — passing `&mut wide_ptr` (as the original code did)
    // hands LocalFree a stack address, freeing the wrong pointer and
    // leaking the message buffer.
    unsafe {
        LocalFree(wide_ptr as *mut _);
    }
    error_message
}
/// Wrapper type around a pointer to a Windows API structure.
///
/// This type ensures that the memory allocated is freed automatically and fields are not overwritten.
struct ReadonlyResource<T> {
    // Pointer to the heap allocation holding the OS-populated `T` value.
    ptr: NonNull<T>,
    // Layout used for the allocation; required to deallocate correctly in `Drop`.
    layout: Layout,
}
/// A trait to allow low level linked list data structures to be used as Rust iterators.
///
/// The networking data structures often contain linked lists, which (unfortunately) are a separate types with
/// differently named next fields. This trait aims to abstract over these differences and offers helper structs to
/// transform linked lists into iterators.
trait LinkedListIterator {
    /// Returns the pointer to the next value, or `None` when the raw
    /// `Next` field is null (end of the list).
    fn next(&self) -> Option<NonNull<Self>>;
}
/// Adapter to convert a linked list to an iterator of references.
struct LinkedListIter<'linked_list, T: LinkedListIterator> {
    // Cursor: the next node to yield, or `None` when exhausted.
    node: Option<NonNull<T>>,
    // Ties yielded references to the lifetime of the buffer owning the nodes.
    __phantom_lifetime: PhantomData<&'linked_list T>,
}
impl<T> ReadonlyResource<T> {
    /// Allocates an uninitialized buffer of `size` bytes aligned for `T`,
    /// intended to be filled in by a Windows API call.
    ///
    /// Returns `None` if `size` is zero (allocating a zero-sized layout is
    /// undefined behavior), if the layout is invalid, or if the allocation
    /// fails.
    fn new(size: usize) -> Option<ReadonlyResource<T>> {
        if size == 0 {
            return None;
        }
        let layout = Layout::from_size_align(size, mem::align_of::<T>()).ok()?;
        // SAFETY: `layout` has a non-zero size (checked above).
        let ptr = NonNull::new(unsafe { alloc(layout).cast() })?;
        Some(ReadonlyResource { ptr, layout })
    }
}
impl<T> Deref for ReadonlyResource<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `ptr` is a live allocation owned by this value and only
        // freed in `Drop`. NOTE(review): soundness also relies on the OS
        // call having initialized the buffer before it is dereferenced —
        // upheld by the call sites in this file.
        unsafe { self.ptr.as_ref() }
    }
}
impl<T> Drop for ReadonlyResource<T> {
    fn drop(&mut self) {
        // SAFETY: `ptr` was allocated in `new` with exactly `self.layout`
        // and is not freed anywhere else.
        unsafe {
            dealloc(self.ptr.as_ptr().cast(), self.layout);
        }
    }
}
// Adapter list traversal: each node links to the next adapter via its raw
// `Next` field; a null pointer marks the end of the list.
impl LinkedListIterator for IP_ADAPTER_ADDRESSES_LH {
    fn next(&self) -> Option<NonNull<Self>> {
        NonNull::new(self.Next)
    }
}

// Same for the per-adapter unicast address list.
impl LinkedListIterator for IP_ADAPTER_UNICAST_ADDRESS_LH {
    fn next(&self) -> Option<NonNull<Self>> {
        NonNull::new(self.Next)
    }
}
impl<T: LinkedListIterator> LinkedListIter<'_, T> {
    /// Creates a new [LinkedListIter] from a pointer to the head of the
    /// linked list (`None` yields an empty iterator).
    pub fn new(head: Option<NonNull<T>>) -> Self {
        Self {
            node: head,
            __phantom_lifetime: PhantomData,
        }
    }
}
impl<'linked_list, T: LinkedListIterator> Iterator for LinkedListIter<'linked_list, T> {
    type Item = &'linked_list T;

    /// Yields a reference to the current node and advances the cursor to
    /// the node's `Next` pointer.
    fn next(&mut self) -> Option<Self::Item> {
        match self.node {
            Some(current) => {
                // SAFETY: `current` is non-null; the `'linked_list` lifetime
                // ties the reference to the buffer that owns the nodes.
                let item = unsafe { current.as_ref() };
                self.node = item.next();
                Some(item)
            }
            None => None,
        }
    }
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
LeoBorai/local-ip-address | https://github.com/LeoBorai/local-ip-address/blob/eed9856f5e2ee03e2490922867337b24d8000a91/examples/show_ip_and_ifs.rs | examples/show_ip_and_ifs.rs | use local_ip_address::{list_afinet_netifas, local_ip, local_ipv6};
// this is only supported on linux currently
#[cfg(target_os = "linux")]
use local_ip_address::local_broadcast_ip;
/// Demo binary: prints the local IPv4/IPv6 address, the broadcast address
/// (Linux only) and every known network interface to stdout.
fn main() {
    local_ip().map_or_else(
        |err| println!("Failed to get local IPv4: {}", err),
        |ip| println!("Local IPv4: {}", ip),
    );

    local_ipv6().map_or_else(
        |err| println!("Failed to get local IPv6: {}", err),
        |ip| println!("Local IPv6: {}", ip),
    );

    // this is only supported on linux currently
    #[cfg(target_os = "linux")]
    local_broadcast_ip().map_or_else(
        |err| println!("Failed to get local broadcast IPv4: {}", err),
        |ip| println!("Local broadcast IPv4: {}", ip),
    );

    list_afinet_netifas().map_or_else(
        |err| println!("Failed to get list of network interfaces: {}", err),
        |netifs| {
            println!("Got {} interfaces", netifs.len());
            for netif in netifs {
                println!("IF: {}, IP: {}", netif.0, netif.1);
            }
        },
    );
}
| rust | Apache-2.0 | eed9856f5e2ee03e2490922867337b24d8000a91 | 2026-01-04T20:18:14.206471Z | false |
ameknite/rust-ci-cd-template | https://github.com/ameknite/rust-ci-cd-template/blob/658abcd10b7863f7926f0256ee7457b48e132ec5/src/main.rs | src/main.rs | // SPDX-License-Identifier: CC0-1.0 OR MIT OR Apache-2.0
// -> todo! remove this notice
//! Documentation for the crate: A rust ci/cd templates
/// Binary entry point: prints a greeting to stdout.
fn main() {
    let greeting = "Hello, world!";
    println!("{greeting}");
}
| rust | Apache-2.0 | 658abcd10b7863f7926f0256ee7457b48e132ec5 | 2026-01-04T20:18:15.064331Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/errors.rs | src/errors.rs | use crate::{FirestoreTransaction, FirestoreTransactionId};
use gcloud_sdk::google::firestore::v1::WriteRequest;
use rsb_derive::Builder;
use serde::*;
use std::error::Error;
use std::fmt::Display;
use std::fmt::Formatter;
/// The main error type for all Firestore operations.
///
/// This enum consolidates various specific error types that can occur
/// during interactions with Google Firestore.
#[derive(Debug)]
pub enum FirestoreError {
    /// An error originating from the underlying system or a dependency, not directly
    /// from a Firestore API interaction. This could include issues with the gRPC client,
    /// I/O errors, etc.
    SystemError(FirestoreSystemError),
    /// A general error reported by the Firestore database.
    /// This often wraps errors returned by the Firestore gRPC API.
    DatabaseError(FirestoreDatabaseError),
    /// An error indicating a data conflict, such as trying to create a document
    /// that already exists, or an optimistic locking failure.
    DataConflictError(FirestoreDataConflictError),
    /// An error indicating that the requested data (e.g., a document or collection)
    /// was not found.
    DataNotFoundError(FirestoreDataNotFoundError),
    /// An error due to invalid parameters provided by the client for an operation.
    InvalidParametersError(FirestoreInvalidParametersError),
    /// An error that occurred during the serialization of data to be sent to Firestore.
    ///
    /// Note: both serialization directions share the same payload type,
    /// [`FirestoreSerializationError`]; the variant distinguishes direction.
    SerializeError(FirestoreSerializationError),
    /// An error that occurred during the deserialization of data received from Firestore.
    DeserializeError(FirestoreSerializationError),
    /// An error related to network connectivity or communication with the Firestore service.
    NetworkError(FirestoreNetworkError),
    /// An error that occurred specifically within the context of a Firestore transaction.
    ErrorInTransaction(FirestoreErrorInTransaction),
    /// An error related to the caching layer, if enabled and used.
    CacheError(FirestoreCacheError),
}
impl Display for FirestoreError {
    /// Delegates formatting to the wrapped error value of each variant.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        match self {
            FirestoreError::SystemError(err) => err.fmt(f),
            FirestoreError::DatabaseError(err) => err.fmt(f),
            FirestoreError::DataConflictError(err) => err.fmt(f),
            FirestoreError::DataNotFoundError(err) => err.fmt(f),
            FirestoreError::InvalidParametersError(err) => err.fmt(f),
            FirestoreError::SerializeError(err) => err.fmt(f),
            FirestoreError::DeserializeError(err) => err.fmt(f),
            FirestoreError::NetworkError(err) => err.fmt(f),
            FirestoreError::ErrorInTransaction(err) => err.fmt(f),
            FirestoreError::CacheError(err) => err.fmt(f),
        }
    }
}
impl Error for FirestoreError {
    /// Exposes the wrapped error value of each variant as the error source,
    /// enabling standard error-chain traversal.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        let inner: &(dyn Error + 'static) = match self {
            FirestoreError::SystemError(err) => err,
            FirestoreError::DatabaseError(err) => err,
            FirestoreError::DataConflictError(err) => err,
            FirestoreError::DataNotFoundError(err) => err,
            FirestoreError::InvalidParametersError(err) => err,
            FirestoreError::SerializeError(err) => err,
            FirestoreError::DeserializeError(err) => err,
            FirestoreError::NetworkError(err) => err,
            FirestoreError::ErrorInTransaction(err) => err,
            FirestoreError::CacheError(err) => err,
        };
        Some(inner)
    }
}
/// Generic public details for Firestore errors.
///
/// This struct is often embedded in more specific error types to provide
/// a common way to access a general error code or identifier.
/// It is created throughout this module via the `Builder`-derived `new(code)`
/// constructor, usually from a gRPC status code's `Debug` representation.
#[derive(Debug, Eq, PartialEq, Clone, Builder, Serialize, Deserialize)]
pub struct FirestoreErrorPublicGenericDetails {
    /// A string code representing the error, often derived from gRPC status codes
    /// or other specific error identifiers.
    pub code: String,
}
impl Display for FirestoreErrorPublicGenericDetails {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // Equivalent to `write!(f, "Error code: {}", self.code)`.
        f.write_str("Error code: ")?;
        f.write_str(&self.code)
    }
}
/// Represents a system-level or internal error.
///
/// These errors are typically not directly from the Firestore API but from underlying
/// components like the gRPC client, I/O operations, or other system interactions.
/// In this module it is produced by the `From<gcloud_sdk::error::Error>` and
/// `From<std::io::Error>` conversions.
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreSystemError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// A descriptive message detailing the system error.
    pub message: String,
}
impl Display for FirestoreSystemError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, message } = self;
        write!(f, "Firestore system/internal error: {public}. {message}")
    }
}
impl std::error::Error for FirestoreSystemError {}
/// Represents a general error reported by the Firestore database.
///
/// This often wraps errors returned by the Firestore gRPC API.
#[derive(Debug, Clone, Builder)]
pub struct FirestoreDatabaseError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// Specific details about the database error.
    pub details: String,
    /// Indicates whether retrying the operation might succeed.
    /// `firestore_err_to_backoff` uses this flag to classify the error as
    /// transient (retryable) or permanent.
    pub retry_possible: bool,
}
impl Display for FirestoreDatabaseError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, details, retry_possible } = self;
        write!(
            f,
            "Database general error occurred: {public}. {details}. Retry possibility: {retry_possible}"
        )
    }
}
impl std::error::Error for FirestoreDatabaseError {}
/// Represents an error due to a data conflict.
///
/// This can occur, for example, if trying to create a document that already exists
/// or if an optimistic locking condition (e.g., based on `update_time`) is not met.
/// Produced in this module from the gRPC `AlreadyExists` status code.
#[derive(Debug, Clone, Builder)]
pub struct FirestoreDataConflictError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// Specific details about the data conflict.
    pub details: String,
}
impl Display for FirestoreDataConflictError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, details } = self;
        write!(f, "Database conflict error occurred: {public}. {details}")
    }
}
impl std::error::Error for FirestoreDataConflictError {}
/// Represents an error indicating that requested data was not found.
///
/// This is typically returned when trying to access a document or resource
/// that does not exist in Firestore.
/// Produced in this module from the gRPC `NotFound` status code.
#[derive(Debug, Clone, Builder)]
pub struct FirestoreDataNotFoundError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// A message providing more details about what data was not found.
    pub data_detail_message: String,
}
impl Display for FirestoreDataNotFoundError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, data_detail_message } = self;
        write!(f, "Data not found error occurred: {public}. {data_detail_message}")
    }
}
impl std::error::Error for FirestoreDataNotFoundError {}
/// Public details for an invalid parameters error.
///
/// Provides information about which parameter was invalid and why.
/// Note: the `Builder`-derived `new` constructor takes the fields in
/// declaration order, i.e. `new(field, error)`.
#[derive(Debug, Eq, PartialEq, Clone, Builder, Serialize, Deserialize)]
pub struct FirestoreInvalidParametersPublicDetails {
    /// The name of the field or parameter that was invalid.
    pub field: String,
    /// A description of why the parameter is considered invalid.
    pub error: String,
}
impl Display for FirestoreInvalidParametersPublicDetails {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { field, error } = self;
        write!(f, "Invalid parameters error: {field}. {error}")
    }
}
/// Represents an error due to invalid parameters provided for an operation.
///
/// This error occurs when the client sends a request with parameters that
/// do not meet the Firestore API's requirements (e.g., invalid document ID format).
#[derive(Debug, Clone, Builder)]
pub struct FirestoreInvalidParametersError {
    /// Detailed information about the invalid parameter.
    pub public: FirestoreInvalidParametersPublicDetails,
}
impl Display for FirestoreInvalidParametersError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // Fixed copy-paste bug: this previously printed
        // "Data not found error occurred", which belongs to
        // `FirestoreDataNotFoundError` and contradicted this error's meaning.
        write!(f, "Invalid parameters error occurred: {}", self.public)
    }
}
impl std::error::Error for FirestoreInvalidParametersError {}
/// Public details for an error related to invalid JSON.
///
/// Note: This error type appears to be defined but might not be actively used
/// throughout the crate in favor of `FirestoreSerializationError` for broader
/// serialization issues. It is kept for backward compatibility of the public API.
#[derive(Debug, Eq, PartialEq, Clone, Builder, Serialize, Deserialize)]
pub struct FirestoreInvalidJsonErrorPublicDetails {
    /// A code identifying the nature of the JSON error.
    pub code: String,
}
/// Represents an error related to network connectivity or communication.
///
/// This can include issues like timeouts, connection refused, or other problems
/// encountered while trying to communicate with the Firestore service.
/// Also produced when the internal streaming-write channel is closed
/// (see the `From<tokio::sync::mpsc::error::SendError<WriteRequest>>` impl below).
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreNetworkError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// A descriptive message detailing the network error.
    pub message: String,
}
impl Display for FirestoreNetworkError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, message } = self;
        write!(f, "Network error: {public}. {message}")
    }
}
impl std::error::Error for FirestoreNetworkError {}
impl From<gcloud_sdk::error::Error> for FirestoreError {
    fn from(e: gcloud_sdk::error::Error) -> Self {
        // Any gcloud-sdk failure is surfaced as a system-level error, keeping
        // the SDK's error kind as the public code.
        let public = FirestoreErrorPublicGenericDetails::new(format!("{:?}", e.kind()));
        let message = format!("GCloud system error: {e}");
        FirestoreError::SystemError(FirestoreSystemError::new(public, message))
    }
}
impl From<gcloud_sdk::tonic::Status> for FirestoreError {
    fn from(status: gcloud_sdk::tonic::Status) -> Self {
        use gcloud_sdk::tonic::Code;
        let code = status.code();
        // Shared constructor for the public details derived from the gRPC code.
        let details = || FirestoreErrorPublicGenericDetails::new(format!("{code:?}"));
        match code {
            // The target already exists: a data conflict.
            Code::AlreadyExists => FirestoreError::DataConflictError(
                FirestoreDataConflictError::new(details(), status.to_string()),
            ),
            // The requested document/resource does not exist.
            Code::NotFound => FirestoreError::DataNotFoundError(
                FirestoreDataNotFoundError::new(details(), status.to_string()),
            ),
            // These codes are treated as transient: retrying may succeed.
            Code::Aborted | Code::Cancelled | Code::Unavailable | Code::ResourceExhausted => {
                FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                    details(),
                    status.to_string(),
                    true,
                ))
            }
            // `Unknown` may hide a transport-level (hyper) failure; inspect the cause.
            Code::Unknown => check_hyper_errors(status),
            // Everything else is a non-retryable database error.
            _ => FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                details(),
                status.to_string(),
                false,
            )),
        }
    }
}
/// Classifies a gRPC `Status` (reported with code `Unknown`) by inspecting its
/// underlying cause for `hyper` transport errors, so that retryable connection
/// problems (closed connection, timeout, generic transport error) are reported
/// as retryable `DatabaseError`s. Arm order matters: the catch-all `Some(err)`
/// arm must come after the `is_closed`/`is_timeout` guards.
fn check_hyper_errors(status: gcloud_sdk::tonic::Status) -> FirestoreError {
    match status.source() {
        // The status carries an underlying cause; try to interpret it as a hyper error.
        Some(hyper_error) => match hyper_error.downcast_ref::<hyper::Error>() {
            // Connection closed by the peer: retrying on a fresh connection may succeed.
            Some(err) if err.is_closed() => {
                FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                    FirestoreErrorPublicGenericDetails::new("CONNECTION_CLOSED".into()),
                    format!("Hyper error: {err}"),
                    true,
                ))
            }
            // Timed out waiting on the connection: also retryable.
            Some(err) if err.is_timeout() => {
                FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                    FirestoreErrorPublicGenericDetails::new("CONNECTION_TIMEOUT".into()),
                    format!("Hyper error: {err}"),
                    true,
                ))
            }
            // Any other hyper error: not considered retryable.
            Some(err) => FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                FirestoreErrorPublicGenericDetails::new(format!("{:?}", status.code())),
                format!("Hyper error: {err}"),
                false,
            )),
            // Cause is not a hyper error, but the status text indicates a
            // transport-level problem: treat it as a retryable connection error.
            _ if status.code() == gcloud_sdk::tonic::Code::Unknown
                && status.message().contains("transport error") =>
            {
                FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                    FirestoreErrorPublicGenericDetails::new("CONNECTION_ERROR".into()),
                    format!("{status}"),
                    true,
                ))
            }
            // Unrecognized cause: non-retryable database error.
            _ => FirestoreError::DatabaseError(FirestoreDatabaseError::new(
                FirestoreErrorPublicGenericDetails::new(format!("{:?}", status.code())),
                format!("{status}"),
                false,
            )),
        },
        // No cause attached at all: non-retryable database error.
        _ => FirestoreError::DatabaseError(FirestoreDatabaseError::new(
            FirestoreErrorPublicGenericDetails::new(format!("{:?}", status.code())),
            format!("{status} without root cause"),
            false,
        )),
    }
}
impl serde::ser::Error for FirestoreError {
    // Custom serde serialization failures are mapped to `SerializeError`.
    fn custom<T: Display>(msg: T) -> Self {
        Self::SerializeError(FirestoreSerializationError::from_message(msg.to_string()))
    }
}
impl serde::de::Error for FirestoreError {
    // Custom serde deserialization failures are mapped to `DeserializeError`.
    fn custom<T: Display>(msg: T) -> Self {
        Self::DeserializeError(FirestoreSerializationError::from_message(msg.to_string()))
    }
}
/// Represents an error that occurred during data serialization or deserialization.
///
/// This is used when converting Rust types to Firestore's format or vice-versa,
/// and an issue arises (e.g., unsupported types, malformed data).
#[derive(Debug, Builder)]
pub struct FirestoreSerializationError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// A descriptive message detailing the serialization/deserialization error.
    pub message: String,
    /// The path of the document being processed when the error occurred, if applicable.
    /// Defaults to `None` when constructed via `from_message`.
    pub document_path: Option<String>,
}
impl FirestoreSerializationError {
    /// Creates a `FirestoreSerializationError` from a message string, using the
    /// fixed public code `"SerializationError"` and no document path.
    pub fn from_message<S: AsRef<str>>(message: S) -> FirestoreSerializationError {
        FirestoreSerializationError::new(
            FirestoreErrorPublicGenericDetails::new("SerializationError".to_string()),
            message.as_ref().to_string(),
        )
    }
}
impl Display for FirestoreSerializationError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // Render "-" when no document path is attached to the error.
        let path = self.document_path.as_deref().unwrap_or("-");
        write!(
            f,
            "Invalid serialization: {}. {}. Document path: {}",
            self.public, self.message, path
        )
    }
}
impl std::error::Error for FirestoreSerializationError {}
/// Represents an error related to the caching layer.
///
/// This error is used if the `caching` feature is enabled and an issue
/// occurs with cache operations (e.g., backend storage error, cache inconsistency).
#[derive(Debug, Builder)]
pub struct FirestoreCacheError {
    /// Generic public details about the error.
    pub public: FirestoreErrorPublicGenericDetails,
    /// A descriptive message detailing the cache error.
    pub message: String,
}
impl Display for FirestoreCacheError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        let Self { public, message } = self;
        write!(f, "Cache error: {public}. {message}")
    }
}
impl std::error::Error for FirestoreCacheError {}
impl From<chrono::ParseError> for FirestoreError {
    fn from(parse_err: chrono::ParseError) -> Self {
        // Date/time parsing failures surface as deserialization errors.
        let message = format!("Parse error: {parse_err}");
        FirestoreError::DeserializeError(FirestoreSerializationError::from_message(message))
    }
}
impl From<chrono::OutOfRangeError> for FirestoreError {
    fn from(out_of_range: chrono::OutOfRangeError) -> Self {
        // Fix: the `field` and `error` arguments were previously swapped — the
        // Builder-derived `new` takes the fields in declaration order
        // (`new(field, error)`), so the field name was reported as
        // "Out of range: ..." and the error description as "duration".
        FirestoreError::InvalidParametersError(FirestoreInvalidParametersError::new(
            FirestoreInvalidParametersPublicDetails::new(
                "duration".to_string(),
                format!("Out of range: {out_of_range}"),
            ),
        ))
    }
}
impl From<tokio::sync::mpsc::error::SendError<gcloud_sdk::google::firestore::v1::WriteRequest>>
for FirestoreError
{
fn from(send_error: tokio::sync::mpsc::error::SendError<WriteRequest>) -> Self {
FirestoreError::NetworkError(FirestoreNetworkError::new(
FirestoreErrorPublicGenericDetails::new("SEND_STREAM_ERROR".into()),
format!("Send stream error: {send_error}"),
))
}
}
/// Represents an error that occurred within the scope of a Firestore transaction.
///
/// This struct captures errors that happen during the execution of user-provided
/// code within a transaction block, or errors from Firestore related to the transaction itself.
#[derive(Debug, Builder)]
pub struct FirestoreErrorInTransaction {
    /// The ID of the transaction in which the error occurred.
    /// Rendered as hex in the `Display` implementation below.
    pub transaction_id: FirestoreTransactionId,
    /// The underlying error that caused the transaction to fail.
    /// Exposed via `Error::source` for error-chain inspection.
    pub source: Box<dyn std::error::Error + Send + Sync>,
}
impl FirestoreErrorInTransaction {
    /// Wraps an error as a permanent `BackoffError` within a transaction context.
    ///
    /// Permanent errors are those that are unlikely to be resolved by retrying
    /// the transaction (e.g., data validation errors in user code).
    pub fn permanent<E: std::error::Error + Send + Sync + 'static>(
        transaction: &FirestoreTransaction,
        source: E,
    ) -> BackoffError<FirestoreError> {
        BackoffError::permanent(FirestoreError::ErrorInTransaction(
            FirestoreErrorInTransaction {
                transaction_id: transaction.transaction_id().clone(),
                source: Box::new(source),
            },
        ))
    }
    /// Wraps an error as a transient `BackoffError` within a transaction context.
    ///
    /// Transient errors are those that might be resolved by retrying the transaction
    /// (e.g., temporary network issues, concurrent modification conflicts).
    pub fn transient<E: std::error::Error + Send + Sync + 'static>(
        transaction: &FirestoreTransaction,
        source: E,
    ) -> BackoffError<FirestoreError> {
        BackoffError::transient(FirestoreError::ErrorInTransaction(
            FirestoreErrorInTransaction {
                transaction_id: transaction.transaction_id().clone(),
                source: Box::new(source),
            },
        ))
    }
    /// Wraps an error as a `BackoffError` that should be retried after a specific duration.
    ///
    /// Negative durations are clamped to zero: the previous unconditional
    /// `as u64` cast would wrap a negative millisecond count around to an
    /// astronomically large delay.
    pub fn retry_after<E: std::error::Error + Send + Sync + 'static>(
        transaction: &FirestoreTransaction,
        source: E,
        retry_after: chrono::Duration,
    ) -> BackoffError<FirestoreError> {
        // Clamp to non-negative before converting to the unsigned std Duration.
        let millis = retry_after.num_milliseconds().max(0) as u64;
        BackoffError::retry_after(
            FirestoreError::ErrorInTransaction(FirestoreErrorInTransaction {
                transaction_id: transaction.transaction_id().clone(),
                source: Box::new(source),
            }),
            std::time::Duration::from_millis(millis),
        )
    }
}
impl Display for FirestoreErrorInTransaction {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // The transaction id is raw bytes; render it as hex for readability.
        let tx_hex = hex::encode(&self.transaction_id);
        write!(
            f,
            "Error occurred inside run transaction scope {tx_hex}: {}",
            self.source
        )
    }
}
impl std::error::Error for FirestoreErrorInTransaction {
    // Expose the boxed underlying error as this error's cause chain.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(self.source.as_ref())
    }
}
/// A type alias for `backoff::Error<E>`, commonly used for operations
/// that support retry mechanisms with backoff strategies.
/// It distinguishes permanent failures from transient ones that may be retried
/// (see `firestore_err_to_backoff` below).
pub type BackoffError<E> = backoff::Error<E>;
pub(crate) fn firestore_err_to_backoff(err: FirestoreError) -> BackoffError<FirestoreError> {
match err {
FirestoreError::DatabaseError(ref db_err) if db_err.retry_possible => {
backoff::Error::transient(err)
}
other => backoff::Error::permanent(other),
}
}
/// Crate-internal result alias for operations that can fail with any boxed error.
pub(crate) type AnyBoxedErrResult<T> = Result<T, Box<dyn std::error::Error + Send + Sync>>;
impl From<std::io::Error> for FirestoreError {
    fn from(io_error: std::io::Error) -> Self {
        // I/O failures map to system errors, with the io::ErrorKind as the code.
        let public = FirestoreErrorPublicGenericDetails::new(format!("{:?}", io_error.kind()));
        let message = format!("I/O error: {io_error}");
        FirestoreError::SystemError(FirestoreSystemError::new(public, message))
    }
}
#[cfg(feature = "caching-persistent")]
impl From<gcloud_sdk::prost::EncodeError> for FirestoreError {
fn from(err: gcloud_sdk::prost::EncodeError) -> Self {
FirestoreError::SerializeError(FirestoreSerializationError::new(
FirestoreErrorPublicGenericDetails::new("PrototBufEncodeError".into()),
format!("Protobuf serialization error: {err}"),
))
}
}
#[cfg(feature = "caching-persistent")]
impl From<gcloud_sdk::prost::DecodeError> for FirestoreError {
fn from(err: gcloud_sdk::prost::DecodeError) -> Self {
FirestoreError::SerializeError(FirestoreSerializationError::new(
FirestoreErrorPublicGenericDetails::new("PrototBufDecodeError".into()),
format!("Protobuf deserialization error: {err}"),
))
}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_document_functions.rs | src/firestore_document_functions.rs | use crate::FirestoreDocument;
use std::collections::HashMap;
/// Retrieves a field's value from a Firestore document using a dot-separated path.
///
/// This function allows accessing nested fields within a document's map values.
/// For example, given a document with a field `user` which is a map containing
/// a field `name`, you can retrieve the value of `name` using the path `"user.name"`.
///
/// Backticks (`) in field paths are automatically removed, as they are sometimes
/// used by the `struct_path` macro for escaping.
///
/// # Arguments
/// * `doc`: A reference to the [`FirestoreDocument`] to extract the field from.
/// * `field_path`: A dot-separated string representing the path to the desired field.
///
/// # Returns
/// Returns `Some(&gcloud_sdk::google::firestore::v1::value::ValueType)` if the field
/// is found at the specified path, otherwise `None`. The `ValueType` enum holds the
/// actual typed value (e.g., `StringValue`, `IntegerValue`).
///
/// # Examples
/// ```rust
/// use firestore::{firestore_doc_get_field_by_path, FirestoreDocument, FirestoreValue};
/// use gcloud_sdk::google::firestore::v1::MapValue;
/// use std::collections::HashMap;
///
/// let mut fields = HashMap::new();
/// let mut user_map_fields = HashMap::new();
/// user_map_fields.insert("name".to_string(), gcloud_sdk::google::firestore::v1::Value {
///     value_type: Some(gcloud_sdk::google::firestore::v1::value::ValueType::StringValue("Alice".to_string())),
/// });
/// fields.insert("user".to_string(), gcloud_sdk::google::firestore::v1::Value {
///     value_type: Some(gcloud_sdk::google::firestore::v1::value::ValueType::MapValue(MapValue { fields: user_map_fields })),
/// });
///
/// let doc = FirestoreDocument {
///     name: "projects/p/databases/d/documents/c/doc1".to_string(),
///     fields,
///     create_time: None,
///     update_time: None,
/// };
///
/// let name_value_type = firestore_doc_get_field_by_path(&doc, "user.name");
/// assert!(name_value_type.is_some());
/// if let Some(gcloud_sdk::google::firestore::v1::value::ValueType::StringValue(name)) = name_value_type {
///     assert_eq!(name, "Alice");
/// } else {
///     panic!("Expected StringValue");
/// }
///
/// let non_existent_value = firestore_doc_get_field_by_path(&doc, "user.age");
/// assert!(non_existent_value.is_none());
/// ```
pub fn firestore_doc_get_field_by_path<'d>(
    doc: &'d FirestoreDocument,
    field_path: &str,
) -> Option<&'d gcloud_sdk::google::firestore::v1::value::ValueType> {
    // Split the dotted path into segments, stripping any escaping backticks.
    let segments: Vec<String> = field_path
        .split('.')
        .map(|segment| segment.replace('`', ""))
        .collect();
    firestore_doc_get_field_by_path_arr(&doc.fields, &segments)
}
/// Internal helper that walks the field map recursively, one path segment at a
/// time, descending into `MapValue`s for intermediate segments.
fn firestore_doc_get_field_by_path_arr<'d>(
    fields: &'d HashMap<String, gcloud_sdk::google::firestore::v1::Value>,
    field_path_arr: &[String],
) -> Option<&'d gcloud_sdk::google::firestore::v1::value::ValueType> {
    // An empty path yields None, matching the behavior of the original fold.
    let (head, tail) = field_path_arr.split_first()?;
    let value = fields.get(head)?;
    if tail.is_empty() {
        // Last segment: return the typed value itself (if set).
        value.value_type.as_ref()
    } else if let Some(gcloud_sdk::google::firestore::v1::value::ValueType::MapValue(map_value)) =
        value.value_type.as_ref()
    {
        // Intermediate segment: recurse into the nested map.
        firestore_doc_get_field_by_path_arr(&map_value.fields, tail)
    } else {
        // Intermediate segment on a non-map value: path cannot be resolved.
        None
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_meta.rs | src/firestore_meta.rs | use crate::errors::FirestoreError;
use crate::timestamp_utils::{from_duration, from_timestamp};
use crate::FirestoreTransactionId;
use chrono::{DateTime, Duration, Utc};
use gcloud_sdk::google::firestore::v1::{Document, ExplainMetrics, RunQueryResponse};
use gcloud_sdk::prost_types::value::Kind;
use rsb_derive::Builder;
use std::collections::BTreeMap;
/// A container that pairs a document (or other data `T`) with its associated Firestore metadata.
///
/// This is often used in query responses where each document is accompanied by
/// metadata like its read time, transaction ID (if applicable), and potentially
/// query explanation metrics. See the `TryFrom<RunQueryResponse>` impl below.
///
/// # Type Parameters
/// * `T`: The type of the document or data being wrapped. Typically `gcloud_sdk::google::firestore::v1::Document`
///   or a user-defined struct after deserialization.
#[derive(Debug, PartialEq, Clone)]
pub struct FirestoreWithMetadata<T> {
    /// The document or data itself. This is an `Option` because some Firestore
    /// operations (like a query that finds no results but still has metadata)
    /// might not return a document.
    pub document: Option<T>,
    /// The metadata associated with the document or operation.
    pub metadata: FirestoreDocumentMetadata,
}
/// Metadata associated with a Firestore document or a query operation.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreDocumentMetadata {
    /// The ID of the transaction that was started as part of this request.
    /// Present if the request initiated a transaction.
    pub transaction_id: Option<FirestoreTransactionId>,
    /// The time at which the document was read. This may be monotonically increasing.
    pub read_time: Option<DateTime<Utc>>,
    /// The number of results that were skipped before returning the first result.
    /// This is relevant for paginated queries or queries with an offset.
    pub skipped_results: usize,
    /// Query execution explanation metrics, if requested and available.
    pub explain_metrics: Option<FirestoreExplainMetrics>,
}
/// Detailed metrics about query execution, if requested via [`FirestoreExplainOptions`](crate::FirestoreExplainOptions).
///
/// This includes a summary of the query plan and statistics about the execution.
/// Converted from the raw gRPC `ExplainMetrics` via the `TryFrom` impl below.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreExplainMetrics {
    /// A summary of the query plan.
    pub plan_summary: Option<FirestorePlanSummary>,
    /// Statistics about the query execution.
    pub execution_stats: Option<FirestoreExecutionStats>,
}
/// A dynamically-typed structure used to represent complex, nested data
/// often found in Firestore's explain metrics or other metadata.
///
/// It holds fields as a `BTreeMap` of string keys to `gcloud_sdk::prost_types::Value`,
/// which is a generic protobuf value type capable of holding various data types.
/// `Debug` is implemented manually below to produce a compact, human-readable
/// rendering of the nested values.
#[derive(PartialEq, Clone, Builder)]
pub struct FirestoreDynamicStruct {
    /// A map of field names to their `prost_types::Value` representations.
    pub fields: BTreeMap<String, gcloud_sdk::prost_types::Value>,
}
impl std::fmt::Debug for FirestoreDynamicStruct {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Recursively renders a protobuf Value into a compact, human-readable string.
        fn render(value: &gcloud_sdk::prost_types::Value) -> String {
            match value.kind.as_ref() {
                None => String::new(),
                Some(Kind::NullValue(_)) => "null".to_string(),
                Some(Kind::BoolValue(b)) => b.to_string(),
                Some(Kind::NumberValue(n)) => n.to_string(),
                Some(Kind::StringValue(s)) => format!("'{s}'"),
                Some(Kind::StructValue(nested)) => {
                    let parts: Vec<String> = nested
                        .fields
                        .iter()
                        .map(|(key, val)| format!("{}: {}", key, render(val)))
                        .collect();
                    parts.join(", ")
                }
                Some(Kind::ListValue(list)) => {
                    let parts: Vec<String> = list.values.iter().map(render).collect();
                    parts.join(", ")
                }
            }
        }
        let rendered: Vec<String> = self
            .fields
            .iter()
            .map(|(key, val)| format!("{}: {}", key, render(val)))
            .collect();
        f.debug_struct("FirestoreDynamicStruct")
            .field("fields", &rendered.join(", "))
            .finish()
    }
}
/// A summary of the query plan used by Firestore to execute a query.
///
/// This is part of the [`FirestoreExplainMetrics`] and provides insights into
/// how Firestore satisfied the query, particularly which indexes were utilized.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestorePlanSummary {
    /// A list of indexes used to execute the query. Each index is represented
    /// as a [`FirestoreDynamicStruct`] containing details about the index.
    pub indexes_used: Vec<FirestoreDynamicStruct>,
}
/// Statistics related to the execution of a Firestore query.
///
/// This is part of the [`FirestoreExplainMetrics`] and provides performance-related
/// information such as the number of results, execution time, and read operations.
#[derive(Debug, PartialEq, Clone, Builder)]
pub struct FirestoreExecutionStats {
    /// The number of results returned by the query.
    pub results_returned: usize,
    /// The duration it took to execute the query on the server.
    pub execution_duration: Option<Duration>,
    /// The number of read operations performed by the query.
    pub read_operations: usize,
    /// Additional debugging statistics, represented as a [`FirestoreDynamicStruct`].
    /// The content of this can vary and is intended for debugging purposes.
    pub debug_stats: Option<FirestoreDynamicStruct>,
}
impl TryFrom<RunQueryResponse> for FirestoreWithMetadata<Document> {
type Error = FirestoreError;
fn try_from(value: RunQueryResponse) -> Result<Self, Self::Error> {
Ok(FirestoreWithMetadata {
document: value.document,
metadata: FirestoreDocumentMetadata {
transaction_id: if !value.transaction.is_empty() {
Some(value.transaction)
} else {
None
},
read_time: value.read_time.map(from_timestamp).transpose()?,
skipped_results: value.skipped_results as usize,
explain_metrics: value.explain_metrics.map(|v| v.try_into()).transpose()?,
},
})
}
}
impl TryFrom<ExplainMetrics> for FirestoreExplainMetrics {
type Error = FirestoreError;
fn try_from(value: ExplainMetrics) -> Result<Self, Self::Error> {
Ok(FirestoreExplainMetrics {
plan_summary: value.plan_summary.map(|v| v.try_into()).transpose()?,
execution_stats: value.execution_stats.map(|v| v.try_into()).transpose()?,
})
}
}
impl TryFrom<gcloud_sdk::google::firestore::v1::PlanSummary> for FirestorePlanSummary {
    type Error = FirestoreError;
    fn try_from(
        value: gcloud_sdk::google::firestore::v1::PlanSummary,
    ) -> Result<Self, Self::Error> {
        // Wrap each raw index description in a dynamic struct holding its fields.
        let mut indexes_used = Vec::with_capacity(value.indexes_used.len());
        for index in value.indexes_used {
            indexes_used.push(FirestoreDynamicStruct::new(index.fields));
        }
        Ok(FirestorePlanSummary { indexes_used })
    }
}
impl TryFrom<gcloud_sdk::google::firestore::v1::ExecutionStats> for FirestoreExecutionStats {
    type Error = FirestoreError;
    fn try_from(
        value: gcloud_sdk::google::firestore::v1::ExecutionStats,
    ) -> Result<Self, Self::Error> {
        // Debug stats, when present, are exposed as a dynamic struct.
        let debug_stats = value
            .debug_stats
            .map(|stats| FirestoreDynamicStruct::new(stats.fields));
        Ok(FirestoreExecutionStats {
            results_returned: value.results_returned as usize,
            execution_duration: value.execution_duration.map(from_duration),
            read_operations: value.read_operations as usize,
            debug_stats,
        })
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/lib.rs | src/lib.rs | //! # Firestore for Rust
//!
//! Library provides a simple API for Google Firestore:
//! - Create or update documents using Rust structures and Serde;
//! - Support for querying / streaming / listing / listening changes / aggregated queries of documents from Firestore;
//! - Fluent high-level and strongly typed API;
//! - Full async based on Tokio runtime;
//! - Macro that helps you use JSON paths as references to your structure fields;
//! - Implements own Serde serializer to Firestore gRPC values;
//! - Supports for Firestore timestamp with `#[serde(with)]`;
//! - Transactions support;
//! - Streaming batch writes with automatic throttling to avoid time limits from Firestore;
//! - Aggregated Queries;
//! - Google client based on [gcloud-sdk library](https://github.com/abdolence/gcloud-sdk-rs)
//! that automatically detects GKE environment or application default accounts for local development;
//!
//! ## Example using the Fluent API:
//!
//! ```rust,no_run
//!
//!use firestore::*;
//!use serde::{Deserialize, Serialize};
//!use futures::stream::BoxStream;
//!use futures::StreamExt;
//!
//!pub fn config_env_var(name: &str) -> Result<String, String> {
//! std::env::var(name).map_err(|e| format!("{}: {}", name, e))
//!}
//!
//!// Example structure to play with
//!#[derive(Debug, Clone, Deserialize, Serialize)]
//!struct MyTestStructure {
//! some_id: String,
//! some_string: String,
//! one_more_string: String,
//! some_num: u64,
//!}
//!
//!#[tokio::main]
//!async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
//!
//! // Create an instance
//! let db = FirestoreDb::new(&config_env_var("PROJECT_ID")?).await?;
//!
//! const TEST_COLLECTION_NAME: &'static str = "test";
//!
//! let my_struct = MyTestStructure {
//! some_id: "test-1".to_string(),
//! some_string: "Test".to_string(),
//! one_more_string: "Test2".to_string(),
//! some_num: 42,
//! };
//!
//! // Create documents
//! let object_returned: MyTestStructure = db.fluent()
//! .insert()
//! .into(TEST_COLLECTION_NAME)
//! .document_id(&my_struct.some_id)
//! .object(&my_struct)
//! .execute()
//! .await?;
//!
//! // Update documents
//! let object_updated: MyTestStructure = db.fluent()
//! .update()
//! .fields(paths!(MyTestStructure::{some_num, one_more_string})) // Update only specified fields
//! .in_col(TEST_COLLECTION_NAME)
//! .document_id(&my_struct.some_id)
//! .object(&MyTestStructure {
//! some_num: my_struct.some_num + 1,
//! one_more_string: "updated-value".to_string(),
//! ..my_struct.clone()
//! })
//! .execute()
//! .await?;
//!
//! // Get a document as an object by id
//! let find_it_again: Option<MyTestStructure> = db.fluent()
//! .select()
//! .by_id_in(TEST_COLLECTION_NAME)
//! .obj()
//! .one(&my_struct.some_id)
//! .await?;
//!
//! // Query and read stream of objects
//! let object_stream: BoxStream<MyTestStructure> = db.fluent()
//! .select()
//! .fields(paths!(MyTestStructure::{some_id, some_num, some_string, one_more_string})) // Optionally select the fields needed
//! .from(TEST_COLLECTION_NAME)
//! .filter(|q| { // Fluent filter API example
//! q.for_all([
//! q.field(path!(MyTestStructure::some_num)).is_not_null(),
//! q.field(path!(MyTestStructure::some_string)).eq("Test"),
//! // Sometimes you have optional filters
//! Some("Test2")
//! .and_then(|value| q.field(path!(MyTestStructure::one_more_string)).eq(value)),
//! ])
//! })
//! .order_by([(
//! path!(MyTestStructure::some_num),
//! FirestoreQueryDirection::Descending,
//! )])
//! .obj() // Reading documents as structures using Serde gRPC deserializer
//! .stream_query()
//! .await?;
//!
//! let as_vec: Vec<MyTestStructure> = object_stream.collect().await;
//! println!("{:?}", as_vec);
//!
//! // Delete documents
//! db.fluent()
//! .delete()
//! .from(TEST_COLLECTION_NAME)
//! .document_id(&my_struct.some_id)
//! .execute()
//! .await?;
//!
//! Ok(())
//! }
//! ```
//!
//! All examples and more docs available at: [github](https://github.com/abdolence/firestore-rs/tree/master/examples)
//!
#![allow(clippy::new_without_default)]
#![allow(clippy::needless_lifetimes)]
#![forbid(unsafe_code)]
/// Defines the error types used throughout the `firestore-rs` crate.
///
/// This module contains the primary [`FirestoreError`](errors::FirestoreError) enum
/// and various specific error structs that provide detailed information about
/// issues encountered during Firestore operations.
pub mod errors;
mod firestore_value;
/// Re-exports all public items from the `firestore_value` module.
///
/// The `firestore_value` module provides representations and utilities for
/// working with Firestore's native data types (e.g., `Map`, `Array`, `Timestamp`).
pub use firestore_value::*;
mod db;
/// Re-exports all public items from the `db` module.
///
/// The `db` module contains the core [`FirestoreDb`](db::FirestoreDb) client and
/// functionalities for interacting with the Firestore database, such as CRUD operations,
/// queries, and transactions.
pub use db::*;
mod firestore_serde;
/// Re-exports all public items from the `firestore_serde` module.
///
/// This module provides custom Serde serializers and deserializers for converting
/// Rust types to and from Firestore's data format. It enables seamless integration
/// of user-defined structs with Firestore.
pub use firestore_serde::*;
mod struct_path_macro;
/// Re-exports macros for creating type-safe paths to struct fields.
///
/// These macros, like `path!` and `paths!`, are used to refer to document fields
/// in a way that can be checked at compile time, reducing runtime errors when
/// specifying fields for queries, updates, or projections.
/// The `#[allow(unused_imports)]` is present because these are macro re-exports
/// and their usage pattern might trigger the lint incorrectly.
#[allow(unused_imports)]
pub use struct_path_macro::*;
/// Provides utility functions for working with Firestore timestamps.
///
/// This module includes helpers for converting between `chrono::DateTime<Utc>`
/// and Google's `Timestamp` protobuf type, often used with `#[serde(with)]`
/// attributes for automatic conversion.
pub mod timestamp_utils;
use crate::errors::FirestoreError;
/// A type alias for `std::result::Result<T, FirestoreError>`.
///
/// This is the standard result type used throughout the `firestore-rs` crate
/// for operations that can fail, encapsulating either a successful value `T`
/// or a [`FirestoreError`].
pub type FirestoreResult<T> = std::result::Result<T, FirestoreError>;
/// A type alias for the raw Firestore document representation.
///
/// This refers to `gcloud_sdk::google::firestore::v1::Document`, which is the
/// underlying gRPC/protobuf structure for a Firestore document.
pub type FirestoreDocument = gcloud_sdk::google::firestore::v1::Document;
mod firestore_meta;
/// Re-exports all public items from the `firestore_meta` module.
///
/// This module provides metadata associated with Firestore documents, such as
/// `create_time`, `update_time`, and `read_time`. These are often included
/// in responses from Firestore.
pub use firestore_meta::*;
mod firestore_document_functions;
/// Re-exports helper functions for working with [`FirestoreDocument`]s.
///
/// These functions provide conveniences for extracting data or metadata
/// from raw Firestore documents.
pub use firestore_document_functions::*;
mod fluent_api;
/// Re-exports all public items from the `fluent_api` module.
///
/// This module provides a high-level, fluent interface for building and executing
/// Firestore operations (select, insert, update, delete, list). It aims to make
/// common database interactions more ergonomic and type-safe.
pub use fluent_api::*;
/// Re-exports the `struct_path` crate.
///
/// The `struct_path` crate is a dependency that provides the core functionality
/// for the `path!` and `paths!` macros used for type-safe field path generation.
pub extern crate struct_path;
#[cfg(feature = "caching")]
/// Provides caching capabilities for Firestore operations.
///
/// This module is only available if the `caching` feature is enabled.
/// It allows for caching Firestore documents and query results to reduce latency
/// and read costs.
mod cache;
#[cfg(feature = "caching")]
/// Re-exports all public items from the `cache` module.
///
/// This is only available if the `caching` feature is enabled.
/// It includes types like [`FirestoreCache`](cache::FirestoreCache) and various
/// caching backends and configurations.
pub use cache::*;
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/timestamp_utils.rs | src/timestamp_utils.rs | use crate::errors::*;
use crate::FirestoreResult;
use chrono::prelude::*;
/// Converts a Google `prost_types::Timestamp` to a `chrono::DateTime<Utc>`.
///
/// Firestore uses Google's `Timestamp` protobuf message to represent timestamps.
/// This function facilitates conversion to the more commonly used `chrono::DateTime<Utc>`
/// in Rust applications.
///
/// # Arguments
/// * `ts`: The Google `Timestamp` to convert.
///
/// # Returns
/// A `FirestoreResult` containing the `DateTime<Utc>` on success, or a
/// `FirestoreError::DeserializeError` if the timestamp is invalid or out of range.
///
/// # Examples
/// ```rust
/// use firestore::timestamp_utils::from_timestamp;
/// use chrono::{Utc, TimeZone};
///
/// let prost_timestamp = gcloud_sdk::prost_types::Timestamp { seconds: 1670000000, nanos: 0 };
/// let chrono_datetime = from_timestamp(prost_timestamp).unwrap();
///
/// assert_eq!(chrono_datetime, Utc.with_ymd_and_hms(2022, 12, 2, 16, 53, 20).unwrap());
/// ```
pub fn from_timestamp(ts: gcloud_sdk::prost_types::Timestamp) -> FirestoreResult<DateTime<Utc>> {
    // `chrono::DateTime::from_timestamp` returns `None` when the seconds or
    // nanos are invalid/out of range; map that case to a deserialization error.
    chrono::DateTime::from_timestamp(ts.seconds, ts.nanos as u32).ok_or_else(|| {
        FirestoreError::DeserializeError(FirestoreSerializationError::from_message(format!(
            "Invalid or out-of-range datetime: {ts:?}"
        )))
    })
}
/// Converts a `chrono::DateTime<Utc>` to a Google `prost_types::Timestamp`.
///
/// This is the reverse of [`from_timestamp`], used when sending timestamp data
/// to Firestore.
///
/// # Arguments
/// * `dt`: The `chrono::DateTime<Utc>` to convert.
///
/// # Returns
/// The corresponding Google `Timestamp`.
///
/// # Examples
/// ```rust
/// use firestore::timestamp_utils::to_timestamp;
/// use chrono::{Utc, TimeZone};
///
/// let chrono_datetime = Utc.with_ymd_and_hms(2022, 12, 2, 16, 53, 20).unwrap();
/// let prost_timestamp = to_timestamp(chrono_datetime);
///
/// assert_eq!(prost_timestamp.seconds, 1670000000);
/// assert_eq!(prost_timestamp.nanos, 0);
/// ```
pub fn to_timestamp(dt: DateTime<Utc>) -> gcloud_sdk::prost_types::Timestamp {
    // Whole seconds since the Unix epoch plus the sub-second nanosecond part.
    let seconds = dt.timestamp();
    let nanos = dt.nanosecond() as i32;
    gcloud_sdk::prost_types::Timestamp { seconds, nanos }
}
/// Converts a Google `prost_types::Duration` to a `chrono::Duration`.
///
/// Google's `Duration` protobuf message is used in some Firestore contexts,
/// for example, in query execution statistics.
///
/// # Arguments
/// * `duration`: The Google `Duration` to convert.
///
/// # Returns
/// The corresponding `chrono::Duration`.
///
/// # Examples
/// ```rust
/// use firestore::timestamp_utils::from_duration;
///
/// let prost_duration = gcloud_sdk::prost_types::Duration { seconds: 5, nanos: 500_000_000 };
/// let chrono_duration = from_duration(prost_duration);
///
/// assert_eq!(chrono_duration, chrono::Duration::milliseconds(5500));
/// ```
pub fn from_duration(duration: gcloud_sdk::prost_types::Duration) -> chrono::Duration {
    // Sum the whole-second and nanosecond components separately.
    let whole_seconds = chrono::Duration::seconds(duration.seconds);
    let sub_second = chrono::Duration::nanoseconds(i64::from(duration.nanos));
    whole_seconds + sub_second
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/firestore_value.rs | src/firestore_value.rs | use gcloud_sdk::google::firestore::v1::Value;
use std::collections::HashMap;
/// Represents a Firestore value, wrapping the underlying gRPC `Value` type.
///
/// This struct provides a convenient way to work with Firestore's native data types
/// and is used extensively throughout the crate, especially in serialization and
/// deserialization, query filters, and field transformations.
///
/// It can represent various types such as null, boolean, integer, double, timestamp,
/// string, bytes, geo point, array, and map.
///
/// Conversions from common Rust types to `FirestoreValue` are typically handled by
/// the `From` trait implementations in the `firestore_serde` module (though not directly
/// visible in this file, they are a core part of how `FirestoreValue` is used).
///
/// # Examples
///
/// ```rust
/// use firestore::FirestoreValue;
///
/// // Or, for direct construction of a map value:
/// let fv_map = FirestoreValue::from_map(vec![
/// ("name", "Alice".into()), // .into() relies on From<T> for FirestoreValue
/// ("age", 30.into()),
/// ]);
/// ```
#[derive(Debug, PartialEq, Clone)]
pub struct FirestoreValue {
    /// The underlying gRPC `Value` protobuf message as defined by the
    /// Firestore v1 API; all helper constructors ultimately wrap one of these.
    pub value: Value,
}
impl FirestoreValue {
    /// Wraps a raw gRPC `gcloud_sdk::google::firestore::v1::Value` without conversion.
    pub fn from(value: Value) -> Self {
        Self { value }
    }

    /// Builds a Firestore map value from an iterator of `(field name, value)` pairs.
    ///
    /// # Type Parameters
    /// * `I`: Iterator yielding field-name/value pairs.
    /// * `IS`: Any type usable as a string slice for the field name.
    pub fn from_map<I, IS>(fields: I) -> Self
    where
        I: IntoIterator<Item = (IS, FirestoreValue)>,
        IS: AsRef<str>,
    {
        // Unwrap each FirestoreValue into its raw protobuf Value, keyed by owned name.
        let mut field_map: HashMap<String, Value> = HashMap::new();
        for (name, field_value) in fields {
            field_map.insert(name.as_ref().to_owned(), field_value.value);
        }
        let map_value = gcloud_sdk::google::firestore::v1::MapValue { fields: field_map };
        Self::from(Value {
            value_type: Some(
                gcloud_sdk::google::firestore::v1::value::ValueType::MapValue(map_value),
            ),
        })
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/struct_path_macro.rs | src/struct_path_macro.rs | #[macro_export]
macro_rules! path {
($($x:tt)*) => {{
$crate::struct_path::path!($($x)*).to_string()
}};
}
/// Produces a `Vec<String>` of Firestore field paths by delegating to
/// `struct_path::paths!` and converting each returned path to an owned `String`.
#[macro_export]
macro_rules! paths {
    ($($x:tt)*) => {{
        $crate::struct_path::paths!($($x)*).iter().map(|s| s.to_string()).collect::<Vec<String>>()
    }};
}
/// Like `path!`, but renders the field name in camelCase
/// (passes `case="camel"` through to `struct_path::path!`).
#[macro_export]
macro_rules! path_camel_case {
    ($($x:tt)*) => {{
        $crate::struct_path::path!($($x)*;case="camel").to_string()
    }};
}
/// Like `paths!`, but renders the field names in camelCase
/// (passes `case="camel"` through to `struct_path::paths!`).
#[macro_export]
macro_rules! paths_camel_case {
    ($($x:tt)*) => {{
        $crate::struct_path::paths!($($x)*;case="camel").into_iter().map(|s| s.to_string()).collect::<Vec<String>>()
    }}
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/configuration.rs | src/cache/configuration.rs | use crate::{FirestoreDb, FirestoreListenerTarget};
use std::collections::HashMap;
/// Top-level cache configuration: a set of per-collection cache settings,
/// keyed by the full collection path (see `add_collection_config`).
#[derive(Clone)]
pub struct FirestoreCacheConfiguration {
    /// Maps full collection paths to their cache settings.
    pub collections: HashMap<String, FirestoreCacheCollectionConfiguration>,
}
impl FirestoreCacheConfiguration {
    /// Creates an empty configuration with no collections registered.
    #[inline]
    pub fn new() -> Self {
        Self {
            collections: HashMap::new(),
        }
    }

    /// Registers a collection configuration, keyed by its full collection path.
    ///
    /// The key is `<parent>/<collection_name>` when a parent document path is
    /// configured, otherwise `<documents-root>/<collection_name>` where the
    /// documents root comes from `db.get_documents_path()`. Re-adding the same
    /// path replaces the previous configuration.
    #[inline]
    pub fn add_collection_config(
        mut self,
        db: &FirestoreDb,
        config: FirestoreCacheCollectionConfiguration,
    ) -> Self {
        let collection_path = {
            if let Some(ref parent) = config.parent {
                format!("{}/{}", parent, config.collection_name)
            } else {
                format!("{}/{}", db.get_documents_path(), config.collection_name)
            }
        };
        // Insert directly instead of building a temporary single-entry HashMap
        // and merging it via `extend` — same behavior, no intermediate allocation.
        self.collections.insert(collection_path, config);
        self
    }
}
/// Cache settings for a single Firestore collection.
#[derive(Debug, Clone)]
pub struct FirestoreCacheCollectionConfiguration {
    /// Name of the cached collection.
    pub collection_name: String,
    /// Optional parent document path, for caching a sub-collection.
    pub parent: Option<String>,
    /// Listener target used to receive change events for this collection.
    pub listener_target: FirestoreListenerTarget,
    /// How the cache is initially populated for this collection.
    pub collection_load_mode: FirestoreCacheCollectionLoadMode,
    /// Index definitions available to the cache backend for this collection.
    pub indices: Vec<FirestoreCacheIndexConfiguration>,
}
impl FirestoreCacheCollectionConfiguration {
    /// Creates a configuration for one cached collection with no parent
    /// and no indices.
    #[inline]
    pub fn new<S>(
        collection_name: S,
        listener_target: FirestoreListenerTarget,
        collection_load_mode: FirestoreCacheCollectionLoadMode,
    ) -> Self
    where
        S: AsRef<str>,
    {
        Self {
            collection_name: collection_name.as_ref().to_owned(),
            parent: None,
            listener_target,
            collection_load_mode,
            indices: Vec::new(),
        }
    }

    /// Returns a copy of this configuration scoped under the given parent
    /// document path (for sub-collections).
    #[inline]
    pub fn with_parent<S>(self, parent: S) -> Self
    where
        S: AsRef<str>,
    {
        let parent_path = parent.as_ref().to_owned();
        Self {
            parent: Some(parent_path),
            ..self
        }
    }

    /// Returns a copy of this configuration with one more index registered.
    #[inline]
    pub fn with_index(mut self, index: FirestoreCacheIndexConfiguration) -> Self {
        self.indices.push(index);
        self
    }
}
/// Strategy for initially populating the cache for a collection.
#[derive(Debug, Clone)]
pub enum FirestoreCacheCollectionLoadMode {
    /// Always preload every document of the collection.
    PreloadAllDocs,
    /// Preload all documents only if the cache for this collection is empty.
    PreloadAllIfEmpty,
    /// Do not preload any documents.
    PreloadNone,
}
/// Definition of an index over one or more document fields for the cache backend.
#[derive(Debug, Clone)]
pub struct FirestoreCacheIndexConfiguration {
    /// Field paths covered by the index.
    pub fields: Vec<String>,
    /// Whether the indexed field combination is treated as unique.
    pub unique: bool,
}
impl FirestoreCacheIndexConfiguration {
    /// Creates a non-unique index over the given field names.
    #[inline]
    pub fn new<I>(fields: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        let field_names = fields
            .into_iter()
            .map(|field| field.as_ref().to_owned())
            .collect();
        Self {
            fields: field_names,
            unique: false,
        }
    }

    /// Returns a copy of this index configuration with the uniqueness flag
    /// set to `value`.
    #[inline]
    pub fn unique(mut self, value: bool) -> Self {
        self.unique = value;
        self
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/cache_query_engine.rs | src/cache/cache_query_engine.rs | use crate::cache::cache_filter_engine::FirestoreCacheFilterEngine;
use crate::*;
use futures::stream::BoxStream;
use futures::stream::StreamExt;
use futures::{future, TryStreamExt};
use std::cmp::Ordering;
// Applies Firestore query semantics (filter, ordering, limit/offset, cursors)
// to locally cached documents.
#[derive(Clone)]
pub struct FirestoreCacheQueryEngine {
    /// The query parameters this engine evaluates.
    pub query: FirestoreQueryParams,
}
impl FirestoreCacheQueryEngine {
    /// Creates an engine for the given query parameters (the params are cloned).
    pub fn new(query: &FirestoreQueryParams) -> Self {
        Self {
            query: query.clone(),
        }
    }
    /// Returns `true` when this query can be evaluated against the cache:
    /// collection-group queries (`all_descendants` set to true) are not supported.
    pub fn params_supported(&self) -> bool {
        self.query.all_descendants.iter().all(|x| !*x)
    }
    /// Checks a document against the query filter; a query without a filter
    /// matches every document.
    pub fn matches_doc(&self, doc: &FirestoreDocument) -> bool {
        if let Some(filter) = &self.query.filter {
            let filter_engine = FirestoreCacheFilterEngine::new(filter);
            filter_engine.matches_doc(doc)
        } else {
            true
        }
    }
    /// Sorts the stream according to the query's `order_by` clause.
    ///
    /// When an ordering is present the entire stream is collected into memory
    /// (failing fast on the first stream error) and sorted; otherwise the
    /// input stream is returned untouched.
    pub async fn sort_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        if let Some(order_by) = &self.query.order_by {
            let mut collected: Vec<FirestoreDocument> = input.try_collect().await?;
            collected.sort_by(|doc_a, doc_b| {
                let mut current_ordering = Ordering::Equal;
                // NOTE(review): this loop does not `break` after the first
                // non-equal key, so a later `order_by` field can override the
                // ordering decided by an earlier one — verify that multi-key
                // ordering is intended to behave this way.
                for sort_field in order_by {
                    match (
                        firestore_doc_get_field_by_path(doc_a, &sort_field.field_name),
                        firestore_doc_get_field_by_path(doc_b, &sort_field.field_name),
                    ) {
                        (Some(field_a), Some(field_b)) => {
                            // Equal on this key: fall through to the next sort key.
                            if cache::cache_filter_engine::compare_values(
                                cache::cache_filter_engine::CompareOp::Equal,
                                field_a,
                                field_b,
                            ) {
                                continue;
                            }
                            if cache::cache_filter_engine::compare_values(
                                cache::cache_filter_engine::CompareOp::LessThan,
                                field_a,
                                field_b,
                            ) {
                                current_ordering = match sort_field.direction {
                                    FirestoreQueryDirection::Ascending => Ordering::Less,
                                    FirestoreQueryDirection::Descending => Ordering::Greater,
                                }
                            } else {
                                current_ordering = match sort_field.direction {
                                    FirestoreQueryDirection::Ascending => Ordering::Greater,
                                    FirestoreQueryDirection::Descending => Ordering::Less,
                                }
                            }
                        }
                        // Documents missing the sort field compare as equal here.
                        // NOTE(review): confirm this matches the intended
                        // semantics for absent fields.
                        (None, None) => current_ordering = Ordering::Equal,
                        (None, Some(_)) => current_ordering = Ordering::Equal,
                        (Some(_), None) => current_ordering = Ordering::Equal,
                    }
                }
                current_ordering
            });
            Ok(futures::stream::iter(collected.into_iter().map(Ok)).boxed())
        } else {
            Ok(input)
        }
    }
    /// Truncates the stream to the query's `limit`, if one is set.
    pub async fn limit_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        if let Some(limit) = self.query.limit {
            Ok(input
                // `scan` ends the stream (returns `None`) once `limit` items passed.
                .scan(0_u32, move |index, doc| {
                    if *index < limit {
                        *index += 1;
                        future::ready(Some(doc))
                    } else {
                        future::ready(None)
                    }
                })
                .boxed())
        } else {
            Ok(input)
        }
    }
    /// Skips the first `offset` documents of the stream, if an offset is set.
    pub async fn offset_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        if let Some(offset) = self.query.offset {
            Ok(input.skip(offset as usize).boxed())
        } else {
            Ok(input)
        }
    }
    /// Applies the query's `start_at` cursor by skipping leading documents.
    ///
    /// Only effective when both a cursor and an `order_by` clause are present:
    /// cursor values are matched positionally (`zip`) against the ordered
    /// fields. `BeforeValue` skips documents strictly before the cursor
    /// position; `AfterValue` additionally skips documents equal to it.
    pub async fn start_at_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        if let Some(start_at) = &self.query.start_at {
            if let Some(order_by) = &self.query.order_by {
                let start_at = start_at.clone();
                let order_by = order_by.clone();
                Ok(input
                    // An `Err` item stops the skipping phase, so errors are
                    // passed through rather than dropped here.
                    .skip_while(move |doc_res| match doc_res {
                        Ok(doc) => match &start_at {
                            FirestoreQueryCursor::BeforeValue(values) => {
                                let result = values.iter().zip(&order_by).any(
                                    |(value, ordered_field)| {
                                        // Strict comparison: the document is still
                                        // before the cursor position.
                                        let order_by_comp = match ordered_field.direction {
                                            FirestoreQueryDirection::Ascending => cache::cache_filter_engine::CompareOp::LessThan,
                                            FirestoreQueryDirection::Descending => cache::cache_filter_engine::CompareOp::GreaterThan
                                        };
                                        match (
                                            firestore_doc_get_field_by_path(
                                                doc,
                                                &ordered_field.field_name,
                                            ),
                                            &value.value.value_type,
                                        ) {
                                            (Some(field_a), Some(field_b)) => {
                                                cache::cache_filter_engine::compare_values(
                                                    order_by_comp,
                                                    field_a,
                                                    field_b,
                                                )
                                            }
                                            (_, _) => false,
                                        }
                                    },
                                );
                                future::ready(result)
                            }
                            FirestoreQueryCursor::AfterValue(values) => {
                                let result = values.iter().zip(&order_by).any(
                                    |(value, ordered_field)| {
                                        // Inclusive comparison: also skip the
                                        // document matching the cursor values.
                                        let order_by_comp = match ordered_field.direction {
                                            FirestoreQueryDirection::Ascending => cache::cache_filter_engine::CompareOp::LessThanOrEqual,
                                            FirestoreQueryDirection::Descending => cache::cache_filter_engine::CompareOp::GreaterThanOrEqual
                                        };
                                        match (
                                            firestore_doc_get_field_by_path(
                                                doc,
                                                &ordered_field.field_name,
                                            ),
                                            &value.value.value_type,
                                        ) {
                                            (Some(field_a), Some(field_b)) => {
                                                cache::cache_filter_engine::compare_values(
                                                    order_by_comp,
                                                    field_a,
                                                    field_b,
                                                )
                                            }
                                            (_, _) => false,
                                        }
                                    },
                                );
                                future::ready(result)
                            }
                        },
                        Err(_) => future::ready(false),
                    })
                    .boxed())
            } else {
                Ok(input)
            }
        } else {
            Ok(input)
        }
    }
    /// Applies the query's `end_at` cursor by truncating the stream.
    ///
    /// Mirrors `start_at_stream` but with `take_while`: documents are yielded
    /// while they are still before the cursor position.
    pub async fn end_at_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        if let Some(end_at) = &self.query.end_at {
            if let Some(order_by) = &self.query.order_by {
                let end_at = end_at.clone();
                let order_by = order_by.clone();
                Ok(input
                    // NOTE(review): an `Err` item terminates the stream here and
                    // the error itself is discarded — confirm this is intended.
                    .take_while(move |doc_res| match doc_res {
                        Ok(doc) => match &end_at {
                            FirestoreQueryCursor::BeforeValue(values) => {
                                let result = values.iter().zip(&order_by).any(
                                    |(value, ordered_field)| {
                                        // Strict comparison: keep documents strictly
                                        // before the end cursor.
                                        let order_by_comp = match ordered_field.direction {
                                            FirestoreQueryDirection::Ascending => cache::cache_filter_engine::CompareOp::LessThan,
                                            FirestoreQueryDirection::Descending => cache::cache_filter_engine::CompareOp::GreaterThan
                                        };
                                        match (
                                            firestore_doc_get_field_by_path(
                                                doc,
                                                &ordered_field.field_name,
                                            ),
                                            &value.value.value_type,
                                        ) {
                                            (Some(field_a), Some(field_b)) => {
                                                cache::cache_filter_engine::compare_values(
                                                    order_by_comp,
                                                    field_a,
                                                    field_b,
                                                )
                                            }
                                            (_, _) => false,
                                        }
                                    },
                                );
                                future::ready(result)
                            }
                            FirestoreQueryCursor::AfterValue(values) => {
                                let result = values.iter().zip(&order_by).any(
                                    |(value, ordered_field)| {
                                        // Inclusive comparison: also keep the
                                        // document matching the cursor values.
                                        let order_by_comp = match ordered_field.direction {
                                            FirestoreQueryDirection::Ascending => cache::cache_filter_engine::CompareOp::LessThanOrEqual,
                                            FirestoreQueryDirection::Descending => cache::cache_filter_engine::CompareOp::GreaterThanOrEqual
                                        };
                                        match (
                                            firestore_doc_get_field_by_path(
                                                doc,
                                                &ordered_field.field_name,
                                            ),
                                            &value.value.value_type,
                                        ) {
                                            (Some(field_a), Some(field_b)) => {
                                                cache::cache_filter_engine::compare_values(
                                                    order_by_comp,
                                                    field_a,
                                                    field_b,
                                                )
                                            }
                                            (_, _) => false,
                                        }
                                    },
                                );
                                future::ready(result)
                            }
                        },
                        Err(_) => future::ready(false),
                    })
                    .boxed())
            } else {
                Ok(input)
            }
        } else {
            Ok(input)
        }
    }
    /// Runs the full post-filter pipeline over a stream of matched documents.
    ///
    /// NOTE(review): the stages run as sort -> limit -> offset -> start_at ->
    /// end_at. Applying `limit` before the offset and cursor stages looks
    /// suspicious (it may return fewer documents than expected) — verify
    /// against the intended Firestore query semantics.
    pub async fn process_query_stream<'a, 'b>(
        &'a self,
        input: BoxStream<'b, FirestoreResult<FirestoreDocument>>,
    ) -> FirestoreResult<BoxStream<'b, FirestoreResult<FirestoreDocument>>> {
        let input = self.sort_stream(input).await?;
        let input = self.limit_stream(input).await?;
        let input = self.offset_stream(input).await?;
        let input = self.start_at_stream(input).await?;
        let input = self.end_at_stream(input).await?;
        Ok(input)
    }
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/options.rs | src/cache/options.rs | use crate::FirestoreListenerParams;
use rsb_derive::Builder;
use rvstruct::ValueStruct;
/// Newtype for the unique name identifying a cache instance.
#[derive(Clone, Debug, Eq, PartialEq, Hash, ValueStruct)]
pub struct FirestoreCacheName(String);
/// Configuration options for a cache instance.
#[derive(Debug, Eq, PartialEq, Clone, Builder)]
pub struct FirestoreCacheOptions {
    /// Unique name of this cache instance.
    pub name: FirestoreCacheName,
    /// Optional parameters for the underlying Firestore listener;
    /// when `None` the listener is created with its defaults.
    pub listener_params: Option<FirestoreListenerParams>,
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
abdolence/firestore-rs | https://github.com/abdolence/firestore-rs/blob/47f0e847ebda47a41e7a6f4458b54fd6af814874/src/cache/mod.rs | src/cache/mod.rs | //! Provides caching capabilities for Firestore data.
//!
//! This module allows for caching Firestore documents and query results to reduce
//! latency and the number of reads to the Firestore database. It defines traits
//! for cache backends and provides a `FirestoreCache` struct that orchestrates
//! listening to Firestore changes and updating the cache.
//!
//! # Key Components
//! - [`FirestoreCache`]: The main struct for managing a cache. It uses a
//! [`FirestoreListener`](crate::FirestoreListener) to receive real-time updates
//! from Firestore and a [`FirestoreCacheBackend`] to store and retrieve cached data.
//! - [`FirestoreCacheBackend`]: A trait that defines the interface for different
//! cache storage mechanisms (e.g., in-memory, persistent).
//! - [`FirestoreCacheOptions`]: Configuration options for the cache, such as its name
//! and listener parameters.
//! - [`FirestoreCachedValue`]: An enum indicating whether a value was retrieved from
//! the cache or if the cache should be skipped for a particular query.
//!
//! # Usage
//! To use the caching functionality, you typically:
//! 1. Implement the [`FirestoreCacheBackend`] trait for your chosen storage.
//! 2. Create a [`FirestoreDb`](crate::FirestoreDb) instance.
//! 3. Instantiate [`FirestoreCache`] with the database, backend, and a
//! [`FirestoreResumeStateStorage`](crate::FirestoreResumeStateStorage) for the listener.
//! 4. Call [`FirestoreCache::load()`] to initialize the cache and start listening for updates.
//! 5. Use methods on the cache backend (e.g., `get_doc_by_path`, `query_docs`) to retrieve data.
//! These methods might return cached data or indicate that the cache should be bypassed.
//!
//! The cache automatically updates in the background as changes occur in Firestore,
//! based on the targets added to its internal listener (often configured by the backend's `load` method).
use crate::*;
use std::sync::Arc;
mod options;
pub use options::*;
mod configuration;
pub use configuration::*;
mod backends;
pub use backends::*;
use async_trait::async_trait;
use futures::stream::BoxStream;
use futures::StreamExt;
use tracing::*;
mod cache_filter_engine;
mod cache_query_engine;
/// Manages a cache of Firestore data.
///
/// `FirestoreCache` listens to changes in Firestore for specified targets and updates
/// a cache backend accordingly. It provides methods to load initial data, manage the
/// listener lifecycle, and access the underlying cache backend.
///
/// # Type Parameters
/// * `B`: The type of the cache backend, implementing [`FirestoreCacheBackend`].
/// * `LS`: The type of storage for the listener's resume state, implementing
/// [`FirestoreResumeStateStorage`](crate::FirestoreResumeStateStorage).
pub struct FirestoreCache<B, LS>
where
    B: FirestoreCacheBackend + Send + Sync + 'static,
    LS: FirestoreResumeStateStorage,
{
    /// Bundled options, backend, change listener and database handle.
    inner: FirestoreCacheInner<B, LS>,
}
/// Inner state of the `FirestoreCache`: configuration, the backend store,
/// the change listener and a database handle.
struct FirestoreCacheInner<B, LS>
where
    B: FirestoreCacheBackend + Send + Sync + 'static,
    LS: FirestoreResumeStateStorage,
{
    /// Configuration options for the cache.
    pub options: FirestoreCacheOptions,
    /// The cache backend implementation (also shared with the listener callback).
    pub backend: Arc<B>,
    /// The Firestore listener for real-time updates.
    pub listener: FirestoreListener<FirestoreDb, LS>,
    /// A clone of the Firestore database client.
    pub db: FirestoreDb,
}
/// Represents a value that might be retrieved from the cache.
pub enum FirestoreCachedValue<T> {
    /// The value was found and retrieved from the cache.
    UseCached(T),
    /// The cache should be skipped for this request; the caller should fetch
    /// the data directly from Firestore instead.
    SkipCache,
}
impl<B, LS> FirestoreCache<B, LS>
where
    B: FirestoreCacheBackend + Send + Sync + 'static,
    LS: FirestoreResumeStateStorage + Clone + Send + Sync + 'static,
{
    /// Creates a new `FirestoreCache` with default options for the given name.
    ///
    /// # Arguments
    /// * `name`: A unique name for this cache instance.
    /// * `db`: A reference to the [`FirestoreDb`](crate::FirestoreDb) client.
    /// * `backend`: The cache backend implementation.
    /// * `listener_storage`: Storage for the listener's resume state.
    ///
    /// # Returns
    /// A `FirestoreResult` containing the new `FirestoreCache`.
    //
    // The previous `where B: ...` clause on this method duplicated the
    // impl-level bounds exactly and has been removed.
    pub async fn new(
        name: FirestoreCacheName,
        db: &FirestoreDb,
        backend: B,
        listener_storage: LS,
    ) -> FirestoreResult<Self> {
        let options = FirestoreCacheOptions::new(name);
        Self::with_options(options, db, backend, listener_storage).await
    }
    /// Creates a new `FirestoreCache` with the specified options.
    ///
    /// # Arguments
    /// * `options`: [`FirestoreCacheOptions`] to configure the cache.
    /// * `db`: A reference to the [`FirestoreDb`](crate::FirestoreDb) client.
    /// * `backend`: The cache backend implementation.
    /// * `listener_storage`: Storage for the listener's resume state.
    ///
    /// # Returns
    /// A `FirestoreResult` containing the new `FirestoreCache`.
    pub async fn with_options(
        options: FirestoreCacheOptions,
        db: &FirestoreDb,
        backend: B,
        listener_storage: LS,
    ) -> FirestoreResult<Self> {
        // Use the caller-supplied listener params when present; otherwise the
        // listener is created with its defaults.
        let listener = if let Some(ref listener_params) = options.listener_params {
            db.create_listener_with_params(listener_storage, listener_params.clone())
                .await?
        } else {
            db.create_listener(listener_storage).await?
        };
        Ok(Self {
            inner: FirestoreCacheInner {
                options,
                backend: Arc::new(backend),
                listener,
                db: db.clone(),
            },
        })
    }
    /// Returns the name of this cache instance.
    pub fn name(&self) -> &FirestoreCacheName {
        &self.inner.options.name
    }
    /// Loads initial data into the cache and starts the Firestore listener.
    ///
    /// This method calls the backend's `load` method to determine which
    /// Firestore targets to listen to, adds them to the internal listener, and
    /// then starts the listener. The listener then forwards incoming changes
    /// to the backend's `on_listen_event` method.
    ///
    /// # Returns
    /// A `Result` indicating success or failure.
    pub async fn load(&mut self) -> Result<(), FirestoreError> {
        let backend_target_params = self
            .inner
            .backend
            .load(&self.inner.options, &self.inner.db)
            .await?;
        for target_params in backend_target_params {
            self.inner.listener.add_target(target_params)?;
        }
        let backend = self.inner.backend.clone();
        self.inner
            .listener
            .start(move |event| {
                let backend = backend.clone();
                async move {
                    // Cache-update failures are logged, not propagated, so the
                    // listener keeps running.
                    if let Err(err) = backend.on_listen_event(event).await {
                        error!(?err, "Error occurred while updating cache.");
                    };
                    Ok(())
                }
            })
            .await?;
        Ok(())
    }
    /// Shuts down the Firestore listener and the cache backend.
    ///
    /// # Returns
    /// A `Result` indicating success or failure.
    pub async fn shutdown(&mut self) -> Result<(), FirestoreError> {
        self.inner.listener.shutdown().await?;
        self.inner.backend.shutdown().await?;
        Ok(())
    }
    /// Returns a thread-safe reference-counted pointer to the cache backend.
    pub fn backend(&self) -> Arc<B> {
        self.inner.backend.clone()
    }
    /// Invalidates all data in the cache.
    ///
    /// This calls the `invalidate_all` method on the cache backend.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure.
    pub async fn invalidate_all(&self) -> FirestoreResult<()> {
        self.inner.backend.invalidate_all().await
    }
}
/// Defines the contract for a Firestore cache backend.
///
/// Implementors of this trait are responsible for storing, retrieving, and updating
/// cached Firestore data.
#[async_trait]
pub trait FirestoreCacheBackend: FirestoreCacheDocsByPathSupport {
    /// Loads initial data or configuration for the cache.
    ///
    /// This method is called when [`FirestoreCache::load()`] is invoked. It should
    /// determine which Firestore targets the cache needs to listen to and return
    /// them as a `Vec<FirestoreListenerTargetParams>`. These targets will be added
    /// to the `FirestoreCache`'s internal listener.
    ///
    /// # Arguments
    /// * `options`: The cache options.
    /// * `db`: A reference to the Firestore database client.
    ///
    /// # Returns
    /// A `Result` containing the listener target parameters or an error.
    async fn load(
        &self,
        options: &FirestoreCacheOptions,
        db: &FirestoreDb,
    ) -> Result<Vec<FirestoreListenerTargetParams>, FirestoreError>;
    /// Invalidates all data stored in the cache.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure.
    async fn invalidate_all(&self) -> FirestoreResult<()>;
    /// Performs any necessary cleanup or shutdown procedures for the cache backend.
    ///
    /// This is called when [`FirestoreCache::shutdown()`] is invoked, after the
    /// listener itself has been shut down.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure.
    async fn shutdown(&self) -> FirestoreResult<()>;
    /// Handles a listen event from Firestore.
    ///
    /// This method is called from the listener task started by
    /// [`FirestoreCache::load()`] when a change occurs for one of the listened
    /// targets. The backend should update its cached data based on the event.
    /// Errors returned here are logged by the cache and do not stop the listener.
    ///
    /// # Arguments
    /// * `event`: The [`FirestoreListenEvent`](crate::FirestoreListenEvent) received from Firestore.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure of processing the event.
    async fn on_listen_event(&self, event: FirestoreListenEvent) -> FirestoreResult<()>;
}
/// Defines support for retrieving and updating cached documents by their full path.
#[async_trait]
pub trait FirestoreCacheDocsByPathSupport {
    /// Retrieves a single document from the cache by its full Firestore path.
    ///
    /// # Arguments
    /// * `document_path`: The full path to the document (e.g., "projects/P/databases/D/documents/C/ID").
    ///
    /// # Returns
    /// A `FirestoreResult` containing an `Option<FirestoreDocument>`.
    /// `None` if the document is not found in the cache.
    async fn get_doc_by_path(
        &self,
        document_path: &str,
    ) -> FirestoreResult<Option<FirestoreDocument>>;
    /// Retrieves multiple documents from the cache by their full Firestore paths.
    ///
    /// This default implementation iterates over `full_doc_ids` and calls `get_doc_by_path`
    /// for each. Backends may provide a more optimized batch implementation.
    ///
    /// # Arguments
    /// * `full_doc_ids`: A slice of full document paths.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a stream of `FirestoreResult<(String, Option<FirestoreDocument>)>`.
    /// The `String` in the tuple is the document ID (last segment of the path).
    async fn get_docs_by_paths<'a>(
        &'a self,
        full_doc_ids: &'a [String],
    ) -> FirestoreResult<BoxStream<'a, FirestoreResult<(String, Option<FirestoreDocument>)>>>
    where
        Self: Sync,
    {
        // NOTE(review): cache misses (`Ok(None)`) are filtered out entirely —
        // callers never see an `(id, None)` pair despite the `Option` in the
        // item type. Confirm this is the intended contract.
        Ok(Box::pin(futures::stream::iter(full_doc_ids).filter_map({
            move |document_path| async move {
                match self.get_doc_by_path(document_path.as_str()).await {
                    Ok(maybe_doc) => maybe_doc.map(|document| {
                        // The document id is the last path segment of
                        // `document.name`; fall back to the full name if the
                        // split yields nothing.
                        let doc_id = document
                            .name
                            .split('/')
                            .last()
                            .map(|s| s.to_string())
                            .unwrap_or_else(|| document.name.clone());
                        Ok((doc_id, Some(document)))
                    }),
                    // Best-effort: read errors are logged and the entry skipped.
                    Err(err) => {
                        error!(%err, "Error occurred while reading from cache.");
                        None
                    }
                }
            }
        })))
    }
    /// Updates or inserts a document in the cache.
    ///
    /// The document's full path is typically derived from `document.name`.
    ///
    /// # Arguments
    /// * `document`: The [`FirestoreDocument`](crate::FirestoreDocument) to update/insert.
    ///
    /// # Returns
    /// A `FirestoreResult` indicating success or failure.
    async fn update_doc_by_path(&self, document: &FirestoreDocument) -> FirestoreResult<()>;
    /// Lists all documents in the cache for a given collection path.
    ///
    /// # Arguments
    /// * `collection_path`: The full path to the collection (e.g., "projects/P/databases/D/documents/C").
    ///
    /// # Returns
    /// A `FirestoreResult` containing a [`FirestoreCachedValue`]. If `UseCached`, it holds
    /// a stream of `FirestoreResult<FirestoreDocument>`. If `SkipCache`, the caller
    /// should fetch directly from Firestore.
    async fn list_all_docs<'b>(
        &self,
        collection_path: &str,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>;
    /// Queries documents in the cache for a given collection path and query parameters.
    ///
    /// The backend is responsible for applying the filters and ordering defined in `query`
    /// to its cached data.
    ///
    /// # Arguments
    /// * `collection_path`: The full path to the collection.
    /// * `query`: The [`FirestoreQueryParams`](crate::FirestoreQueryParams) to apply.
    ///
    /// # Returns
    /// A `FirestoreResult` containing a [`FirestoreCachedValue`]. If `UseCached`, it holds
    /// a stream of `FirestoreResult<FirestoreDocument>`. If `SkipCache`, the caller
    /// should fetch directly from Firestore.
    async fn query_docs<'b>(
        &self,
        collection_path: &str,
        query: &FirestoreQueryParams,
    ) -> FirestoreResult<FirestoreCachedValue<BoxStream<'b, FirestoreResult<FirestoreDocument>>>>;
}
| rust | Apache-2.0 | 47f0e847ebda47a41e7a6f4458b54fd6af814874 | 2026-01-04T20:18:05.696537Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.