repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/services/discovery.rs | crates/worker/src/services/discovery.rs | use anyhow::Result;
use shared::models::node::Node;
use shared::security::request_signer::sign_request_with_nonce;
use shared::web3::wallet::Wallet;
/// Client for publishing this worker's node information to one or more
/// discovery services over HTTP.
pub(crate) struct DiscoveryService {
    // Wallet used to sign upload requests and derive the `x-address` header.
    wallet: Wallet,
    // Candidate discovery service base URLs, tried in order until one succeeds.
    base_urls: Vec<String>,
    // API path appended to each base URL (defaults to "/api/nodes").
    endpoint: String,
}
impl DiscoveryService {
/// Builds a discovery client.
///
/// Falls back to `http://localhost:8089` when no base URLs are supplied,
/// and to `/api/nodes` when no endpoint path is given.
pub(crate) fn new(wallet: Wallet, base_urls: Vec<String>, endpoint: Option<String>) -> Self {
    let endpoint = endpoint.unwrap_or_else(|| "/api/nodes".to_string());
    let base_urls = if base_urls.is_empty() {
        vec!["http://localhost:8089".to_string()]
    } else {
        base_urls
    };
    Self {
        wallet,
        base_urls,
        endpoint,
    }
}
/// Signs `node_config` and uploads it via HTTP PUT to a single discovery
/// service at `base_url`.
///
/// Returns an error on signing failure, invalid header values, transport
/// errors, or any non-success HTTP status.
async fn upload_to_single_discovery(&self, node_config: &Node, base_url: &str) -> Result<()> {
    let request_data = serde_json::to_value(node_config)?;
    // The signature covers the endpoint path plus the serialized node payload.
    let signed_request =
        sign_request_with_nonce(&self.endpoint, &self.wallet, Some(&request_data))
            .await
            .map_err(|e| anyhow::anyhow!("{}", e))?;
    let mut headers = reqwest::header::HeaderMap::new();
    // Propagate header-value parse failures instead of panicking: these values
    // come from runtime data (wallet address / signature), not constants.
    let address = self.wallet.wallet.default_signer().address().to_string();
    headers.insert(
        "x-address",
        address
            .parse()
            .map_err(|e| anyhow::anyhow!("invalid x-address header value: {e}"))?,
    );
    headers.insert(
        "x-signature",
        signed_request
            .signature
            .parse()
            .map_err(|e| anyhow::anyhow!("invalid x-signature header value: {e}"))?,
    );
    let request_url = format!("{}{}", base_url, &self.endpoint);
    let client = reqwest::Client::new();
    let response = client
        .put(&request_url)
        .headers(headers)
        .json(
            &signed_request
                .data
                .expect("Signed request data should always be present for discovery upload"),
        )
        .send()
        .await?;
    if !response.status().is_success() {
        let status = response.status();
        let error_text = response
            .text()
            .await
            .unwrap_or_else(|_| "No error message".to_string());
        return Err(anyhow::anyhow!(
            "Error: Received response with status code {} from {}: {}",
            status,
            base_url,
            error_text
        ));
    }
    Ok(())
}
pub(crate) async fn upload_discovery_info(&self, node_config: &Node) -> Result<()> {
let mut last_error: Option<String> = None;
for base_url in &self.base_urls {
match self.upload_to_single_discovery(node_config, base_url).await {
Ok(_) => {
// Successfully uploaded to one discovery service, return immediately
return Ok(());
}
Err(e) => {
last_error = Some(e.to_string());
}
}
}
// If we reach here, all discovery services failed
if let Some(error) = last_error {
Err(anyhow::anyhow!(
"Failed to upload to all discovery services. Last error: {}",
error
))
} else {
Err(anyhow::anyhow!(
"Failed to upload to all discovery services"
))
}
}
}
impl Clone for DiscoveryService {
fn clone(&self) -> Self {
Self {
wallet: self.wallet.clone(),
base_urls: self.base_urls.clone(),
endpoint: self.endpoint.clone(),
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/services/mod.rs | crates/worker/src/services/mod.rs | pub(crate) mod discovery;
pub(crate) mod discovery_updater;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/state/mod.rs | crates/worker/src/state/mod.rs | pub(crate) mod system_state;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/state/system_state.rs | crates/worker/src/state/system_state.rs | use anyhow::bail;
use anyhow::Result;
use directories::ProjectDirs;
use log::debug;
use log::error;
use serde::{Deserialize, Serialize};
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::RwLock;
const STATE_FILENAME: &str = "heartbeat_state.toml";
/// Returns the platform-specific local data directory for the worker
/// (via the `directories` crate), or `None` when it cannot be determined.
fn get_default_state_dir() -> Option<String> {
    let proj_dirs = ProjectDirs::from("com", "prime", "worker")?;
    Some(proj_dirs.data_local_dir().to_string_lossy().into_owned())
}
/// Subset of [`SystemState`] that is persisted to disk between runs.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct PersistedSystemState {
    // Last known heartbeat endpoint, if the worker was running.
    endpoint: Option<String>,
    // Stable libp2p identity; round-tripped through its protobuf encoding
    // because `p2p::Keypair` does not implement the serde traits directly.
    #[serde(
        serialize_with = "serialize_keypair",
        deserialize_with = "deserialize_keypair"
    )]
    p2p_keypair: p2p::Keypair,
}
/// Serde helper: serializes a libp2p keypair as its protobuf byte encoding.
fn serialize_keypair<S>(keypair: &p2p::Keypair, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    let serialized = keypair
        .to_protobuf_encoding()
        .map_err(serde::ser::Error::custom)?;
    serializer.serialize_bytes(&serialized)
}
/// Serde helper: reconstructs a libp2p keypair from its protobuf byte encoding.
fn deserialize_keypair<'de, D>(deserializer: D) -> Result<p2p::Keypair, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let serialized: Vec<u8> = Deserialize::deserialize(deserializer)?;
    p2p::Keypair::from_protobuf_encoding(&serialized).map_err(serde::de::Error::custom)
}
/// Shared runtime state of the worker (heartbeat, endpoint, p2p identity),
/// optionally persisted to a state file on disk.
#[derive(Debug, Clone)]
pub(crate) struct SystemState {
    // Time of the most recent heartbeat, if any.
    last_heartbeat: Arc<RwLock<Option<std::time::Instant>>>,
    is_running: Arc<RwLock<bool>>, // Keep is_running in the normal heartbeat state
    // Heartbeat endpoint; `Some` only while running.
    endpoint: Arc<RwLock<Option<String>>>,
    // Directory holding the state file (explicit override or platform default).
    state_dir_overwrite: Option<PathBuf>,
    // When true, state is never written to or read from disk.
    disable_state_storing: bool,
    compute_pool_id: u32,
    // libp2p identity; loaded from the state file or freshly generated.
    p2p_keypair: p2p::Keypair,
}
impl SystemState {
/// Creates the worker state, restoring any previously persisted endpoint and
/// p2p keypair from `state_dir` (or the platform default data directory).
///
/// Fails when a state file is present but cannot be loaded.
pub(crate) fn new(
    state_dir: Option<String>,
    disable_state_storing: bool,
    compute_pool_id: u32,
) -> Result<Self> {
    let default_state_dir = get_default_state_dir();
    debug!("Default state dir: {default_state_dir:?}");
    // An explicit override wins; otherwise fall back to the platform default.
    let state_path = state_dir
        .map(PathBuf::from)
        .or_else(|| default_state_dir.map(PathBuf::from));
    debug!("State path: {state_path:?}");
    let mut endpoint = None;
    let mut p2p_keypair = None;
    if !disable_state_storing {
        if let Some(path) = &state_path {
            let state_file = path.join(STATE_FILENAME);
            if !state_file.exists() {
                debug!(
                    "No state file found at {state_file:?}, will create on first state change"
                );
            } else if let Ok(Some(loaded_state)) = SystemState::load_state(path) {
                debug!("Loaded previous state from {state_file:?}");
                endpoint = loaded_state.endpoint;
                p2p_keypair = Some(loaded_state.p2p_keypair);
            } else {
                bail!("failed to load state from {state_file:?}");
            }
        }
    }
    // Generate a fresh identity when none could be restored from disk.
    let p2p_keypair = p2p_keypair.unwrap_or_else(p2p::Keypair::generate_ed25519);
    Ok(Self {
        last_heartbeat: Arc::new(RwLock::new(None)),
        is_running: Arc::new(RwLock::new(false)),
        endpoint: Arc::new(RwLock::new(endpoint)),
        state_dir_overwrite: state_path.clone(),
        disable_state_storing,
        compute_pool_id,
        p2p_keypair,
    })
}
/// Persists the endpoint and p2p keypair to the state file as JSON.
///
/// No-op when state storing is disabled or no state directory is configured.
/// Note: the file keeps its historical `.toml` name but contains JSON.
fn save_state(&self, heartbeat_endpoint: Option<String>) -> Result<()> {
    if self.disable_state_storing {
        return Ok(());
    }
    debug!("Saving state");
    if let Some(state_dir) = &self.state_dir_overwrite {
        let state = PersistedSystemState {
            endpoint: heartbeat_endpoint,
            p2p_keypair: self.p2p_keypair.clone(),
        };
        debug!("state: {state:?}");
        fs::create_dir_all(state_dir)?;
        let state_path = state_dir.join(STATE_FILENAME);
        // Log and propagate serialization failures; the nested match the
        // original used is equivalent but noisier.
        let json_string = serde_json::to_string_pretty(&state).map_err(|e| {
            error!("Failed to serialize state: {e}");
            anyhow::anyhow!("Failed to serialize state: {}", e)
        })?;
        fs::write(&state_path, json_string)?;
        debug!("Saved state to {state_path:?}");
    }
    Ok(())
}
/// Loads persisted state from `state_dir`, returning `Ok(None)` when no
/// state file exists and an error when the file cannot be parsed.
fn load_state(state_dir: &Path) -> Result<Option<PersistedSystemState>> {
    let state_path = state_dir.join(STATE_FILENAME);
    if !state_path.exists() {
        return Ok(None);
    }
    let contents = fs::read_to_string(state_path)?;
    match serde_json::from_str(&contents) {
        Ok(state) => Ok(Some(state)),
        Err(e) => bail!("failed to parse state file: {e}"),
    }
}
/// Returns the compute pool this worker belongs to.
pub(crate) fn get_compute_pool_id(&self) -> u32 {
    self.compute_pool_id
}
/// Borrows the worker's libp2p keypair.
pub(crate) fn get_p2p_keypair(&self) -> &p2p::Keypair {
    &self.p2p_keypair
}
/// Derives the libp2p peer id from the public half of the keypair.
pub(crate) fn get_p2p_id(&self) -> p2p::PeerId {
    self.p2p_keypair.public().to_peer_id()
}
/// Records the current instant as the time of the last heartbeat.
pub(crate) async fn update_last_heartbeat(&self) {
    let mut heartbeat = self.last_heartbeat.write().await;
    *heartbeat = Some(std::time::Instant::now());
}
/// Whether the worker is currently marked as running.
pub(crate) async fn is_running(&self) -> bool {
    *self.is_running.read().await
}
/// Updates the running flag and heartbeat endpoint, persisting the endpoint
/// to disk when one is set.
///
/// The endpoint is cleared when `running` is false. Locks are only taken for
/// writing when either value actually changed.
pub(crate) async fn set_running(
    &self,
    running: bool,
    heartbeat_endpoint: Option<String>,
) -> Result<()> {
    // Read current values
    let current_running = self.is_running().await;
    let current_endpoint = self.get_heartbeat_endpoint().await;
    // Only update and save if values changed
    // NOTE(review): the reads above and the writes below are separate lock
    // acquisitions, so concurrent callers could interleave here — confirm
    // callers serialize access if that matters.
    if running != current_running || heartbeat_endpoint != current_endpoint {
        let mut is_running = self.is_running.write().await;
        let mut endpoint = self.endpoint.write().await;
        *is_running = running;
        if !running {
            *endpoint = None;
        } else {
            *endpoint = heartbeat_endpoint;
        }
        // State is persisted only while an endpoint is present; stopping does
        // not rewrite the state file.
        if endpoint.is_some() {
            if let Err(e) = self.save_state(endpoint.clone()) {
                // Only save the endpoint
                error!("Failed to save heartbeat state: {e}");
                return Err(e);
            }
        }
    }
    Ok(())
}
/// Returns a copy of the current heartbeat endpoint, if any.
pub(crate) async fn get_heartbeat_endpoint(&self) -> Option<String> {
    self.endpoint.read().await.clone()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    use tempfile::TempDir;
    /// Creates a throwaway directory that is removed when dropped.
    fn setup_test_dir() -> TempDir {
        tempfile::tempdir().expect("Failed to create temp directory")
    }
    #[tokio::test]
    async fn test_state_dir_overwrite() {
        // The platform default data directory should resolve on test hosts.
        let default_state_dir = get_default_state_dir();
        assert!(default_state_dir.is_some());
    }
    #[tokio::test]
    async fn test_new_state_dir() {
        let temp_dir = setup_test_dir();
        let state = SystemState::new(
            Some(temp_dir.path().to_string_lossy().to_string()),
            false,
            0,
        )
        .unwrap();
        let _ = state
            .set_running(true, Some("http://localhost:8080/heartbeat".to_string()))
            .await;
        // Setting a running endpoint must create and populate the state file.
        let state_file = temp_dir.path().join(STATE_FILENAME);
        assert!(state_file.exists());
        let contents = fs::read_to_string(state_file).expect("Failed to read state file");
        let state: PersistedSystemState =
            serde_json::from_str(&contents).expect("Failed to parse state file");
        assert_eq!(
            state.endpoint,
            Some("http://localhost:8080/heartbeat".to_string())
        );
    }
    #[tokio::test]
    async fn test_corrupt_state_file() {
        let temp_dir = setup_test_dir();
        let state_file = temp_dir.path().join(STATE_FILENAME);
        // An unparseable state file must make construction fail loudly.
        fs::write(&state_file, "invalid_toml_content").expect("Failed to write to state file");
        assert!(SystemState::new(
            Some(temp_dir.path().to_string_lossy().to_string()),
            false,
            0,
        )
        .is_err());
    }
    #[tokio::test]
    async fn test_load_state() {
        // Pre-write a valid state file, then confirm construction restores it.
        let keypair = p2p::Keypair::generate_ed25519();
        let state = PersistedSystemState {
            endpoint: Some("http://localhost:8080/heartbeat".to_string()),
            p2p_keypair: keypair,
        };
        let serialized = serde_json::to_string_pretty(&state).unwrap();
        let temp_dir = setup_test_dir();
        let state_file = temp_dir.path().join(STATE_FILENAME);
        fs::write(&state_file, serialized).unwrap();
        let state = SystemState::new(
            Some(temp_dir.path().to_string_lossy().to_string()),
            false,
            0,
        )
        .unwrap();
        assert_eq!(
            state.get_heartbeat_endpoint().await,
            Some("http://localhost:8080/heartbeat".to_string())
        );
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/state.rs | crates/worker/src/docker/state.rs | use chrono::{DateTime, Utc};
use shared::models::task::{Task, TaskState};
use std::sync::Arc;
use tokio::sync::Mutex;
use uuid::Uuid;
/// Mutex-guarded shared state describing the task the worker should run and
/// what has been observed about it.
pub(crate) struct DockerState {
    // Desired task (with its latest observed `state`), or `None` when idle.
    current_task: Arc<Mutex<Option<Task>>>,
    // When a container start was last attempted; drives the restart backoff.
    last_started: Arc<Mutex<Option<DateTime<Utc>>>>,
    // Whether the reconciliation loop is active.
    is_running: Arc<Mutex<bool>>,
}
impl DockerState {
    /// Creates an empty state: no task, never started, not running.
    pub(crate) fn new() -> Self {
        Self {
            current_task: Arc::new(Mutex::new(None)),
            last_started: Arc::new(Mutex::new(None)),
            is_running: Arc::new(Mutex::new(false)),
        }
    }
    /// Replaces the current task, unless the new task is the same task with
    /// an unchanged configuration (in which case the stored task is kept).
    pub(crate) async fn set_current_task(&self, task: Option<Task>) {
        let mut guard = self.current_task.lock().await;
        if let (Some(existing), Some(incoming)) = (guard.as_ref(), task.as_ref()) {
            let unchanged = existing.id == incoming.id
                && existing.generate_config_hash() == incoming.generate_config_hash();
            if unchanged {
                return;
            }
        }
        *guard = task;
    }
    /// Sets the state of the current task when its id matches `task_id`.
    pub(crate) async fn update_task_state(&self, task_id: Uuid, state: TaskState) {
        let mut guard = self.current_task.lock().await;
        match guard.as_mut() {
            Some(task) if task.id == task_id => task.state = state,
            Some(_) => {}
            None => println!("No current task found when trying to update state"),
        }
    }
    /// Returns a clone of the current task, if any.
    pub(crate) async fn get_current_task(&self) -> Option<Task> {
        self.current_task.lock().await.clone()
    }
    /// Returns when a container start was last attempted, if ever.
    pub(crate) async fn get_last_started(&self) -> Option<DateTime<Utc>> {
        *self.last_started.lock().await
    }
    /// Records the time of the latest container start attempt.
    pub(crate) async fn set_last_started(&self, last_started: DateTime<Utc>) {
        *self.last_started.lock().await = Some(last_started);
    }
    /// Whether the reconciliation loop is active.
    pub(crate) async fn get_is_running(&self) -> bool {
        *self.is_running.lock().await
    }
    /// Marks the reconciliation loop as active or inactive.
    pub(crate) async fn set_is_running(&self, is_running: bool) {
        *self.is_running.lock().await = is_running;
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/service.rs | crates/worker/src/docker/service.rs | use super::docker_manager::ContainerInfo;
use super::DockerManager;
use super::DockerState;
use crate::console::Console;
use bollard::models::ContainerStateStatusEnum;
use chrono::{DateTime, Utc};
use log::debug;
use shared::models::heartbeat::TaskDetails;
use shared::models::node::GpuSpecs;
use shared::models::task::Task;
use shared::models::task::TaskState;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::time::{interval, Duration};
use tokio_util::sync::CancellationToken;
/// Reconciles the desired task (from [`DockerState`]) with the containers the
/// Docker daemon actually reports, and exposes log/restart/detail helpers.
pub(crate) struct DockerService {
    docker_manager: Arc<DockerManager>,
    // Cancelling this token stops the `run` loop.
    cancellation_token: CancellationToken,
    // Shared desired/observed task state, also read and mutated by callers.
    pub state: Arc<DockerState>,
    // GPU specs passed through to container creation, if the node has GPUs.
    gpu: Option<GpuSpecs>,
    // Total system memory in MB; used to size /dev/shm for containers.
    system_memory_mb: Option<u32>,
    // Unix socket path mounted into containers for the task bridge.
    task_bridge_socket_path: String,
    // Exported to containers as the NODE_ADDRESS env var.
    node_address: String,
}
const TASK_PREFIX: &str = "prime-task";
const RESTART_INTERVAL_SECONDS: i64 = 10;
impl DockerService {
/// Creates the service together with its underlying [`DockerManager`].
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
    cancellation_token: CancellationToken,
    gpu: Option<GpuSpecs>,
    system_memory_mb: Option<u32>,
    task_bridge_socket_path: String,
    storage_path: String,
    node_address: String,
    disable_host_network_mode: bool,
) -> Self {
    // NOTE(review): `unwrap` panics when the Docker manager cannot be
    // constructed (e.g. daemon unreachable) — confirm fail-fast is intended.
    let docker_manager =
        Arc::new(DockerManager::new(storage_path, disable_host_network_mode).unwrap());
    Self {
        docker_manager,
        cancellation_token,
        state: Arc::new(DockerState::new()),
        gpu,
        system_memory_mb,
        task_bridge_socket_path,
        node_address,
    }
}
/// Main reconciliation loop: every 5 seconds, compares the desired task from
/// [`DockerState`] against the containers Docker reports, terminating stale
/// task containers and (re)starting the current task's container as needed.
///
/// Runs until `cancellation_token` is cancelled.
///
/// Fix: the corrupted token `¤t_task` (mangled `&current_task`) is
/// restored so the call to `generate_task_id` compiles.
pub(crate) async fn run(&self) -> Result<(), Box<dyn std::error::Error>> {
    let mut interval = interval(Duration::from_secs(5));
    let manager = self.docker_manager.clone();
    let cancellation_token = self.cancellation_token.clone();
    let state = self.state.clone();
    state.set_is_running(true).await;
    let starting_container_tasks: Arc<Mutex<Vec<tokio::task::JoinHandle<()>>>> =
        Arc::new(Mutex::new(Vec::new()));
    let terminating_container_tasks: Arc<Mutex<Vec<tokio::task::JoinHandle<()>>>> =
        Arc::new(Mutex::new(Vec::new()));
    // Deterministic container name: prefix + task id + config hash, so a
    // config change yields a new name and forces a container replacement.
    fn generate_task_id(task: &Option<Task>) -> Option<String> {
        task.as_ref().map(|task| {
            let config_hash = task.generate_config_hash();
            format!("{}-{}-{:x}", TASK_PREFIX, task.id, config_hash)
        })
    }
    // Aborts and clears all tracked spawn handles (used on shutdown).
    async fn cleanup_tasks(tasks: &Arc<Mutex<Vec<tokio::task::JoinHandle<()>>>>) {
        let mut tasks_guard = tasks.lock().await;
        for handle in tasks_guard.iter() {
            handle.abort();
        }
        tasks_guard.clear();
    }
    let manager_clone = manager.clone();
    let terminate_manager = manager_clone.clone();
    let task_state_clone = state.clone();
    // Track consecutive failures
    let mut consecutive_failures = 0;
    loop {
        tokio::select! {
            _ = cancellation_token.cancelled() => {
                cleanup_tasks(&starting_container_tasks).await;
                cleanup_tasks(&terminating_container_tasks).await;
                break;
            }
            _ = interval.tick() => {
                // Drop handles of finished start/terminate jobs.
                {
                    let mut tasks = starting_container_tasks.lock().await;
                    tasks.retain(|handle| !handle.is_finished());
                }
                {
                    let mut tasks = terminating_container_tasks.lock().await;
                    tasks.retain(|handle| !handle.is_finished());
                }
                let current_task = task_state_clone.get_current_task().await;
                let task_id = generate_task_id(&current_task);
                let all_containers = match manager.list_containers(true).await {
                    Ok(containers) => containers,
                    Err(e) => {
                        log::error!("Error listing containers: {e}");
                        continue;
                    }
                };
                // Any prime-task container that does not belong to the current
                // task (or any, if there is no current task) is stale.
                let old_tasks: Vec<ContainerInfo> = all_containers
                    .iter()
                    .filter(|c| {
                        c.names.iter().any(|name| name.contains(TASK_PREFIX))
                            && task_id
                                .as_ref().is_none_or(|id| !c.names.iter().any(|name| name.contains(id)))
                    })
                    .cloned()
                    .collect();
                if !old_tasks.is_empty() {
                    for task in old_tasks {
                        let terminate_manager_clone = terminate_manager.clone();
                        let handle = tokio::spawn(async move {
                            let termination = terminate_manager_clone.remove_container(&task.id).await;
                            match termination {
                                Ok(_) => Console::info("DockerService", "Container terminated successfully"),
                                Err(e) => log::error!("Error terminating container: {e}"),
                            }
                        });
                        terminating_container_tasks.lock().await.push(handle);
                    }
                }
                if current_task.is_some() && task_id.is_some() {
                    let container_task_id = task_id.as_ref().unwrap().clone();
                    let container_match = all_containers.iter().find(|c| c.names.contains(&format!("/{container_task_id}")));
                    if container_match.is_none() {
                        let running_tasks = starting_container_tasks.lock().await;
                        let has_running_tasks = running_tasks.iter().any(|h| !h.is_finished());
                        drop(running_tasks);
                        if has_running_tasks {
                            Console::info("DockerService", "Container is still starting ...");
                        } else {
                            let last_started_time = match task_state_clone.get_last_started().await {
                                Some(time) => time,
                                None => DateTime::from_timestamp(0, 0).unwrap(),
                            };
                            let elapsed = Utc::now().signed_duration_since(last_started_time).num_seconds();
                            let backoff_seconds = RESTART_INTERVAL_SECONDS;
                            // wait for backoff period before starting a new container
                            if elapsed < backoff_seconds {
                                Console::info("DockerService", &format!("Waiting before starting new container ({}s remaining)...", backoff_seconds - elapsed));
                            } else {
                                if consecutive_failures > 0 {
                                    Console::info("DockerService", &format!("Starting new container after {consecutive_failures} failures ({RESTART_INTERVAL_SECONDS}s interval)..."));
                                } else {
                                    Console::info("DockerService", "Starting new container...");
                                }
                                let manager_clone = manager_clone.clone();
                                let state_clone = task_state_clone.clone();
                                let gpu = self.gpu.clone();
                                let system_memory_mb = self.system_memory_mb;
                                let task_bridge_socket_path = self.task_bridge_socket_path.clone();
                                let node_address = self.node_address.clone();
                                let handle = tokio::spawn(async move {
                                    let Some(payload) = state_clone.get_current_task().await else {
                                        return;
                                    };
                                    // Default command keeps the container alive when none is given.
                                    let cmd = match payload.cmd {
                                        Some(cmd_vec) => {
                                            cmd_vec.into_iter().map(|arg| {
                                                arg.replace("${SOCKET_PATH}", &task_bridge_socket_path)
                                            }).collect()
                                        }
                                        None => vec!["sleep".to_string(), "infinity".to_string()],
                                    };
                                    let mut env_vars: HashMap<String, String> = HashMap::new();
                                    if let Some(env) = &payload.env_vars {
                                        // Clone env vars and replace ${SOCKET_PATH} in values
                                        for (key, value) in env.iter() {
                                            let processed_value = value.replace("${SOCKET_PATH}", &task_bridge_socket_path);
                                            env_vars.insert(key.clone(), processed_value);
                                        }
                                    }
                                    env_vars.insert("NODE_ADDRESS".to_string(), node_address);
                                    env_vars.insert("PRIME_MONITOR__SOCKET__PATH".to_string(), task_bridge_socket_path.to_string());
                                    env_vars.insert("PRIME_TASK_ID".to_string(), payload.id.to_string());
                                    // Always mount the socket's parent directory into the container.
                                    let mut volumes = vec![
                                        (
                                            Path::new(&task_bridge_socket_path).parent().unwrap().to_path_buf().to_string_lossy().to_string(),
                                            Path::new(&task_bridge_socket_path).parent().unwrap().to_path_buf().to_string_lossy().to_string(),
                                            false,
                                            false,
                                        )
                                    ];
                                    if let Some(volume_mounts) = &payload.volume_mounts {
                                        for volume_mount in volume_mounts {
                                            volumes.push((
                                                volume_mount.host_path.clone(),
                                                volume_mount.container_path.clone(),
                                                false,
                                                true
                                            ));
                                        }
                                    }
                                    let shm_size = match system_memory_mb {
                                        Some(mem_mb) => (mem_mb as u64) * 1024 * 1024 / 2, // Convert MB to bytes and divide by 2
                                        None => {
                                            Console::warning("System memory not available, using default shm size");
                                            67108864 // Default to 64MB in bytes
                                        }
                                    };
                                    match manager_clone.start_container(&payload.image, &container_task_id, Some(env_vars), Some(cmd), gpu, Some(volumes), Some(shm_size), payload.entrypoint, None).await {
                                        Ok(container_id) => {
                                            Console::info("DockerService", &format!("Container started with id: {container_id}"));
                                        },
                                        Err(e) => {
                                            log::error!("Error starting container: {e}");
                                            state_clone.update_task_state(payload.id, TaskState::FAILED).await;
                                        }
                                    }
                                    state_clone.set_last_started(Utc::now()).await;
                                });
                                starting_container_tasks.lock().await.push(handle);
                            }
                        }
                    } else {
                        let container_status = container_match.unwrap().clone();
                        let status = match manager.get_container_details(&container_status.id).await {
                            Ok(status) => status,
                            Err(e) => {
                                log::error!("Error getting container details: {e}");
                                continue;
                            }
                        };
                        let task_state_current = match task_state_clone.get_current_task().await {
                            Some(task) => task.state,
                            None => {
                                log::error!("No task found in state");
                                continue;
                            }
                        };
                        // handle edge case where container instantly dies due to invalid command
                        if status.status == Some(ContainerStateStatusEnum::CREATED) && task_state_current == TaskState::FAILED {
                            Console::info("DockerService", "Task failed, waiting for new command from manager ...");
                        } else {
                            debug!("docker container status: {:?}, status_code: {:?}", status.status, status.status_code);
                            // Map Docker's container status onto the task lifecycle.
                            let task_state_live = match status.status {
                                Some(ContainerStateStatusEnum::RUNNING) => TaskState::RUNNING,
                                Some(ContainerStateStatusEnum::CREATED) => TaskState::PENDING,
                                Some(ContainerStateStatusEnum::EXITED) => {
                                    match status.status_code {
                                        Some(0) => TaskState::COMPLETED,
                                        Some(_) => TaskState::FAILED, // Any non-zero exit code
                                        None => TaskState::UNKNOWN,
                                    }
                                },
                                Some(ContainerStateStatusEnum::DEAD) => TaskState::FAILED,
                                Some(ContainerStateStatusEnum::PAUSED) => TaskState::PAUSED,
                                Some(ContainerStateStatusEnum::RESTARTING) => TaskState::RESTARTING,
                                Some(ContainerStateStatusEnum::REMOVING) | Some(ContainerStateStatusEnum::EMPTY) | None => TaskState::UNKNOWN,
                            };
                            // Only log if state changed
                            if task_state_live != task_state_current {
                                Console::info("DockerService", &format!("Task state changed from {task_state_current:?} to {task_state_live:?}"));
                                if task_state_live == TaskState::FAILED {
                                    consecutive_failures += 1;
                                    Console::info("DockerService", &format!("Task failed (attempt {consecutive_failures}), waiting with exponential backoff before restart"));
                                } else if task_state_live == TaskState::RUNNING {
                                    // Reset failure counter when container runs successfully
                                    consecutive_failures = 0;
                                }
                            }
                            if let Some(task) = task_state_clone.get_current_task().await {
                                task_state_clone.update_task_state(task.id, task_state_live).await;
                            }
                        }
                    }
                }
            },
        }
    }
    Ok(())
}
pub(crate) async fn get_logs(&self) -> Result<String, Box<dyn std::error::Error>> {
let current_task = self.state.get_current_task().await;
match current_task {
Some(task) => {
let config_hash = task.generate_config_hash();
let container_id = format!("{}-{}-{:x}", TASK_PREFIX, task.id, config_hash);
let logs = self
.docker_manager
.get_container_logs(&container_id, None)
.await?;
if logs.is_empty() {
Ok("No logs found in docker container".to_string())
} else {
Ok(logs)
}
}
None => Ok("No task running".to_string()),
}
}
pub(crate) async fn restart_task(&self) -> Result<(), Box<dyn std::error::Error>> {
let current_task = self.state.get_current_task().await;
match current_task {
Some(task) => {
let config_hash = task.generate_config_hash();
let container_id = format!("{}-{}-{:x}", TASK_PREFIX, task.id, config_hash);
self.docker_manager.restart_container(&container_id).await?;
Ok(())
}
None => Ok(()),
}
}
pub(crate) async fn get_task_details(&self, task: &Task) -> Option<TaskDetails> {
let config_hash = task.generate_config_hash();
let container_name = format!("{}-{}-{:x}", TASK_PREFIX, task.id, config_hash);
match self.docker_manager.list_containers(true).await {
Ok(containers) => {
let container = containers
.iter()
.find(|c| c.names.contains(&format!("/{container_name}")));
if let Some(container) = container {
match self
.docker_manager
.get_container_details(&container.id)
.await
{
Ok(details) => {
let docker_image_id = if let Ok(inspect_result) =
self.docker_manager.inspect_container(&container.id).await
{
inspect_result.image
} else {
Some(container.image.clone())
};
Some(TaskDetails {
docker_image_id,
container_id: Some(container.id.clone()),
container_status: details.status.map(|s| format!("{s:?}")),
container_created_at: Some(container.created),
container_exit_code: details.status_code,
})
}
Err(e) => {
debug!("Failed to get container details: {e}");
Some(TaskDetails {
docker_image_id: Some(container.image.clone()),
container_id: Some(container.id.clone()),
container_status: None,
container_created_at: Some(container.created),
container_exit_code: None,
})
}
}
} else {
debug!(
"Container {} not found for task {}",
container_name, task.id
);
None
}
}
Err(e) => {
debug!("Failed to list containers: {e}");
None
}
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy::primitives::Address;
    use shared::models::task::Task;
    use shared::models::task::TaskState;
    use uuid::Uuid;
    // Exercises the full reconciliation loop against a real Docker daemon:
    // sets a task, lets the loop start it, clears it, then shuts down.
    #[tokio::test]
    #[serial_test::serial]
    async fn test_docker_service_basic() {
        let cancellation_token = CancellationToken::new();
        let docker_service = DockerService::new(
            cancellation_token.clone(),
            None,
            Some(1024),
            "/tmp/com.prime.miner/metrics.sock".to_string(),
            "/tmp/test-storage".to_string(),
            Address::ZERO.to_string(),
            false,
        );
        let task = Task {
            image: "ubuntu:latest".to_string(),
            name: "test".to_string(),
            id: Uuid::new_v4(),
            env_vars: None,
            cmd: Some(vec!["sleep".to_string(), "5".to_string()]), // Reduced sleep time
            entrypoint: None,
            state: TaskState::PENDING,
            created_at: Utc::now().timestamp_millis(),
            ..Default::default()
        };
        let task_clone = task.clone();
        let state_clone = docker_service.state.clone();
        docker_service
            .state
            .set_current_task(Some(task_clone))
            .await;
        assert_eq!(
            docker_service.state.get_current_task().await.unwrap().name,
            task.name
        );
        tokio::spawn(async move {
            docker_service.run().await.unwrap();
        });
        // Reduced wait times
        tokio::time::sleep(Duration::from_secs(2)).await;
        state_clone.set_current_task(None).await;
        tokio::time::sleep(Duration::from_secs(2)).await;
        cancellation_token.cancel();
    }
    // Verifies that tasks carrying `${SOCKET_PATH}` placeholders in cmd/env
    // are accepted into the state; the actual substitution happens at
    // container start and is not asserted here.
    #[tokio::test]
    #[serial_test::serial]
    async fn test_socket_path_variable_replacement() {
        let cancellation_token = CancellationToken::new();
        let test_socket_path = "/custom/socket/path.sock";
        let docker_service = DockerService::new(
            cancellation_token.clone(),
            None,
            Some(1024),
            test_socket_path.to_string(),
            "/tmp/test-storage".to_string(),
            Address::ZERO.to_string(),
            false,
        );
        // Test command argument replacement
        let task_with_cmd = Task {
            image: "ubuntu:latest".to_string(),
            name: "test_cmd_replacement".to_string(),
            id: Uuid::new_v4(),
            cmd: Some(vec!["echo".to_string(), "${SOCKET_PATH}".to_string()]),
            env_vars: None,
            entrypoint: None,
            state: TaskState::PENDING,
            created_at: Utc::now().timestamp_millis(),
            ..Default::default()
        };
        // Test environment variable replacement
        let task_with_env = Task {
            image: "ubuntu:latest".to_string(),
            name: "test_env_replacement".to_string(),
            id: Uuid::new_v4(),
            cmd: None,
            env_vars: Some(HashMap::from([
                ("MY_SOCKET_PATH".to_string(), "${SOCKET_PATH}".to_string()),
                (
                    "CUSTOM_PATH".to_string(),
                    "prefix_${SOCKET_PATH}_suffix".to_string(),
                ),
                ("NORMAL_VAR".to_string(), "no_replacement".to_string()),
            ])),
            entrypoint: None,
            state: TaskState::PENDING,
            created_at: Utc::now().timestamp_millis(),
            ..Default::default()
        };
        // Set tasks and verify state
        docker_service
            .state
            .set_current_task(Some(task_with_cmd.clone()))
            .await;
        assert_eq!(
            docker_service.state.get_current_task().await.unwrap().name,
            task_with_cmd.name
        );
        docker_service
            .state
            .set_current_task(Some(task_with_env.clone()))
            .await;
        assert_eq!(
            docker_service.state.get_current_task().await.unwrap().name,
            task_with_env.name
        );
        // Note: We can't easily test the actual replacement in container start
        // without mocking DockerManager, but we've verified the logic visually
        cancellation_token.cancel();
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/mod.rs | crates/worker/src/docker/mod.rs | pub(crate) mod docker_manager;
pub(crate) mod service;
pub(crate) mod state;
pub(crate) mod task_container;
pub(crate) mod taskbridge;
pub(crate) use docker_manager::DockerManager;
pub(crate) use service::DockerService;
pub(crate) use state::DockerState;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/task_container.rs | crates/worker/src/docker/task_container.rs | #[derive(Debug, Clone, PartialEq)]
pub(crate) struct TaskContainer {
pub task_id: String,
pub config_hash: String,
}
impl TaskContainer {
pub(crate) fn data_dir_name(&self) -> String {
format!("prime-task-{}", self.task_id)
}
}
impl std::str::FromStr for TaskContainer {
type Err = &'static str;
fn from_str(container_name: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = container_name.trim_start_matches('/').split('-').collect();
if parts.len() >= 8 && parts[0] == "prime" && parts[1] == "task" {
let task_id = parts[2..7].join("-");
let config_hash = parts[7..].join("-");
Ok(Self {
task_id,
config_hash,
})
} else {
Err("Invalid container name format")
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    #[test]
    fn test_task_id_from_container_name() {
        // Test with valid container name
        let container_name = "prime-task-123e4567-e89b-12d3-a456-426614174000-a1b2c3d4";
        let result = TaskContainer::from_str(container_name);
        assert_eq!(
            result.map(|c| c.task_id),
            Ok("123e4567-e89b-12d3-a456-426614174000".to_string())
        );
        // Test with leading slash (Docker reports names as "/name")
        let container_name = "/prime-task-123e4567-e89b-12d3-a456-426614174000-a1b2c3d4";
        let result = TaskContainer::from_str(container_name);
        assert_eq!(
            result.map(|c| c.task_id),
            Ok("123e4567-e89b-12d3-a456-426614174000".to_string())
        );
        // Test with invalid format
        let container_name = "not-a-prime-task";
        let result = TaskContainer::from_str(container_name);
        assert!(result.is_err());
        // Test with short UUID (should fail)
        let container_name = "prime-task-short-uuid-hash";
        let result = TaskContainer::from_str(container_name);
        assert!(result.is_err());
        // Test with no hash suffix
        let container_name = "prime-task-123e4567-e89b-12d3-a456-426614174000";
        let result = TaskContainer::from_str(container_name);
        assert!(result.is_err());
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/docker_manager.rs | crates/worker/src/docker/docker_manager.rs | use crate::docker::task_container::TaskContainer;
use bollard::container::{
Config, CreateContainerOptions, ListContainersOptions, LogsOptions, StartContainerOptions,
};
use bollard::container::{InspectContainerOptions, LogOutput};
use bollard::errors::Error as DockerError;
use bollard::image::CreateImageOptions;
use bollard::models::ContainerStateStatusEnum;
use bollard::models::DeviceRequest;
use bollard::models::HostConfig;
use bollard::volume::CreateVolumeOptions;
use bollard::Docker;
use futures_util::StreamExt;
use log::{debug, error, info};
use shared::models::node::GpuSpecs;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::Duration;
use strip_ansi_escapes::strip;
/// Summary of a container as returned by the Docker "list containers" API.
#[derive(Debug, Clone)]
pub(crate) struct ContainerInfo {
    // Container ID as reported by Docker.
    pub id: String,
    // Image reference the container was created from.
    #[allow(unused)]
    pub image: String,
    // All names attached to the container; Docker prefixes each with '/'.
    pub names: Vec<String>,
    // Creation time as reported by Docker (epoch seconds per the Docker API).
    #[allow(unused)]
    pub created: i64,
}
/// Detailed view of one container, built from a Docker "inspect" response.
#[derive(Debug, Clone)]
pub(crate) struct ContainerDetails {
    // Container ID as reported by Docker.
    #[allow(unused)]
    pub id: String,
    // Image reference the container was created from.
    #[allow(unused)]
    pub image: String,
    // Lifecycle status (running, exited, ...) if the inspect response had state.
    pub status: Option<ContainerStateStatusEnum>,
    // Exit code from the container state, when available.
    pub status_code: Option<i64>,
    // Populated by get_container_details with exactly one entry (the inspect name).
    #[allow(unused)]
    pub names: Vec<String>,
    // Creation time parsed as i64; 0 when the inspect timestamp doesn't parse.
    #[allow(unused)]
    pub created: i64,
}
/// Thin wrapper around the Docker daemon for task container lifecycle
/// management: image pulls, container start/stop/removal, volumes, and
/// per-task storage directories under `storage_path`.
pub(crate) struct DockerManager {
    // Handle to the Docker daemon (connected via the default Unix socket).
    docker: Docker,
    // Root directory under which per-task data/mount directories are created.
    storage_path: String,
    /// Controls whether to use host network mode for containers.
    ///
    /// Currently defaults to host mode (when false) to work around performance issues
    /// with Docker bridge networking on certain cloud providers. This is a trade-off
    /// between security isolation and performance.
    ///
    /// TODO: Investigate root cause of bridge network performance degradation and
    /// implement a more optimal solution that maintains security isolation.
    disable_host_network_mode: bool,
}
impl DockerManager {
const DEFAULT_LOG_TAIL: i64 = 300;
/// Sanitize a single path component to prevent directory traversal.
///
/// Keeps only alphanumerics, '_', '-' and '.', then rejects results that
/// are empty, exactly "." or "..", or longer than 255 characters.
fn sanitize_path_component(component: &str) -> Result<String, DockerError> {
    let sanitized: String = component
        .chars()
        .filter(|&c| c.is_alphanumeric() || matches!(c, '_' | '-' | '.'))
        .collect();

    // Reject components that sanitize down to nothing or to dot-only names.
    if matches!(sanitized.as_str(), "" | "." | "..") {
        return Err(DockerError::DockerResponseServerError {
            status_code: 400,
            message: format!("Invalid path component: {component}"),
        });
    }

    // Keep within common filesystem name-length limits.
    if sanitized.len() > 255 {
        return Err(DockerError::DockerResponseServerError {
            status_code: 400,
            message: "Path component too long".to_string(),
        });
    }

    Ok(sanitized)
}
/// Build a path under the configured storage root from sanitized components.
fn safe_storage_path(&self, components: &[&str]) -> Result<PathBuf, DockerError> {
    let base = PathBuf::from(&self.storage_path);
    let mut joined = base.clone();
    for part in components {
        joined = joined.join(Self::sanitize_path_component(part)?);
    }

    // Defense in depth: sanitization should already make escaping the root
    // impossible, but verify the final path anyway.
    if !joined.starts_with(&base) {
        return Err(DockerError::DockerResponseServerError {
            status_code: 400,
            message: "Path traversal attempt detected".to_string(),
        });
    }

    Ok(joined)
}
/// Create `path` (and any missing parents), then open up its permissions.
///
/// NOTE(review): despite the name, on Unix this sets mode 0o777
/// (world-writable). That looks intentional so container processes running
/// as arbitrary UIDs can write into bind-mounted task directories — confirm
/// before tightening.
fn create_secure_directory(path: &Path) -> Result<(), DockerError> {
    std::fs::create_dir_all(path).map_err(|e| DockerError::DockerResponseServerError {
        status_code: 500,
        message: format!("Failed to create directory: {e}"),
    })?;
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = std::fs::metadata(path)
            .map_err(|e| DockerError::DockerResponseServerError {
                status_code: 500,
                message: format!("Failed to get directory metadata: {e}"),
            })?
            .permissions();
        // 0o777: see NOTE in the doc comment above.
        perms.set_mode(0o777);
        std::fs::set_permissions(path, perms).map_err(|e| {
            DockerError::DockerResponseServerError {
                status_code: 500,
                message: format!("Failed to set directory permissions: {e}"),
            }
        })?;
    }
    Ok(())
}
/// Create a new DockerManager instance.
///
/// Connects to the Docker daemon over the default Unix socket and ensures
/// the storage directory exists, creating it (with open permissions) when
/// missing.
pub(crate) fn new(
    storage_path: String,
    disable_host_network_mode: bool,
) -> Result<Self, DockerError> {
    let docker = match Docker::connect_with_unix_defaults() {
        Ok(docker) => docker,
        Err(e) => {
            error!("Failed to connect to Docker daemon: {e}");
            return Err(e);
        }
    };
    // Validate and create storage directory
    let storage_path_buf = PathBuf::from(&storage_path);
    if !storage_path_buf.exists() {
        info!("Creating storage directory: {storage_path}");
        Self::create_secure_directory(&storage_path_buf)?;
    } else {
        // Verify it's a directory. NOTE(review): writability is not actually
        // checked here; failures surface later on first write.
        if !storage_path_buf.is_dir() {
            return Err(DockerError::DockerResponseServerError {
                status_code: 400,
                message: format!("Storage path is not a directory: {storage_path}"),
            });
        }
    }
    info!("DockerManager initialized with storage path: {storage_path}");
    Ok(Self {
        docker,
        storage_path,
        disable_host_network_mode,
    })
}
/// Pull a Docker image unless a suitable copy already exists locally.
///
/// Images tagged `:latest` or `:main` are always re-pulled so that moving
/// tags pick up the newest version.
pub(crate) async fn pull_image(&self, image: &str) -> Result<(), DockerError> {
    debug!("Checking if image needs to be pulled: {image}");

    // Moving tags must always be refreshed.
    let always_pull = image.ends_with(":latest") || image.ends_with(":main");

    // Skip the pull only for pinned tags that are already present locally.
    if !always_pull && self.docker.inspect_image(image).await.is_ok() {
        debug!("Image {image} already exists locally");
        return Ok(());
    }

    if always_pull {
        info!(
            "Image {image} uses :latest or :main tag, pulling to ensure we have the newest version"
        );
    } else {
        info!("Image {image} not found locally, pulling...");
    }

    // Split "name:tag"; an untagged reference defaults to "latest".
    let (from_image, tag) = image.split_once(':').unwrap_or((image, "latest"));
    let options = CreateImageOptions {
        from_image,
        tag,
        ..Default::default()
    };

    let mut progress = self.docker.create_image(Some(options), None, None);
    while let Some(step) = progress.next().await {
        let create_info = step?;
        debug!("Pull progress: {create_info:?}");
    }

    info!("Successfully pulled image {image}");
    Ok(())
}
#[allow(clippy::too_many_arguments)]
/// Start a new container with the given image and configuration.
///
/// Before starting the container this method:
/// - creates a per-task data directory under the storage root and binds it
///   into the container at `/data` via a dedicated local volume;
/// - creates (or reuses) a `shared_data` volume mounted at `/shared`;
/// - pulls the image when needed (see `pull_image`);
/// - for `task_volume` entries in `volumes`, rewrites the host path into a
///   sandboxed directory under the task's storage area.
///
/// Returns the ID of the started container.
pub(crate) async fn start_container(
    &self,
    image: &str,
    name: &str,
    env_vars: Option<HashMap<String, String>>,
    command: Option<Vec<String>>,
    gpu: Option<GpuSpecs>,
    // Simple Vec of (host_path, container_path, read_only, task_volume)
    volumes: Option<Vec<(String, String, bool, bool)>>,
    shm_size: Option<u64>,
    entrypoint: Option<Vec<String>>,
    restart_policy_max_retries: Option<i64>,
) -> Result<String, DockerError> {
    info!("Starting to pull image: {image}");
    // (volume_or_host_path, container_path, read_only) triples bound below.
    let mut final_volumes = Vec::new();
    let volume_name = format!("{name}_data");
    // Derive the storage directory name from the task container name.
    let data_dir_name = match TaskContainer::from_str(name) {
        Ok(task_container) => task_container.data_dir_name(),
        Err(_) => {
            // Fallback to using full container name if extraction fails
            name.trim_start_matches('/').to_string()
        }
    };
    let task_data_path = self.safe_storage_path(&[&data_dir_name, "data"])?;
    Self::create_secure_directory(&task_data_path)?;
    // Bind-mount the task data directory through a named local volume.
    self.docker
        .create_volume(CreateVolumeOptions {
            name: volume_name.clone(),
            driver: "local".to_string(),
            driver_opts: HashMap::from([
                ("type".to_string(), "none".to_string()),
                ("o".to_string(), "bind".to_string()),
                (
                    "device".to_string(),
                    task_data_path.to_string_lossy().to_string(),
                ),
            ]),
            labels: HashMap::new(),
        })
        .await?;
    final_volumes.push((volume_name, "/data".to_string(), false));
    // Create shared volume if it doesn't exist (idempotent)
    let shared_path = self.safe_storage_path(&["shared"])?;
    Self::create_secure_directory(&shared_path)?;
    // Try to create shared volume, ignore if it already exists
    match self
        .docker
        .create_volume(CreateVolumeOptions {
            name: "shared_data".to_string(),
            driver: "local".to_string(),
            driver_opts: HashMap::from([
                ("type".to_string(), "none".to_string()),
                ("o".to_string(), "bind".to_string()),
                (
                    "device".to_string(),
                    shared_path.to_string_lossy().to_string(),
                ),
            ]),
            labels: HashMap::new(),
        })
        .await
    {
        Ok(_) => {
            debug!("Shared volume 'shared_data' created successfully");
        }
        // 409 Conflict: another container already created it — reuse.
        Err(DockerError::DockerResponseServerError {
            status_code: 409, ..
        }) => {
            debug!("Shared volume 'shared_data' already exists, reusing");
        }
        Err(e) => {
            error!("Failed to create shared volume: {e}");
            return Err(e);
        }
    }
    final_volumes.push(("shared_data".to_string(), "/shared".to_string(), false));
    self.pull_image(image).await?;
    // Flatten env vars to Docker's "KEY=VALUE" form.
    let env = env_vars.map(|vars| {
        vars.iter()
            .map(|(k, v)| format!("{k}={v}"))
            .collect::<Vec<String>>()
    });
    let volume_binds = {
        let mut binds = final_volumes
            .iter()
            .map(|(vol, container, read_only)| {
                if *read_only {
                    format!("{vol}:{container}:ro")
                } else {
                    format!("{vol}:{container}")
                }
            })
            .collect::<Vec<String>>();
        if let Some(vols) = volumes {
            let processed_volumes: Vec<(String, String, bool)> = vols
                .into_iter()
                .map(|(host_path, container_path, read_only, task_volume)| {
                    if task_volume {
                        // Create volume mount directory within the task's storage area
                        // Remove leading slash and sanitize the path
                        let sanitized_host_path =
                            host_path.trim_start_matches('/').replace('/', "_");
                        let mount_dir_name = match TaskContainer::from_str(name) {
                            Ok(task_container) => task_container.data_dir_name(),
                            Err(_) => {
                                // Fallback to using full container name if extraction fails
                                name.trim_start_matches('/').to_string()
                            }
                        };
                        match self.safe_storage_path(&[
                            &mount_dir_name,
                            "mounts",
                            &sanitized_host_path,
                        ]) {
                            Ok(volume_mount_dir) => {
                                // Create the directory
                                if let Err(e) = Self::create_secure_directory(&volume_mount_dir)
                                {
                                    error!(
                                        "Failed to create volume mount directory {}: {}",
                                        volume_mount_dir.display(),
                                        e
                                    );
                                }
                                (
                                    volume_mount_dir.to_string_lossy().to_string(),
                                    container_path,
                                    read_only,
                                )
                            }
                            Err(e) => {
                                error!("Failed to create secure path for volume mount: {e}");
                                // Fallback to original host path for non-task volumes
                                (host_path, container_path, read_only)
                            }
                        }
                    } else {
                        // Use the original host path for non-task volumes
                        (host_path, container_path, read_only)
                    }
                })
                .collect();
            binds.extend(
                processed_volumes
                    .into_iter()
                    .map(|(host, container, read_only)| {
                        if read_only {
                            format!("{host}:{container}:ro")
                        } else {
                            format!("{host}:{container}")
                        }
                    }),
            );
        }
        Some(binds)
    };
    // See the field doc on `disable_host_network_mode` for why host
    // networking is the current default.
    let network_mode = if self.disable_host_network_mode {
        "bridge".to_string()
    } else {
        "host".to_string()
    };
    let host_config = if let Some(gpu) = gpu {
        let device_ids = match &gpu.indices {
            Some(indices) if !indices.is_empty() => {
                // Use specific GPU indices if available
                indices.iter().map(|i| i.to_string()).collect()
            }
            _ => {
                // Request all available GPUs if no specific indices
                vec!["all".to_string()]
            }
        };
        Some(HostConfig {
            network_mode: Some(network_mode),
            extra_hosts: Some(vec!["host.docker.internal:host-gateway".into()]),
            device_requests: Some(vec![DeviceRequest {
                driver: Some("nvidia".into()),
                count: None,
                device_ids: Some(device_ids),
                capabilities: Some(vec![vec!["gpu".into()]]),
                options: Some(HashMap::new()),
            }]),
            binds: volume_binds,
            shm_size: shm_size.map(|s| s as i64),
            restart_policy: Some(bollard::models::RestartPolicy {
                name: Some(bollard::models::RestartPolicyNameEnum::ON_FAILURE),
                maximum_retry_count: restart_policy_max_retries,
            }),
            ..Default::default()
        })
    } else {
        Some(HostConfig {
            network_mode: Some(network_mode),
            extra_hosts: Some(vec!["host.docker.internal:host-gateway".into()]),
            binds: volume_binds,
            restart_policy: Some(bollard::models::RestartPolicy {
                name: Some(bollard::models::RestartPolicyNameEnum::ON_FAILURE),
                maximum_retry_count: restart_policy_max_retries,
            }),
            ..Default::default()
        })
    };
    // Create container configuration
    let config = Config {
        image: Some(image),
        env: env.as_ref().map(|e| e.iter().map(String::as_str).collect()),
        cmd: command
            .as_ref()
            .map(|c| c.iter().map(String::as_str).collect()),
        entrypoint: entrypoint
            .as_ref()
            .map(|e| e.iter().map(String::as_str).collect()),
        host_config,
        ..Default::default()
    };
    info!("Creating container with name: {name}");
    // Create and start container
    let container = self
        .docker
        .create_container(
            Some(CreateContainerOptions {
                name,
                platform: None,
            }),
            config,
        )
        .await
        .map_err(|e| {
            error!("Failed to create container: {e}");
            e
        })?;
    info!("Container created successfully with ID: {}", container.id);
    debug!("Starting container {}", container.id);
    self.docker
        .start_container(&container.id, None::<StartContainerOptions<String>>)
        .await?;
    info!("Container {} started successfully", container.id);
    Ok(container.id)
}
/// Remove a container together with its volume and storage directory.
///
/// Best-effort, multi-step teardown:
/// 1. stop and remove the container (retrying on 409 while removal is in
///    progress; 404 means it is already gone);
/// 2. poll until Docker no longer reports the container;
/// 3. remove the per-container `<name>_data` volume (retrying while in use);
/// 4. delete the task's storage directory — but only when no other
///    container shares the same task ID.
pub(crate) async fn remove_container(&self, container_id: &str) -> Result<(), DockerError> {
    // Fetch details first: the container name is needed later for volume and
    // directory cleanup. `None` means the container no longer exists.
    let container = (self.get_container_details(container_id).await).ok();
    if container.is_some() {
        if let Err(e) = self.docker.stop_container(container_id, None).await {
            error!("Failed to stop container: {e}");
        }
    }
    let max_retries = 10;
    // --- Step 1: Remove container with retries ---
    for attempt in 0..max_retries {
        match self.docker.remove_container(container_id, None).await {
            Ok(_) => {
                info!("Container {container_id} removed successfully");
                break;
            }
            // 409: removal already in progress — wait and retry.
            Err(DockerError::DockerResponseServerError {
                status_code: 409, ..
            }) => {
                debug!(
                    "Container removal in progress, retrying ({}/{})",
                    attempt + 1,
                    max_retries
                );
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
            // 404: already gone — nothing left to do.
            Err(DockerError::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                break;
            }
            Err(e) => {
                info!("Failed to remove container {container_id}: {e}");
                return Err(e);
            }
        }
    }
    // --- Step 2: Ensure container is actually gone ---
    let mut gone = false;
    for _ in 0..5 {
        match self
            .docker
            .inspect_container(container_id, None::<InspectContainerOptions>)
            .await
        {
            Ok(_) => {
                debug!("Container {container_id} still exists, waiting...");
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
            Err(DockerError::DockerResponseServerError {
                status_code: 404, ..
            }) => {
                gone = true;
                break;
            }
            Err(e) => {
                error!("Failed to inspect container {container_id}: {e}");
                break;
            }
        }
    }
    if !gone {
        // Continue with cleanup anyway; later steps are best-effort.
        error!("Container {container_id} still exists after waiting");
    }
    // --- Step 3: Remove volume with retries ---
    if let Some(container) = container {
        // Safe: get_container_details always populates `names` with exactly
        // one entry (the inspect name).
        let trimmed_name = container.names.first().unwrap().trim_start_matches('/');
        let volume_name = format!("{trimmed_name}_data");
        for attempt in 0..max_retries {
            match self.docker.remove_volume(&volume_name, None).await {
                Ok(_) => {
                    info!("Volume {volume_name} removed successfully");
                    break;
                }
                Err(DockerError::DockerResponseServerError {
                    status_code: 404, ..
                }) => {
                    debug!("Volume {volume_name} already removed");
                    break;
                }
                Err(DockerError::DockerResponseServerError {
                    status_code: 409, ..
                }) => {
                    debug!(
                        "Volume {} is still in use, retrying ({}/{})",
                        volume_name,
                        attempt + 1,
                        max_retries
                    );
                    tokio::time::sleep(Duration::from_secs(1)).await;
                }
                Err(e) => {
                    error!("Failed to remove volume {volume_name}: {e}");
                    break;
                }
            }
        }
        // --- Step 4: Check if other containers with same task ID exist before removing directory ---
        let should_remove_directory = if let Ok(task_container) =
            TaskContainer::from_str(trimmed_name)
        {
            // Check if there are other containers with the same task ID
            match self.list_containers(true).await {
                Ok(containers) => {
                    let other_containers_with_same_task = containers.iter().any(|c| {
                        c.names.iter().any(|name| {
                            let clean_name = name.trim_start_matches('/');
                            // from_str accepts the leading '/' Docker adds.
                            if let Ok(other_task_container) = TaskContainer::from_str(name) {
                                // Same task ID but different container (not the one being removed)
                                other_task_container.task_id == task_container.task_id
                                    && clean_name != trimmed_name
                            } else {
                                false
                            }
                        })
                    });
                    if other_containers_with_same_task {
                        info!(
                            "Other containers with task ID {} exist, keeping shared directory",
                            task_container.task_id
                        );
                        false
                    } else {
                        info!("No other containers with task ID {} found, safe to remove directory", task_container.task_id);
                        true
                    }
                }
                Err(e) => {
                    error!("Failed to list containers for cleanup check: {e}");
                    // Err on the side of caution - don't remove directory if we can't check
                    false
                }
            }
        } else {
            // If we can't extract task ID, use original behavior
            true
        };
        if should_remove_directory {
            let dir_name = if let Ok(task_container) = TaskContainer::from_str(trimmed_name) {
                task_container.data_dir_name()
            } else {
                trimmed_name.to_string()
            };
            match self.safe_storage_path(&[&dir_name]) {
                Ok(dir_path) => {
                    // Check if directory exists before attempting to remove it
                    if dir_path.exists() {
                        let mut success = false;
                        for attempt in 0..max_retries {
                            match std::fs::remove_dir_all(&dir_path) {
                                Ok(_) => {
                                    info!(
                                        "Directory {} removed successfully",
                                        dir_path.display()
                                    );
                                    success = true;
                                    break;
                                }
                                Err(e) => {
                                    debug!(
                                        "Attempt {}/{} failed to remove dir {}: {}",
                                        attempt + 1,
                                        max_retries,
                                        dir_path.display(),
                                        e
                                    );
                                    tokio::time::sleep(Duration::from_secs(1)).await;
                                }
                            }
                        }
                        if !success {
                            error!(
                                "Failed to remove directory {} after {} attempts — trying fallback",
                                dir_path.display(), max_retries
                            );
                            // Try `rm -rf` as fallback
                            match std::process::Command::new("rm")
                                .arg("-rf")
                                .arg(&dir_path)
                                .status()
                            {
                                Ok(status) if status.success() => {
                                    info!(
                                        "Fallback removal of {} succeeded",
                                        dir_path.display()
                                    );
                                }
                                Ok(status) => {
                                    error!("Fallback rm -rf failed with status {status}");
                                }
                                Err(e) => {
                                    error!("Failed to execute fallback rm -rf: {e}");
                                }
                            }
                        }
                    } else {
                        debug!(
                            "Directory {} does not exist, skipping removal",
                            dir_path.display()
                        );
                    }
                }
                Err(e) => {
                    error!("Failed to create secure path for directory removal: {e}");
                }
            }
        }
    }
    Ok(())
}
/// List containers known to Docker.
///
/// With `list_all == true` stopped containers are included as well;
/// otherwise only running containers are returned.
pub(crate) async fn list_containers(
    &self,
    list_all: bool,
) -> Result<Vec<ContainerInfo>, DockerError> {
    debug!("Listing running containers");
    let options = ListContainersOptions::<String> {
        all: list_all,
        ..Default::default()
    };
    let summaries = self.docker.list_containers(Some(options)).await?;
    // Project the raw Docker summaries into our own ContainerInfo type,
    // defaulting any missing fields.
    let result = summaries
        .iter()
        .map(|summary| ContainerInfo {
            id: summary.id.clone().unwrap_or_default(),
            image: summary.image.clone().unwrap_or_default(),
            names: summary.names.clone().unwrap_or_default(),
            created: summary.created.unwrap_or_default(),
        })
        .collect();
    Ok(result)
}
/// Inspect one container by ID and project the response into
/// [`ContainerDetails`].
pub(crate) async fn get_container_details(
    &self,
    container_id: &str,
) -> Result<ContainerDetails, DockerError> {
    debug!("Getting details for container: {container_id}");
    let inspected = self.docker.inspect_container(container_id, None).await?;
    let state = inspected.state.clone();

    let status = state.as_ref().and_then(|s| s.status);
    let status_code = state.as_ref().and_then(|s| s.exit_code);
    // NOTE(review): `created` is parsed via str::parse::<i64>; if Docker
    // reports an RFC3339 timestamp string this falls back to 0 — confirm.
    let created = inspected
        .created
        .and_then(|c| c.parse::<i64>().ok())
        .unwrap_or_default();

    let info = ContainerDetails {
        id: inspected.id.unwrap_or_default(),
        image: inspected.image.unwrap_or_default(),
        status,
        status_code,
        names: vec![inspected.name.unwrap_or_default()],
        created,
    };
    debug!("Retrieved details for container {container_id}");
    Ok(info)
}
/// Restart a container via the Docker API using the default stop timeout.
pub(crate) async fn restart_container(&self, container_id: &str) -> Result<(), DockerError> {
    debug!("Restarting container: {container_id}");
    self.docker.restart_container(container_id, None).await?;
    debug!("Container {container_id} restarted successfully");
    Ok(())
}
pub(crate) async fn get_container_logs(
&self,
container_id: &str,
tail: Option<i64>,
) -> Result<String, DockerError> {
let tail_value = tail.unwrap_or(Self::DEFAULT_LOG_TAIL).to_string();
let options = LogsOptions::<String> {
stdout: true,
stderr: true,
tail: tail_value,
timestamps: false,
follow: false,
..Default::default()
};
let mut logs_stream = self.docker.logs(container_id, Some(options));
let mut all_logs = Vec::new();
// Buffer to accumulate a line that might be updated via carriage returns
let mut current_line = String::new();
while let Some(log_result) = logs_stream.next().await {
match log_result {
Ok(log_output) => {
let message_bytes = match log_output {
LogOutput::StdOut { message }
| LogOutput::StdErr { message }
| LogOutput::Console { message }
| LogOutput::StdIn { message } => message,
};
// Strip ANSI escape sequences, skipping on error.
let cleaned: Vec<u8> = strip(&message_bytes);
// Convert to string without immediately replacing '\r'
let cleaned_str = String::from_utf8_lossy(&cleaned);
if cleaned_str.contains('\r') {
// For messages with carriage returns, treat it as an update to the current line.
let parts: Vec<&str> = cleaned_str.split('\r').collect();
if let Some(last_segment) = parts.last() {
// Update our current line buffer with the latest segment.
current_line = last_segment.to_string();
}
} else {
// Flush any buffered progress update if present.
if !current_line.is_empty() {
all_logs.push(current_line.clone());
current_line.clear();
}
// Process the message normally.
for line in cleaned_str.lines() {
let trimmed = line.trim();
if !trimmed.is_empty() {
all_logs.push(trimmed.to_string());
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/taskbridge/json_helper.rs | crates/worker/src/docker/taskbridge/json_helper.rs | // Helper function to extract the next complete JSON object from a string
pub(super) fn extract_next_json(input: &[u8]) -> Option<(&str, usize)> {
// Skip any leading whitespace (including newlines)
let mut start_pos = 0;
while start_pos < input.len() && (input[start_pos] <= 32) {
// ASCII space and below includes all whitespace
start_pos += 1;
}
if start_pos >= input.len() {
return None; // No content left
}
// If we find an opening brace, look for the matching closing brace
if input[start_pos] == b'{' {
let mut brace_count = 1;
let mut pos = start_pos + 1;
while pos < input.len() && brace_count > 0 {
match input[pos] {
b'{' => brace_count += 1,
b'}' => brace_count -= 1,
_ => {}
}
pos += 1;
}
if brace_count == 0 {
// Found a complete JSON object
if let Ok(json_str) = std::str::from_utf8(&input[start_pos..pos]) {
return Some((json_str, pos));
}
}
}
// Alternatively, look for a newline-terminated JSON object
if let Some(newline_pos) = input[start_pos..].iter().position(|&c| c == b'\n') {
let end_pos = start_pos + newline_pos;
// Check if we have a complete JSON object in this line
if let Ok(line) = std::str::from_utf8(&input[start_pos..end_pos]) {
let trimmed = line.trim();
if trimmed.starts_with('{') && trimmed.ends_with('}') {
return Some((trimmed, end_pos + 1)); // +1 to consume the newline
}
}
// If not a complete JSON object, skip this line and try the next
return extract_next_json(&input[end_pos + 1..])
.map(|(json, len)| (json, len + end_pos + 1));
}
None
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): in test_json_with_whitespace and
    // test_multiple_json_with_whitespace the asserted consumed offsets (18
    // and 14) imply TWO leading spaces in the input literals; the literals
    // below show one. The whitespace may have been collapsed by formatting
    // — confirm against the original file before relying on these.

    #[test]
    fn test_empty_input() {
        // No bytes at all -> nothing to extract.
        assert_eq!(extract_next_json(b""), None);
    }
    #[test]
    fn test_whitespace_only() {
        // Whitespace is skipped; with nothing after it there is no object.
        assert_eq!(extract_next_json(b" \n\t "), None);
    }
    #[test]
    fn test_simple_json() {
        let input = b"{\"key\": \"value\"}";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\"key\": \"value\"}");
        // `pos` reports the total number of input bytes consumed.
        assert_eq!(pos, input.len());
    }
    #[test]
    fn test_json_with_whitespace() {
        let input = b" {\"key\": \"value\"} ";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\"key\": \"value\"}");
        assert_eq!(pos, 18);
    }
    #[test]
    fn test_nested_json() {
        // Inner braces must be balanced, not treated as terminators.
        let input = b"{\"outer\": {\"inner\": \"value\"}}";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\"outer\": {\"inner\": \"value\"}}");
        assert_eq!(pos, input.len());
    }
    #[test]
    fn test_multiple_json_objects() {
        // Only the first object is returned; the rest stays unconsumed.
        let input = b"{\"first\": 1}\n{\"second\": 2}";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\"first\": 1}");
        assert_eq!(pos, 12);
    }
    #[test]
    fn test_incomplete_json() {
        // An unterminated object means "wait for more data".
        let input = b"{\"key\": \"value\"";
        assert_eq!(extract_next_json(input), None);
    }
    #[test]
    fn test_json_with_newlines() {
        // Newlines inside a brace-balanced object are fine.
        let input = b"{\n \"key\": \"value\"\n}";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\n \"key\": \"value\"\n}");
        assert_eq!(pos, input.len());
    }
    #[test]
    fn test_multiple_json_with_whitespace() {
        let input = b" {\"first\": 1} \n {\"second\": 2} ";
        let (json, pos) = extract_next_json(input).unwrap();
        assert_eq!(json, "{\"first\": 1}");
        assert_eq!(pos, 14);
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/taskbridge/bridge.rs | crates/worker/src/docker/taskbridge/bridge.rs | use crate::docker::taskbridge::file_handler;
use crate::docker::taskbridge::json_helper;
use crate::metrics::store::MetricsStore;
use crate::state::system_state::SystemState;
use anyhow::bail;
use anyhow::Result;
use futures::future::BoxFuture;
use futures::stream::FuturesUnordered;
use futures::FutureExt;
use futures::StreamExt as _;
use log::{debug, error, info, warn};
use rust_ipfs::Ipfs;
use serde::{Deserialize, Serialize};
use shared::models::node::Node;
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::wallet::Wallet;
use shared::web3::wallet::WalletProvider;
use std::collections::HashSet;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use std::{fs, path::Path};
use tokio::io::AsyncReadExt;
use tokio::{io::BufReader, net::UnixListener};
const DEFAULT_SOCKET_FILE: &str = "prime-worker/com.prime.worker/metrics.sock";
/// Unix-socket bridge over which in-container task processes report
/// metrics and file events back to the worker.
pub(crate) struct TaskBridge {
    // Filesystem path of the Unix domain socket to listen on.
    socket_path: std::path::PathBuf,
    // Shared dependencies handed to every connection handler.
    config: TaskBridgeConfig,
}
/// Dependencies shared by all bridge connection handlers.
#[derive(Clone)]
struct TaskBridgeConfig {
    // Sink for task metric updates.
    metrics_store: Arc<MetricsStore>,
    // TODO: the optional values are only used for testing; refactor
    // the tests such that these aren't optional
    contracts: Option<Contracts<WalletProvider>>,
    node_config: Option<Node>,
    node_wallet: Option<Wallet>,
    // Root of the Docker storage area used when resolving uploaded files.
    docker_storage_path: String,
    state: Arc<SystemState>,
    // Optional IPFS client used for file uploads.
    ipfs: Option<Ipfs>,
}
/// Wire format of a metric message received over the bridge socket.
#[derive(Deserialize, Serialize, Debug)]
struct MetricInput {
    // Task the metrics belong to.
    task_id: String,
    // All remaining top-level JSON fields are captured as metric name/value pairs.
    #[serde(flatten)]
    metrics: std::collections::HashMap<String, serde_json::Value>,
}
impl TaskBridge {
    #[allow(clippy::too_many_arguments)]
    /// Construct a bridge.
    ///
    /// When `socket_path` is `None`, the socket is placed at
    /// `<home>/prime-worker/com.prime.worker/metrics.sock`; resolving the
    /// home directory can fail, hence the `Result`.
    pub(crate) fn new(
        socket_path: Option<&str>,
        metrics_store: Arc<MetricsStore>,
        contracts: Option<Contracts<WalletProvider>>,
        node_config: Option<Node>,
        node_wallet: Option<Wallet>,
        docker_storage_path: String,
        state: Arc<SystemState>,
        ipfs: Option<Ipfs>,
    ) -> Result<Self> {
        let path = match socket_path {
            Some(path) => std::path::PathBuf::from(path),
            None => {
                let path =
                    homedir::my_home()?.ok_or(anyhow::anyhow!("failed to get home directory"))?;
                path.join(DEFAULT_SOCKET_FILE)
            }
        };
        Ok(Self {
            socket_path: path,
            config: TaskBridgeConfig {
                metrics_store,
                contracts,
                node_config,
                node_wallet,
                docker_storage_path,
                state,
                ipfs,
            },
        })
    }
    /// Path of the Unix socket this bridge listens on.
    pub(crate) fn get_socket_path(&self) -> &std::path::Path {
        &self.socket_path
    }
    /// Bind the Unix socket and serve connections forever.
    ///
    /// The select loop multiplexes: accepting new connections (each handled
    /// in a spawned task), joining finished connection handlers, and
    /// running file-validation and file-upload futures queued by handlers
    /// via mpsc channels. Validation tasks are deduplicated by file hash so
    /// the same file is never submitted twice. This function only returns
    /// on setup failure.
    pub(crate) async fn run(self) -> Result<()> {
        let Self {
            socket_path,
            config,
        } = self;
        let socket_path = Path::new(&socket_path);
        debug!("Setting up TaskBridge socket at: {}", socket_path.display());
        if let Some(parent) = socket_path.parent() {
            match fs::create_dir_all(parent) {
                Ok(_) => debug!("Created parent directory: {}", parent.display()),
                Err(e) => {
                    error!(
                        "Failed to create parent directory {}: {}",
                        parent.display(),
                        e
                    );
                    return Err(e.into());
                }
            }
        }
        // Cleanup existing socket if present
        if socket_path.exists() {
            match fs::remove_file(socket_path) {
                Ok(_) => debug!("Removed existing socket file"),
                Err(e) => {
                    error!("Failed to remove existing socket file: {e}");
                    return Err(e.into());
                }
            }
        }
        let listener = match UnixListener::bind(socket_path) {
            Ok(l) => {
                info!("Successfully bound to Unix socket");
                l
            }
            Err(e) => {
                error!("Failed to bind Unix socket: {e}");
                return Err(e.into());
            }
        };
        // allow both owner and group to read/write
        match fs::set_permissions(socket_path, fs::Permissions::from_mode(0o666)) {
            Ok(_) => debug!("Set socket permissions to 0o666"),
            Err(e) => {
                error!("Failed to set socket permissions: {e}");
                return Err(e.into());
            }
        }
        info!("TaskBridge socket created at: {}", socket_path.display());
        let mut handle_stream_futures = FuturesUnordered::new();
        let mut listener_stream = tokio_stream::wrappers::UnixListenerStream::new(listener);
        // Channels through which connection handlers queue background work.
        let (file_validation_futures_tx, mut file_validation_futures_rx) =
            tokio::sync::mpsc::channel::<(String, BoxFuture<anyhow::Result<()>>)>(100);
        let mut file_validation_futures_set = HashSet::new();
        let mut file_validation_futures = FuturesUnordered::new();
        let (file_upload_futures_tx, mut file_upload_futures_rx) =
            tokio::sync::mpsc::channel::<BoxFuture<anyhow::Result<()>>>(100);
        let mut file_upload_futures_set = FuturesUnordered::new();
        loop {
            tokio::select! {
                // New client connection: spawn a handler task.
                Some(res) = listener_stream.next() => {
                    match res {
                        Ok(stream) => {
                            let handle_future = handle_stream(config.clone(), stream, file_upload_futures_tx.clone(), file_validation_futures_tx.clone()).fuse();
                            handle_stream_futures.push(tokio::task::spawn(handle_future));
                        }
                        Err(e) => {
                            error!("Accept failed on Unix socket: {e}");
                        }
                    }
                }
                // Join completed connection handlers (log-only; errors don't stop the loop).
                Some(res) = handle_stream_futures.next() => {
                    match res {
                        Ok(Ok(())) => {
                            debug!("Stream handler completed successfully");
                        }
                        Ok(Err(e)) => {
                            error!("Stream handler failed: {e}");
                        }
                        Err(e) => {
                            error!("Error joining stream handler task: {e}");
                        }
                    }
                }
                // Queue a validation task unless this hash was seen before.
                Some((hash, fut)) = file_validation_futures_rx.recv() => {
                    if file_validation_futures_set.contains(&hash) {
                        debug!("duplicate file validation task for hash: {hash}, skipping");
                        continue;
                    }
                    // we never remove hashes from this set, as we should never
                    // submit the same file for validation twice.
                    file_validation_futures_set.insert(hash.clone());
                    file_validation_futures.push(async move {(hash, tokio::task::spawn(fut).await)});
                }
                Some((hash, res)) = file_validation_futures.next() => {
                    match res {
                        Ok(Ok(())) => {
                            debug!("File validation task for hash {hash} completed successfully");
                        }
                        Ok(Err(e)) => {
                            error!("File validation task for hash {hash} failed: {e}");
                        }
                        Err(e) => {
                            error!("Error joining file validation task for hash {hash}: {e}");
                        }
                    }
                }
                // Spawn queued file uploads and join them as they finish.
                Some(fut) = file_upload_futures_rx.recv() => {
                    file_upload_futures_set.push(tokio::task::spawn(fut));
                }
                Some(res) = file_upload_futures_set.next() => {
                    match res {
                        Ok(Ok(())) => {
                            debug!("File upload task completed successfully");
                        }
                        Ok(Err(e)) => {
                            error!("File upload task failed: {e}");
                        }
                        Err(e) => {
                            error!("Error joining file upload task: {e}");
                        }
                    }
                }
            }
        }
    }
}
/// Handle one client connection on the bridge socket.
///
/// Reads the stream in 1 KiB chunks into a growing buffer, repeatedly
/// extracts complete JSON objects with `json_helper::extract_next_json`,
/// and dispatches each to `handle_message`. Partial objects stay buffered
/// until more bytes arrive; anything still unparsed when the peer closes
/// the connection is logged and discarded.
async fn handle_stream(
    config: TaskBridgeConfig,
    stream: tokio::net::UnixStream,
    file_upload_futures_tx: tokio::sync::mpsc::Sender<BoxFuture<'_, anyhow::Result<()>>>,
    file_validation_futures_tx: tokio::sync::mpsc::Sender<(
        String,
        BoxFuture<'_, anyhow::Result<()>>,
    )>,
) -> Result<()> {
    let addr = stream.peer_addr()?;
    debug!("Received connection from {addr:?}");
    let mut reader = BufReader::new(stream);
    let mut buffer = vec![0; 1024];
    // Accumulates bytes across reads until complete JSON objects emerge.
    let mut data = Vec::new();
    loop {
        // n == 0 signals EOF but we still drain whatever is buffered below.
        let n = match reader.read(&mut buffer).await {
            Ok(0) => {
                debug!("Connection closed by client");
                0
            }
            Ok(n) => {
                debug!("Read {n} bytes from socket");
                n
            }
            Err(e) => {
                bail!("Error reading from stream: {e}");
            }
        };
        data.extend_from_slice(&buffer[..n]);
        debug!("Current data buffer size: {} bytes", data.len());
        if let Ok(data_str) = std::str::from_utf8(&data) {
            debug!("Raw data received: {data_str}");
        } else {
            debug!("Raw data received (non-UTF8): {} bytes", data.len());
        }
        // Consume as many complete JSON objects as the buffer holds.
        let mut current_pos = 0;
        while current_pos < data.len() {
            // Try to find a complete JSON object
            if let Some((json_str, byte_length)) =
                json_helper::extract_next_json(&data[current_pos..])
            {
                let json_str = json_str.to_string();
                // Per-message errors are logged but don't kill the connection.
                if let Err(e) = handle_message(
                    config.clone(),
                    &json_str,
                    file_upload_futures_tx.clone(),
                    file_validation_futures_tx.clone(),
                )
                .await
                {
                    error!("Error handling message: {e}");
                }
                current_pos += byte_length;
                debug!("Advanced position to {current_pos} after processing JSON");
            } else {
                debug!("No complete JSON object found, waiting for more data");
                break;
            }
        }
        // Keep only the unconsumed tail for the next read.
        data = data.split_off(current_pos);
        debug!(
            "Remaining data buffer size after processing: {} bytes",
            data.len()
        );
        if n == 0 {
            if data.is_empty() {
                // No data left to process, we can break
                break;
            } else {
                // We have data but couldn't parse it as complete JSON objects
                // and the connection is closed - log and discard
                if let Ok(unparsed) = std::str::from_utf8(&data) {
                    warn!("Discarding unparseable data after connection close: {unparsed}");
                } else {
                    warn!(
                        "Discarding unparseable binary data after connection close ({} bytes)",
                        data.len()
                    );
                }
                // Break out of the loop
                break;
            }
        }
    }
    Ok(())
}
async fn handle_metric(config: TaskBridgeConfig, input: &MetricInput) -> Result<()> {
debug!("Processing metric message");
for (key, value) in input.metrics.iter() {
debug!("Metric - Key: {key}, Value: {value}");
let _ = config
.metrics_store
.update_metric(
input.task_id.clone(),
key.to_string(),
value.as_f64().unwrap_or(0.0),
)
.await;
}
Ok(())
}
/// Handles one JSON message that refers to task output files.
///
/// The message may carry an `output/save_path` (file to upload) and/or an
/// `output/sha256` (file to validate on-chain); each field that is present
/// enqueues the corresponding boxed future on its channel for background
/// processing. Parsing failures are logged and the function still returns
/// `Ok(())`; only a missing wallet/provider or invalid work units are errors.
///
/// NOTE(review): channel send errors are deliberately discarded (`let _ =`) —
/// presumably the receivers live for the process lifetime; confirm.
async fn handle_file_upload(
    config: TaskBridgeConfig,
    json_str: &str,
    file_upload_futures_tx: tokio::sync::mpsc::Sender<BoxFuture<'_, anyhow::Result<()>>>,
    file_validation_futures_tx: tokio::sync::mpsc::Sender<(
        String,
        BoxFuture<'_, anyhow::Result<()>>,
    )>,
) -> Result<()> {
    debug!("Handling file upload");
    if let Ok(file_info) = serde_json::from_str::<serde_json::Value>(json_str) {
        // A missing task_id is tolerated and labelled "unknown".
        let task_id = file_info["task_id"].as_str().unwrap_or("unknown");
        // Handle file upload if save_path is present
        if let Some(file_name) = file_info["output/save_path"].as_str() {
            info!("Handling file upload for task_id: {task_id}, file: {file_name}");
            // Uploads must be signed, so a node wallet is mandatory here.
            let Some(wallet) = config.node_wallet.as_ref() else {
                bail!("no wallet found; must be set to upload files");
            };
            let _ = file_upload_futures_tx
                .send(Box::pin(file_handler::handle_file_upload(
                    config.docker_storage_path.clone(),
                    task_id.to_string(),
                    file_name.to_string(),
                    wallet.clone(),
                    config.state.clone(),
                    config.ipfs.clone(),
                )))
                .await;
        }
        // Handle file validation if sha256 is present
        if let Some(file_sha) = file_info["output/sha256"].as_str() {
            debug!("Processing file validation message");
            // Missing FLOP counters default to 0.0; output_flops is validated below.
            let output_flops: f64 = file_info["output/output_flops"].as_f64().unwrap_or(0.0);
            let input_flops: f64 = file_info["output/input_flops"].as_f64().unwrap_or(0.0);
            info!(
                "Handling file validation for task_id: {task_id}, sha: {file_sha}, output_flops: {output_flops}, input_flops: {input_flops}"
            );
            // On-chain submission requires both a contract set and a node config.
            if let (Some(contracts), Some(node)) =
                (config.contracts.clone(), config.node_config.clone())
            {
                let provider = match config.node_wallet.as_ref() {
                    Some(wallet) => wallet.provider(),
                    None => {
                        error!("No wallet provider found");
                        return Err(anyhow::anyhow!("No wallet provider found"));
                    }
                };
                // Zero or negative output_flops would submit zero work units;
                // reject the message instead of submitting bogus work.
                if output_flops <= 0.0 {
                    error!("Invalid work units calculation: output_flops ({output_flops}) must be greater than 0.0. Blocking file validation submission.");
                    return Err(anyhow::anyhow!(
                        "Invalid work units: output_flops must be greater than 0.0"
                    ));
                }
                let work_units = output_flops;
                let _ = file_validation_futures_tx
                    .send((
                        file_sha.to_string(),
                        Box::pin(file_handler::handle_file_validation(
                            file_sha.to_string(),
                            contracts.clone(),
                            node.clone(),
                            provider,
                            work_units,
                        )),
                    ))
                    .await;
            } else {
                error!("Missing contracts or node configuration for file validation");
            }
        }
    } else {
        error!("Failed to parse JSON: {json_str}");
    }
    Ok(())
}
async fn handle_message(
config: TaskBridgeConfig,
json_str: &str,
file_upload_futures_tx: tokio::sync::mpsc::Sender<BoxFuture<'_, anyhow::Result<()>>>,
file_validation_futures_tx: tokio::sync::mpsc::Sender<(
String,
BoxFuture<'_, anyhow::Result<()>>,
)>,
) -> Result<()> {
debug!("Extracted JSON object: {json_str}");
if json_str.contains("output/save_path") {
if let Err(e) = handle_file_upload(
config,
json_str,
file_upload_futures_tx,
file_validation_futures_tx,
)
.await
{
error!("Failed to handle file upload: {e}");
}
} else {
debug!("Processing metric message");
match serde_json::from_str::<MetricInput>(json_str) {
Ok(input) => {
if let Err(e) = handle_metric(config, &input).await {
error!("Failed to handle metric: {e}");
}
}
Err(e) => {
error!("Failed to parse metric input: {json_str} {e}");
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::metrics::store::MetricsStore;
use serde_json::json;
use shared::models::metric::MetricKey;
use std::sync::Arc;
use std::time::Duration;
use tempfile::tempdir;
use tokio::io::AsyncWriteExt;
use tokio::net::UnixStream;
#[tokio::test]
async fn test_socket_creation() -> Result<()> {
let temp_dir = tempdir()?;
let socket_path = temp_dir.path().join("test.sock");
let metrics_store = Arc::new(MetricsStore::new());
let state = Arc::new(SystemState::new(None, false, 0).unwrap());
let bridge = TaskBridge::new(
Some(socket_path.to_str().unwrap()),
metrics_store.clone(),
None,
None,
None,
"test_storage_path".to_string(),
state,
None,
)
.unwrap();
// Run the bridge in background
let bridge_handle = tokio::spawn(async move { bridge.run().await });
tokio::time::sleep(Duration::from_millis(100)).await;
// Verify socket exists with correct permissions
assert!(socket_path.exists());
let metadata = fs::metadata(&socket_path)?;
let permissions = metadata.permissions();
assert_eq!(permissions.mode() & 0o777, 0o666);
// Cleanup
bridge_handle.abort();
Ok(())
}
#[tokio::test]
async fn test_client_connection() -> Result<()> {
let temp_dir = tempdir()?;
let socket_path = temp_dir.path().join("test.sock");
let metrics_store = Arc::new(MetricsStore::new());
let state = Arc::new(SystemState::new(None, false, 0).unwrap());
let bridge = TaskBridge::new(
Some(socket_path.to_str().unwrap()),
metrics_store.clone(),
None,
None,
None,
"test_storage_path".to_string(),
state,
None,
)
.unwrap();
// Run bridge in background
let bridge_handle = tokio::spawn(async move { bridge.run().await });
tokio::time::sleep(Duration::from_millis(100)).await;
// Test client connection
let stream = UnixStream::connect(&socket_path).await?;
// Log stream output for debugging
debug!("Connected to stream: {:?}", stream.peer_addr());
assert!(stream.peer_addr().is_ok());
bridge_handle.abort();
Ok(())
}
#[tokio::test]
async fn test_message_sending() -> Result<()> {
let temp_dir = tempdir()?;
let socket_path = temp_dir.path().join("test.sock");
let metrics_store = Arc::new(MetricsStore::new());
let state = Arc::new(SystemState::new(None, false, 0).unwrap());
let bridge = TaskBridge::new(
Some(socket_path.to_str().unwrap()),
metrics_store.clone(),
None,
None,
None,
"test_storage_path".to_string(),
state,
None,
)
.unwrap();
let bridge_handle = tokio::spawn(async move { bridge.run().await });
tokio::time::sleep(Duration::from_millis(100)).await;
let mut stream = UnixStream::connect(&socket_path).await?;
let data = json!({
"task_id": "1234",
"test_label": 10.0,
"test_label2": 20.0,
});
let sample_metric = serde_json::to_string(&data)?;
debug!("Sending {:?}", sample_metric);
let msg = format!("{}{}", sample_metric, "\n");
stream.write_all(msg.as_bytes()).await?;
stream.flush().await?;
tokio::time::sleep(Duration::from_millis(100)).await;
let all_metrics = metrics_store.get_all_metrics().await;
let key = MetricKey {
task_id: "1234".to_string(),
label: "test_label".to_string(),
};
assert!(all_metrics.contains_key(&key));
assert_eq!(all_metrics.get(&key).unwrap(), &10.0);
bridge_handle.abort();
Ok(())
}
#[tokio::test]
async fn test_file_submission() -> Result<()> {
let temp_dir = tempdir()?;
let socket_path = temp_dir.path().join("test.sock");
let metrics_store = Arc::new(MetricsStore::new());
let state = Arc::new(SystemState::new(None, false, 0).unwrap());
let bridge = TaskBridge::new(
Some(socket_path.to_str().unwrap()),
metrics_store.clone(),
None,
None,
None,
"test_storage_path".to_string(),
state,
None,
)
.unwrap();
let bridge_handle = tokio::spawn(async move { bridge.run().await });
tokio::time::sleep(Duration::from_millis(100)).await;
let mut stream = UnixStream::connect(&socket_path).await?;
let json = json!({
"task_id": "1234",
"output/save_path": "test.txt",
"output/sha256": "1234567890",
"output/output_flops": 1500.0,
"output/input_flops": 2500.0,
});
let sample_metric = serde_json::to_string(&json)?;
debug!("Sending {:?}", sample_metric);
let msg = format!("{}{}", sample_metric, "\n");
stream.write_all(msg.as_bytes()).await?;
stream.flush().await?;
tokio::time::sleep(Duration::from_millis(100)).await;
let all_metrics = metrics_store.get_all_metrics().await;
assert!(
all_metrics.is_empty(),
"Expected metrics to be empty but found: {:?}",
all_metrics
);
bridge_handle.abort();
Ok(())
}
#[tokio::test]
async fn test_multiple_clients() -> Result<()> {
let temp_dir = tempdir()?;
let socket_path = temp_dir.path().join("test.sock");
let metrics_store = Arc::new(MetricsStore::new());
let state = Arc::new(SystemState::new(None, false, 0).unwrap());
let bridge = TaskBridge::new(
Some(socket_path.to_str().unwrap()),
metrics_store.clone(),
None,
None,
None,
"test_storage_path".to_string(),
state,
None,
)
.unwrap();
let bridge_handle = tokio::spawn(async move { bridge.run().await });
tokio::time::sleep(Duration::from_millis(100)).await;
let mut clients = vec![];
for _ in 0..3 {
let stream = UnixStream::connect(&socket_path).await?;
clients.push(stream);
}
for client in &clients {
assert!(client.peer_addr().is_ok());
}
bridge_handle.abort();
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/taskbridge/mod.rs | crates/worker/src/docker/taskbridge/mod.rs | pub(crate) mod bridge;
pub(crate) mod file_handler;
mod json_helper;
pub(crate) use bridge::TaskBridge;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/docker/taskbridge/file_handler.rs | crates/worker/src/docker/taskbridge/file_handler.rs | use crate::state::system_state::SystemState;
use alloy::primitives::{Address, U256};
use anyhow::Context as _;
use anyhow::Result;
use log::{debug, error, info, warn};
use reqwest::header::HeaderValue;
use reqwest::Client;
use rust_ipfs::Ipfs;
use shared::models::node::Node;
use shared::models::storage::RequestUploadRequest;
use shared::security::request_signer::sign_request_with_nonce;
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::contracts::helpers::utils::retry_call;
use shared::web3::wallet::{Wallet, WalletProvider};
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
/// Handles a file upload request
pub(crate) async fn handle_file_upload(
storage_path: String,
task_id: String,
file_name: String,
wallet: Wallet,
state: Arc<SystemState>,
ipfs: Option<Ipfs>,
) -> Result<()> {
info!("📄 Received file upload request: {file_name}");
// Get orchestrator endpoint
let endpoint = match state.get_heartbeat_endpoint().await {
Some(ep) => {
let clean_ep = ep.replace("/heartbeat", "");
info!("Using orchestrator endpoint: {clean_ep}");
clean_ep
}
None => {
error!("Orchestrator endpoint is not set - cannot upload file.");
return Err(anyhow::anyhow!("Orchestrator endpoint not set"));
}
};
// Clean filename by removing /data prefix if present
let clean_file_name = file_name.trim_start_matches("/data/");
info!("Clean file name: {clean_file_name}");
let task_dir = format!("prime-task-{task_id}");
let file_path = Path::new(&storage_path)
.join(&task_dir)
.join("data")
.join(clean_file_name);
let file = file_path.to_string_lossy().to_string();
info!("Full file path: {file}");
// Get file size
let file_size = match std::fs::metadata(&file) {
Ok(metadata) => {
let size = metadata.len();
info!("File size: {size} bytes");
size
}
Err(e) => {
error!("Failed to get file metadata: {e}");
return Err(anyhow::anyhow!("Failed to get file metadata: {}", e));
}
};
// Calculate SHA
let file_sha = match tokio::fs::read(&file).await {
Ok(contents) => {
use sha2::{Digest, Sha256};
let mut hasher = Sha256::new();
hasher.update(&contents);
let sha = format!("{:x}", hasher.finalize());
info!("Calculated file SHA: {sha}");
sha
}
Err(e) => {
error!("Failed to read file for SHA calculation: {e}");
return Err(anyhow::anyhow!("Failed to read file: {}", e));
}
};
// Create upload request
let client = Client::new();
let request = RequestUploadRequest {
file_name: file_name.to_string(),
file_size,
file_type: "application/json".to_string(), // Assume JSON
sha256: file_sha.clone(),
task_id: task_id.to_string(),
};
let signed_url = get_signed_url_for_upload(request, wallet.clone(), &endpoint, &client)
.await
.context("failed to get signed URL for upload")?;
info!("Reading file contents for S3 upload: {file}");
let file_contents = match tokio::fs::read(&file).await {
Ok(contents) => {
info!("Successfully read file ({} bytes)", contents.len());
contents
}
Err(e) => {
anyhow::bail!("failed to read file: {}", e);
}
};
if let Some(ipfs) = ipfs {
handle_file_upload_ipfs(file_contents.clone(), &ipfs)
.await
.context("failed to upload file to IPFS")?;
}
handle_file_upload_s3(file_contents, signed_url, &client)
.await
.context("failed to upload file to S3")
}
/// Requests a pre-signed upload URL from the orchestrator's
/// `/storage/request-upload` endpoint, signing the request with the node
/// wallet and retrying with exponential backoff.
///
/// Returns the signed URL on success; after `MAX_RETRIES` failed attempts,
/// returns the last error observed.
async fn get_signed_url_for_upload(
    request: RequestUploadRequest,
    wallet: Wallet,
    endpoint: &str,
    client: &Client,
) -> Result<String> {
    // Retry configuration
    const MAX_RETRIES: usize = 5;
    const INITIAL_RETRY_DELAY_MS: u64 = 1000; // 1 second
    // Retry loop for getting signed URL
    let mut retry_count = 0;
    let mut last_error = None;
    let mut signed_url = None;
    info!("Starting signed URL request with max {MAX_RETRIES} retries");
    while retry_count < MAX_RETRIES {
        if retry_count > 0 {
            let delay = INITIAL_RETRY_DELAY_MS * (1 << retry_count); // Exponential backoff
            warn!(
                "Retrying upload request (attempt {}/{}), waiting for {}ms",
                retry_count + 1,
                MAX_RETRIES,
                delay
            );
            tokio::time::sleep(Duration::from_millis(delay)).await;
        }
        // Sign request
        // Serialization failure is not retried: it is deterministic and
        // would fail identically on every attempt.
        let request_value = match serde_json::to_value(&request) {
            Ok(val) => val,
            Err(e) => {
                error!("Failed to serialize request: {e}");
                return Err(anyhow::anyhow!(e));
            }
        };
        // A fresh signature (with a fresh nonce) is produced per attempt.
        let signature =
            match sign_request_with_nonce("/storage/request-upload", &wallet, Some(&request_value))
                .await
            {
                Ok(sig) => {
                    debug!("Request signed successfully: {}", sig.signature);
                    sig
                }
                Err(e) => {
                    error!("Failed to sign request: {e}");
                    last_error = Some(anyhow::anyhow!(e.to_string()));
                    retry_count += 1;
                    continue;
                }
            };
        // Prepare headers
        let mut headers = reqwest::header::HeaderMap::new();
        match HeaderValue::from_str(&wallet.address().to_string()) {
            Ok(val) => {
                headers.insert("x-address", val);
                debug!("Added x-address header: {}", wallet.address());
            }
            Err(e) => {
                error!("Failed to create header value: {e}");
                last_error = Some(anyhow::anyhow!(e));
                retry_count += 1;
                continue;
            }
        }
        match HeaderValue::from_str(&signature.signature) {
            Ok(val) => {
                headers.insert("x-signature", val);
                debug!("Added x-signature header");
            }
            Err(e) => {
                error!("Failed to create signature header: {e}");
                last_error = Some(anyhow::anyhow!(e));
                retry_count += 1;
                continue;
            }
        }
        // Create upload URL
        let upload_url = format!("{endpoint}/storage/request-upload");
        debug!("Requesting signed URL from: {upload_url}");
        // Send request
        debug!(
            "Sending request for signed URL (attempt {}/{})",
            retry_count + 1,
            MAX_RETRIES
        );
        let response = match client
            .post(&upload_url)
            .json(&signature.data)
            .headers(headers)
            .send()
            .await
        {
            Ok(resp) => {
                debug!("Received response with status: {}", resp.status());
                resp
            }
            Err(e) => {
                error!("Failed to send upload request: {e}");
                last_error = Some(anyhow::anyhow!(e));
                retry_count += 1;
                continue;
            }
        };
        // Process response
        let json = match response.json::<serde_json::Value>().await {
            Ok(j) => {
                debug!("Parsed response JSON: {j:?}");
                j
            }
            Err(e) => {
                error!("Failed to parse response: {e}");
                last_error = Some(anyhow::anyhow!(e));
                retry_count += 1;
                continue;
            }
        };
        if let Some(url) = json["signed_url"].as_str() {
            signed_url = Some(url.to_string());
            debug!("Got signed URL for upload (length: {})", url.len());
            debug!("Signed URL: {url}");
            break;
        } else {
            error!("Missing signed_url in response: {json:?}");
            last_error = Some(anyhow::anyhow!("Missing signed_url in response"));
            retry_count += 1;
            continue;
        }
    }
    let Some(signed_url) = signed_url else {
        error!("Failed to get signed URL after {MAX_RETRIES} attempts");
        return Err(last_error.unwrap_or_else(|| {
            anyhow::anyhow!("Failed to get signed URL after {} attempts", MAX_RETRIES)
        }));
    };
    Ok(signed_url)
}
/// Uploads `file_contents` to S3 via a pre-signed URL, retrying with
/// exponential backoff. Returns `Ok(())` on the first successful PUT, or the
/// last error observed after `MAX_RETRIES` failed attempts.
async fn handle_file_upload_s3(
    file_contents: Vec<u8>,
    signed_url: String,
    client: &Client,
) -> Result<()> {
    // Retry configuration
    const MAX_RETRIES: usize = 5;
    const INITIAL_RETRY_DELAY_MS: u64 = 1000; // 1 second
    let mut last_error = None;
    debug!("Starting S3 upload with max {MAX_RETRIES} retries");
    for attempt in 0..MAX_RETRIES {
        // Exponential backoff before every attempt after the first.
        if attempt > 0 {
            let delay = INITIAL_RETRY_DELAY_MS * (1 << attempt);
            warn!(
                "Retrying S3 upload (attempt {}/{}), waiting for {}ms",
                attempt + 1,
                MAX_RETRIES,
                delay
            );
            tokio::time::sleep(Duration::from_millis(delay)).await;
        }
        info!(
            "Uploading file to S3 (attempt {}/{})",
            attempt + 1,
            MAX_RETRIES
        );
        let send_result = client
            .put(&signed_url)
            .body(file_contents.clone())
            .header("Content-Type", "application/json")
            .send()
            .await;
        match send_result {
            Ok(resp) => {
                let status = resp.status();
                debug!("S3 upload response status: {status}");
                if status.is_success() {
                    info!("Successfully uploaded file to S3");
                    return Ok(());
                }
                let error_text = resp
                    .text()
                    .await
                    .unwrap_or_else(|_| "Unknown error".to_string());
                error!("S3 upload failed with status {status}: {error_text}");
                last_error = Some(anyhow::anyhow!(
                    "S3 upload failed: {} - {}",
                    status,
                    error_text
                ));
            }
            Err(e) => {
                error!("Failed to upload to S3: {e}");
                last_error = Some(anyhow::anyhow!(e));
            }
        }
    }
    error!("Failed to upload file to S3 after {MAX_RETRIES} attempts");
    Err(last_error.unwrap_or_else(|| {
        anyhow::anyhow!("Failed to upload file to S3 after {} attempts", MAX_RETRIES)
    }))
}
/// Stores `file_contents` as a raw DAG block on the local IPFS node and
/// announces the resulting CID to the network. Returns the CID on success.
async fn handle_file_upload_ipfs(file_contents: Vec<u8>, ipfs: &Ipfs) -> Result<cid::Cid> {
    let cid = ipfs
        .put_dag(file_contents)
        .codec(rust_ipfs::block::BlockCodec::Raw)
        .await
        .context("failed to put data into IPFS")?;
    info!("successfully uploaded file to IPFS with CID: {cid}");
    // Announce ourselves as a provider so other peers can fetch the block.
    let announce = ipfs.provide(cid);
    announce.await.context("failed to provide CID on IPFS")?;
    Ok(cid)
}
/// Handles a file validation request by submitting the file's SHA-256 as a
/// work submission to the compute pool contract.
///
/// # Errors
/// Returns an error when the SHA hex string or node address cannot be
/// parsed, when the work-submission call cannot be built, or when the
/// transaction still fails after retries.
pub(crate) async fn handle_file_validation(
    file_sha: String,
    contracts: Contracts<WalletProvider>,
    node: Node,
    provider: WalletProvider,
    work_units: f64,
) -> Result<()> {
    info!("📄 Received file SHA for validation: {file_sha}");
    info!(
        "Node address: {}, Pool ID: {}",
        node.id, node.compute_pool_id
    );
    let pool_id = node.compute_pool_id;
    let node_address = &node.id;
    // The SHA arrives hex-encoded; the contract call expects raw bytes.
    let decoded_sha = match hex::decode(file_sha) {
        Ok(sha) => {
            debug!("Decoded SHA bytes: {sha:?}");
            sha
        }
        Err(e) => {
            error!("Failed to decode SHA hex string: {e}");
            return Err(anyhow::anyhow!("Failed to decode SHA: {}", e));
        }
    };
    let node_addr = match Address::from_str(node_address) {
        Ok(addr) => addr,
        Err(e) => {
            error!("Failed to parse node address: {e}");
            return Err(anyhow::anyhow!("Invalid node address: {}", e));
        }
    };
    let pool_id_u256 = U256::from(pool_id);
    // Propagate build failures instead of unwrapping: this runs inside a
    // long-lived worker task, and a malformed call should surface as an
    // error rather than panic the task. `decoded_sha` is already a Vec, so
    // no extra `.to_vec()` copy is needed.
    let call = contracts
        .compute_pool
        .build_work_submission_call(
            pool_id_u256,
            node_addr,
            decoded_sha,
            U256::from(work_units),
        )
        .map_err(|e| anyhow::anyhow!("Failed to build work submission call: {:?}", e))?;
    let tx = retry_call(call, 20, provider.clone(), None)
        .await
        .map_err(|e| anyhow::anyhow!("Failed to submit work: {}", e))?;
    info!("Successfully submitted work to blockchain: {tx:?}");
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/metrics/store.rs | crates/worker/src/metrics/store.rs | use anyhow::Result;
use shared::models::metric::{MetricEntry, MetricKey};
use std::collections::HashMap;
use tokio::sync::RwLock;
/// Thread-safe in-memory store holding the latest value for each
/// `(task_id, label)` metric key, shared across async tasks.
#[derive(Debug)]
pub(crate) struct MetricsStore {
    // Latest value per metric key; an async RwLock lets many readers
    // snapshot metrics concurrently while writers update them.
    metrics: RwLock<HashMap<MetricKey, f64>>,
}
impl MetricsStore {
    /// Creates an empty store.
    pub(crate) fn new() -> Self {
        let metrics = RwLock::new(HashMap::new());
        Self { metrics }
    }

    /// Records `value` under `(task_id, label)`, overwriting any previous
    /// value for that key. Rejects NaN and infinite values.
    pub(crate) async fn update_metric(
        &self,
        task_id: String,
        label: String,
        value: f64,
    ) -> Result<()> {
        if !value.is_finite() {
            anyhow::bail!("Value must be a finite number");
        }
        let key = MetricKey { task_id, label };
        self.metrics.write().await.insert(key, value);
        Ok(())
    }

    /// Returns a snapshot of every metric currently recorded for `task_id`.
    pub(crate) async fn get_metrics_for_task(&self, task_id: String) -> Vec<MetricEntry> {
        let guard = self.metrics.read().await;
        guard
            .iter()
            .filter_map(|(key, &value)| {
                (key.task_id == task_id).then(|| MetricEntry {
                    key: key.clone(),
                    value,
                })
            })
            .collect()
    }

    /// Drops every metric recorded for `task_id`.
    pub(crate) async fn clear_metrics_for_task(&self, task_id: &str) {
        self.metrics
            .write()
            .await
            .retain(|key, _| key.task_id != task_id);
    }

    /// Returns a snapshot of the entire metric map.
    #[allow(dead_code)]
    pub(crate) async fn get_all_metrics(&self) -> HashMap<MetricKey, f64> {
        self.metrics.read().await.clone()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Exercises insert, per-task filtering, global snapshot, and rejection
    // of non-finite values.
    #[tokio::test]
    async fn test_metrics_store() -> Result<()> {
        let store = MetricsStore::new();
        store
            .update_metric("task1".to_string(), "progress".to_string(), 1.0)
            .await?;
        store
            .update_metric("task1".to_string(), "cpu_usage".to_string(), 90.0)
            .await?;
        store
            .update_metric("task2".to_string(), "test".to_string(), 2.0)
            .await?;
        // Only task1's two metrics should be returned by the task filter.
        let metrics = store.get_metrics_for_task("task1".to_string()).await;
        assert_eq!(metrics.len(), 2);
        let all_metrics = store.get_all_metrics().await;
        assert_eq!(all_metrics.len(), 3);
        // Test invalid value
        let result = store
            .update_metric("task1".to_string(), "invalid".to_string(), f64::INFINITY)
            .await;
        assert!(result.is_err());
        Ok(())
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/metrics/mod.rs | crates/worker/src/metrics/mod.rs | pub(crate) mod store;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/console/mod.rs | crates/worker/src/console/mod.rs | pub(crate) use console_logger::*;
mod console_logger;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/console/console_logger.rs | crates/worker/src/console/console_logger.rs | use console::{style, Term};
use std::cmp;
use unicode_width::UnicodeWidthStr;
/// Stateless helper providing styled, box-drawn console output for the CLI.
pub(crate) struct Console;
impl Console {
    /// Maximum content width for the box.
    const MAX_WIDTH: usize = 80;

    /// Calculates the content width for boxes.
    /// It uses the available terminal width (minus a margin) and caps it at MAX_WIDTH.
    fn get_content_width() -> usize {
        let term_width = Term::stdout().size().1 as usize;
        // Leave a margin of 10 columns when the terminal is wide enough.
        let available = match term_width {
            w if w > 10 => w - 10,
            w => w,
        };
        cmp::min(available, Self::MAX_WIDTH)
    }

    /// Centers a given text within a given width based on its display width.
    /// Text wider than `width` is returned unchanged.
    fn center_text(text: &str, width: usize) -> String {
        let text_width = UnicodeWidthStr::width(text);
        let padding = width.saturating_sub(text_width);
        let left = padding / 2;
        let right = padding - left;
        format!("{}{}{}", " ".repeat(left), text, " ".repeat(right))
    }

    /// Prints a section header as an aligned box.
    pub(crate) fn section(title: &str) {
        let width = Self::get_content_width();
        let horizontal = "═".repeat(width);
        let top = format!("╔{horizontal}╗");
        let body = format!("║{}║", Self::center_text(title, width));
        let bottom = format!("╚{horizontal}╝");
        println!();
        for line in [top, body, bottom] {
            println!("{}", style(line).white().bold());
        }
    }

    /// Prints a sub-title.
    pub(crate) fn title(text: &str) {
        println!();
        println!("{}", style(text).white().bold());
    }

    /// Prints an informational message as a dimmed label/value pair.
    pub(crate) fn info(label: &str, value: &str) {
        println!("{}: {}", style(label).dim().white(), style(value).white());
    }

    /// Prints a success message.
    pub(crate) fn success(text: &str) {
        println!("{} {}", style("✓").green().bold(), style(text).green());
    }

    /// Prints a warning message.
    pub(crate) fn warning(text: &str) {
        println!("{} {}", style("⚠").yellow().bold(), style(text).yellow());
    }

    /// Prints a user error message.
    /// This is a special case where the error is user-facing (e.g., missing GPU, configuration issues)
    /// rather than a system error. These errors are not logged to central logging systems
    /// and are only displayed to the user to help them resolve the issue.
    /// For actual system errors that should be tracked, use proper error logging instead.
    pub(crate) fn user_error(text: &str) {
        println!("{} {}", style("✗").red().bold(), style(text).red());
    }

    /// Prints a progress message.
    pub(crate) fn progress(text: &str) {
        println!("{} {}", style("→").cyan().bold(), style(text).cyan());
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/utils/mod.rs | crates/worker/src/utils/mod.rs | pub(crate) mod logging;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/utils/logging.rs | crates/worker/src/utils/logging.rs | use log::{debug, LevelFilter};
use tracing_subscriber::filter::EnvFilter as TracingEnvFilter;
use tracing_subscriber::fmt;
use tracing_subscriber::prelude::*;
use url::Url;
use crate::cli::command::Commands;
use crate::cli::Cli;
use anyhow::Result;
use std::time::{SystemTime, UNIX_EPOCH};
use time::macros::format_description;
use tracing_subscriber::fmt::time::FormatTime;
// Formats tracing timestamps as a plain hh:mm:ss clock (derived from the
// Unix epoch, so effectively UTC).
struct SimpleTimeFormatter;
impl FormatTime for SimpleTimeFormatter {
    /// Writes the current wall-clock time as `hh:mm:ss`.
    fn format_time(&self, w: &mut tracing_subscriber::fmt::format::Writer<'_>) -> std::fmt::Result {
        // Seconds since the Unix epoch; a clock before the epoch collapses to 0.
        let secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        // Convert to a time::OffsetDateTime, falling back to the epoch on
        // out-of-range values.
        let datetime = time::OffsetDateTime::from_unix_timestamp(secs as i64)
            .unwrap_or(time::OffsetDateTime::UNIX_EPOCH);
        // Format as hh:mm:ss; formatting failures render as placeholders.
        let formatted = datetime
            .format(format_description!("[hour]:[minute]:[second]"))
            .unwrap_or_else(|_| String::from("??:??:??"));
        write!(w, "{formatted}")
    }
}
/// Initializes the tracing/log stack for the worker.
///
/// Builds an env-filter from the CLI `--log-level` (default `info`) with
/// noisy dependency crates clamped to warn/error, installs a compact console
/// formatter, and — when `--loki-url` is given on the `run` command — also
/// ships logs to Loki labelled with worker metadata (version, external IP,
/// compute pool, port).
///
/// # Errors
/// Returns an error when the log level, a filter directive, or the Loki URL
/// fails to parse, or when the Loki layer cannot be built.
pub fn setup_logging(cli: Option<&Cli>) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Defaults used when no CLI (or a non-`run` command) is provided.
    let mut log_level = LevelFilter::Info;
    let mut loki_url: Option<String> = None;
    let mut external_ip = None;
    let mut compute_pool = None;
    let mut port = None;
    // Extract command-specific parameters if CLI is provided
    if let Some(cli) = cli {
        if let Commands::Run {
            external_ip: cmd_external_ip,
            port: cmd_port,
            compute_pool_id: cmd_compute_pool_id,
            loki_url: cmd_loki_url,
            log_level: cmd_log_level,
            ..
        } = &cli.command
        {
            compute_pool = Some(*cmd_compute_pool_id);
            external_ip = Some(cmd_external_ip.clone());
            port = Some(*cmd_port);
            loki_url = cmd_loki_url.clone();
            // Absent --log-level keeps the Info default.
            if let Some(level) = cmd_log_level {
                log_level = level.parse()?;
            }
        }
    }
    // Base filter at the requested level, with chatty dependencies capped.
    let env_filter = TracingEnvFilter::from_default_env()
        .add_directive(format!("{log_level}").parse()?)
        .add_directive("reqwest=warn".parse()?)
        .add_directive("hyper=warn".parse()?)
        .add_directive("hyper_util=warn".parse()?)
        .add_directive("bollard=warn".parse()?)
        .add_directive("alloy=warn".parse()?)
        .add_directive("quinn=error".parse()?)
        .add_directive("quinn_proto=error".parse()?)
        .add_directive("tracing::span=warn".parse()?);
    // Compact console output: level + hh:mm:ss timestamp, no targets/files.
    let fmt_layer = fmt::layer()
        .with_target(false)
        .with_level(true)
        .with_ansi(true)
        .with_thread_ids(false)
        .with_thread_names(false)
        .with_file(false)
        .with_line_number(false)
        .with_timer(SimpleTimeFormatter)
        .compact();
    let registry = tracing_subscriber::registry()
        .with(env_filter)
        .with(fmt_layer);
    if let Some(loki_url_str) = loki_url {
        let loki_url_parsed = Url::parse(&loki_url_str)?;
        let ip: String = match external_ip {
            Some(Some(ip_addr)) => ip_addr.to_string(),
            _ => "Unknown".to_string(),
        };
        let (loki_layer, task) = tracing_loki::builder()
            .label("app", "prime-worker")?
            .label("version", env!("CARGO_PKG_VERSION"))?
            .label("external_ip", ip)?
            .label("compute_pool", compute_pool.unwrap_or_default().to_string())?
            .label("port", port.unwrap_or_default().to_string())?
            .build_url(loki_url_parsed)?;
        // The Loki layer ships logs via this background task.
        tokio::spawn(task);
        registry.with(loki_layer).init();
        debug!("Logging to console and Loki at {loki_url_str}");
    } else {
        registry.init();
    }
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/operations/mod.rs | crates/worker/src/operations/mod.rs | pub(crate) mod compute_node;
pub(crate) mod heartbeat;
pub(crate) mod provider;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/operations/compute_node.rs | crates/worker/src/operations/compute_node.rs | use crate::{console::Console, state::system_state::SystemState};
use alloy::{primitives::utils::keccak256 as keccak, primitives::U256, signers::Signer};
use anyhow::Result;
use shared::web3::wallet::Wallet;
use shared::web3::{contracts::core::builder::Contracts, wallet::WalletProvider};
use std::sync::Arc;
use tokio::time::{sleep, Duration};
use tokio_util::sync::CancellationToken;
/// Chain-facing operations for a worker's compute node: registration and
/// on-chain status/reward monitoring. Borrows the provider and node wallets
/// for lifetime `'c`.
pub(crate) struct ComputeNodeOperations<'c> {
    provider_wallet: &'c Wallet,
    node_wallet: &'c Wallet,
    contracts: Contracts<WalletProvider>,
    system_state: Arc<SystemState>,
}
impl<'c> ComputeNodeOperations<'c> {
    /// Creates a new set of compute-node operations backed by the given
    /// wallets, contract bindings and shared system state.
    pub(crate) fn new(
        provider_wallet: &'c Wallet,
        node_wallet: &'c Wallet,
        contracts: Contracts<WalletProvider>,
        system_state: Arc<SystemState>,
    ) -> Self {
        Self {
            provider_wallet,
            node_wallet,
            contracts,
            system_state,
        }
    }

    /// Spawns a background task that polls the chain every 5 seconds and logs
    /// changes to this node's pool membership, validation status and rewards
    /// for `pool_id`.
    ///
    /// If the chain reports the node as inactive while the local heartbeat is
    /// running, the shared system state is set to "not running" so the
    /// heartbeat shuts down. The task exits when `cancellation_token` fires.
    pub(crate) fn start_monitoring(
        &self,
        cancellation_token: CancellationToken,
        pool_id: u32,
    ) -> Result<()> {
        let provider_address = self.provider_wallet.wallet.default_signer().address();
        let node_address = self.node_wallet.wallet.default_signer().address();
        let contracts = self.contracts.clone();
        let system_state = self.system_state.clone();
        let mut last_active = false;
        let mut last_validated = false;
        let mut last_claimable: Option<U256> = None;
        let mut last_locked: Option<U256> = None;
        let mut first_check = true;
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = cancellation_token.cancelled() => {
                        Console::info("Monitor", "Shutting down node status monitor...");
                        break;
                    }
                    _ = async {
                        match contracts.compute_registry.get_node(provider_address, node_address).await {
                            Ok((active, validated)) => {
                                // Log pool membership on the first poll and on every change.
                                if first_check || active != last_active {
                                    if !first_check {
                                        Console::info("🔄 Chain Sync - Pool membership changed", &format!("From {last_active} to {active}"));
                                    } else {
                                        Console::info("🔄 Chain Sync - Node pool membership", &format!("{active}"));
                                    }
                                    last_active = active;
                                }
                                // If the chain dropped us from the pool, stop the local heartbeat.
                                let is_running = system_state.is_running().await;
                                if !active && is_running {
                                    Console::warning("Node is no longer in pool, shutting down heartbeat...");
                                    if let Err(e) = system_state.set_running(false, None).await {
                                        log::error!("Failed to set running to false: {e:?}");
                                    }
                                }
                                // Log validation status on the first poll and on every change.
                                if first_check || validated != last_validated {
                                    if !first_check {
                                        Console::info("🔄 Chain Sync - Validation changed", &format!("From {last_validated} to {validated}"));
                                    } else {
                                        Console::info("🔄 Chain Sync - Node validation", &format!("{validated}"));
                                    }
                                    last_validated = validated;
                                }
                                // Check rewards for the current compute pool.
                                match contracts.compute_pool.calculate_node_rewards(
                                    U256::from(pool_id),
                                    node_address,
                                ).await {
                                    Ok((claimable, locked)) => {
                                        // Report only on the first read or when either amount changed.
                                        if last_claimable != Some(claimable) || last_locked != Some(locked) {
                                            last_claimable = Some(claimable);
                                            last_locked = Some(locked);
                                            // Convert 18-decimal token amounts to whole-token floats for
                                            // display; precision loss is acceptable for logging.
                                            let claimable_formatted = claimable.to_string().parse::<f64>().unwrap_or(0.0) / 1e18;
                                            let locked_formatted = locked.to_string().parse::<f64>().unwrap_or(0.0) / 1e18;
                                            Console::info("Rewards", &format!("{claimable_formatted} claimable, {locked_formatted} locked"));
                                        }
                                    }
                                    Err(e) => {
                                        log::debug!("Failed to check rewards for pool {pool_id}: {e}");
                                    }
                                }
                                first_check = false;
                            }
                            Err(e) => {
                                log::error!("Failed to get node status: {e}");
                            }
                        }
                        sleep(Duration::from_secs(5)).await;
                    } => {}
                }
            }
        });
        Ok(())
    }

    /// Returns `true` if querying the compute registry for this
    /// (provider, node) pair succeeds; any registry error is treated as
    /// "node does not exist".
    pub(crate) async fn check_compute_node_exists(
        &self,
    ) -> Result<bool, Box<dyn std::error::Error>> {
        let compute_node = self
            .contracts
            .compute_registry
            .get_node(
                self.provider_wallet.wallet.default_signer().address(),
                self.node_wallet.wallet.default_signer().address(),
            )
            .await;
        Ok(compute_node.is_ok())
    }

    /// Registers this node with the Prime Network contract.
    ///
    /// The node wallet signs `keccak(provider_address || node_address)` to
    /// prove control of the node key. Returns `Ok(true)` when a registration
    /// transaction was submitted, `Ok(false)` when the node already exists.
    pub(crate) async fn add_compute_node(
        &self,
        compute_units: U256,
    ) -> Result<bool, Box<dyn std::error::Error>> {
        Console::title("🔄 Adding compute node");
        if self.check_compute_node_exists().await? {
            return Ok(false);
        }
        Console::progress("Adding compute node");
        let provider_address = self.provider_wallet.wallet.default_signer().address();
        let node_address = self.node_wallet.wallet.default_signer().address();
        // Digest binds the node key to this provider; signed by the node wallet.
        let digest = keccak([provider_address.as_slice(), node_address.as_slice()].concat());
        let signature = self
            .node_wallet
            .signer
            .sign_message(digest.as_slice())
            .await?
            .as_bytes();
        let add_node_tx = self
            .contracts
            .prime_network
            .add_compute_node(node_address, compute_units, signature.to_vec())
            .await?;
        Console::success(&format!("Add node tx: {add_node_tx:?}"));
        Ok(true)
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/operations/provider.rs | crates/worker/src/operations/provider.rs | use crate::console::Console;
use alloy::primitives::utils::format_ether;
use alloy::primitives::{Address, U256};
use log::error;
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::wallet::{Wallet, WalletProvider};
use std::io::Write;
use std::{fmt, io};
use tokio::time::{sleep, Duration};
use tokio_util::sync::CancellationToken;
/// Chain-side operations for a compute provider: registration, staking and
/// background monitoring of stake, token balance and whitelist status.
pub(crate) struct ProviderOperations {
    // Provider wallet; signs transactions and supplies the provider address.
    wallet: Wallet,
    // Contract bindings (compute registry, AI token, prime network, stake manager, ...).
    contracts: Contracts<WalletProvider>,
    // When true, interactive confirmation prompts are skipped and accepted.
    auto_accept: bool,
}
impl ProviderOperations {
    /// Creates a new set of provider operations.
    pub(crate) fn new(
        wallet: Wallet,
        contracts: Contracts<WalletProvider>,
        auto_accept: bool,
    ) -> Self {
        Self {
            wallet,
            contracts,
            auto_accept,
        }
    }
    /// Asks a yes/no question on stdin; returns `true` only for an explicit
    /// "y" (case-insensitive). Always returns `true` when `auto_accept` is set.
    fn prompt_user_confirmation(&self, message: &str) -> bool {
        if self.auto_accept {
            return true;
        }
        print!("{message} [y/N]: ");
        io::stdout().flush().unwrap();
        let mut input = String::new();
        if io::stdin().read_line(&mut input).is_ok() {
            input.trim().to_lowercase() == "y"
        } else {
            false
        }
    }
    /// Spawns a background task that polls the chain every 5 seconds and logs
    /// changes to the provider's stake, token balance and whitelist status.
    /// The task exits when `cancellation_token` is cancelled.
    pub(crate) fn start_monitoring(&self, cancellation_token: CancellationToken) {
        let provider_address = self.wallet.wallet.default_signer().address();
        let contracts = self.contracts.clone();
        // Only start monitoring if we have a stake manager
        let mut last_stake = U256::ZERO;
        let mut last_balance = U256::ZERO;
        let mut last_whitelist_status = false;
        let mut first_check = true;
        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = cancellation_token.cancelled() => {
                        Console::info("Monitor", "Shutting down provider status monitor...");
                        break;
                    }
                    _ = async {
                        let Some(stake_manager) = contracts.stake_manager.as_ref() else {
                            Console::user_error("Cannot start monitoring - stake manager not initialized");
                            return;
                        };
                        // Monitor stake. (The `Some(..)`/`None` results of these
                        // match expressions are discarded; they exist only for
                        // their logging side effects.)
                        match stake_manager.get_stake(provider_address).await {
                            Ok(stake) => {
                                if first_check || stake != last_stake {
                                    Console::info("🔄 Chain Sync - Provider stake", &format_ether(stake));
                                    if !first_check {
                                        // A decrease is unexpected; warn about possible slashing.
                                        if stake < last_stake {
                                            Console::warning(&format!("Stake decreased - possible slashing detected: From {} to {}",
                                                format_ether(last_stake),
                                                format_ether(stake)
                                            ));
                                            if stake == U256::ZERO {
                                                Console::warning("Stake is 0 - you might have to restart the node to increase your stake (if you still have balance left)");
                                            }
                                        } else {
                                            Console::info("🔄 Chain Sync - Stake changed", &format!("From {} to {}",
                                                format_ether(last_stake),
                                                format_ether(stake)
                                            ));
                                        }
                                    }
                                    last_stake = stake;
                                }
                                Some(stake)
                            },
                            Err(e) => {
                                error!("Failed to get stake: {e}");
                                None
                            }
                        };
                        // Monitor balance
                        match contracts.ai_token.balance_of(provider_address).await {
                            Ok(balance) => {
                                if first_check || balance != last_balance {
                                    Console::info("🔄 Chain Sync - Balance", &format_ether(balance));
                                    if !first_check {
                                        Console::info("🔄 Chain Sync - Balance changed", &format!("From {} to {}",
                                            format_ether(last_balance),
                                            format_ether(balance)
                                        ));
                                    }
                                    last_balance = balance;
                                }
                                Some(balance)
                            },
                            Err(e) => {
                                error!("Failed to get balance: {e}");
                                None
                            }
                        };
                        // Monitor whitelist status
                        match contracts.compute_registry.get_provider(provider_address).await {
                            Ok(provider) => {
                                if first_check || provider.is_whitelisted != last_whitelist_status {
                                    Console::info("🔄 Chain Sync - Whitelist status", &format!("{}", provider.is_whitelisted));
                                    if !first_check {
                                        Console::info("🔄 Chain Sync - Whitelist status changed", &format!("From {} to {}",
                                            last_whitelist_status,
                                            provider.is_whitelisted
                                        ));
                                    }
                                    last_whitelist_status = provider.is_whitelisted;
                                }
                            },
                            Err(e) => {
                                error!("Failed to get provider whitelist status: {e}");
                            }
                        };
                        first_check = false;
                        sleep(Duration::from_secs(5)).await;
                    } => {}
                }
            }
        });
    }
    /// Returns whether this provider is registered in the compute registry
    /// (a registered provider has a non-default on-chain address).
    pub(crate) async fn check_provider_exists(&self) -> Result<bool, ProviderError> {
        let address = self.wallet.wallet.default_signer().address();
        let provider = self
            .contracts
            .compute_registry
            .get_provider(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        Ok(provider.provider_address != Address::default())
    }
    /// Returns whether this provider is whitelisted in the compute registry.
    pub(crate) async fn check_provider_whitelisted(&self) -> Result<bool, ProviderError> {
        let address = self.wallet.wallet.default_signer().address();
        let provider = self
            .contracts
            .compute_registry
            .get_provider(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        Ok(provider.is_whitelisted)
    }
    /// Repeatedly attempts provider registration until it succeeds, a
    /// non-retryable error occurs, or `max_attempts` is exhausted
    /// (`max_attempts == 0` retries forever).
    ///
    /// Only `NotWhitelisted` and `InsufficientBalance` are retried, with a
    /// 10-second pause that is interruptible via `cancellation_token`.
    pub(crate) async fn retry_register_provider(
        &self,
        stake: U256,
        max_attempts: u32,
        cancellation_token: CancellationToken,
    ) -> Result<(), ProviderError> {
        Console::title("Registering Provider");
        let mut attempts = 0;
        while attempts < max_attempts || max_attempts == 0 {
            Console::progress("Registering provider...");
            match self.register_provider(stake).await {
                Ok(_) => {
                    return Ok(());
                }
                Err(e) => match e {
                    ProviderError::NotWhitelisted | ProviderError::InsufficientBalance => {
                        Console::info("Info", "Retrying in 10 seconds...");
                        tokio::select! {
                            _ = tokio::time::sleep(tokio::time::Duration::from_secs(10)) => {}
                            _ = cancellation_token.cancelled() => {
                                return Err(e);
                            }
                        }
                        attempts += 1;
                        continue;
                    }
                    _ => return Err(e),
                },
            }
        }
        log::error!("❌ Failed to register provider after {attempts} attempts");
        Err(ProviderError::Other)
    }
    /// Registers this provider with the Prime Network, staking `stake` tokens.
    ///
    /// Checks token/ETH balances, asks for user confirmation, approves the
    /// token spend and submits the registration transaction. Errors:
    /// `InsufficientBalance`, `UserCancelled`, `NotWhitelisted` (registered
    /// but not yet whitelisted), or `Other`.
    pub(crate) async fn register_provider(&self, stake: U256) -> Result<(), ProviderError> {
        let address = self.wallet.wallet.default_signer().address();
        let balance: U256 = self
            .contracts
            .ai_token
            .balance_of(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        let eth_balance = self
            .wallet
            .get_balance()
            .await
            .map_err(|_| ProviderError::Other)?;
        let provider_exists = self.check_provider_exists().await?;
        if !provider_exists {
            Console::info("Balance", &format_ether(balance));
            Console::info(
                "ETH Balance",
                &format!("{} ETH", format_ether(U256::from(eth_balance))),
            );
            if balance < stake {
                Console::user_error(&format!(
                    "Insufficient balance for stake: {}",
                    format_ether(stake)
                ));
                return Err(ProviderError::InsufficientBalance);
            }
            if !self.prompt_user_confirmation(&format!(
                "Do you want to approve staking {}?",
                format_ether(stake)
            )) {
                Console::info("Operation cancelled by user", "Staking approval declined");
                return Err(ProviderError::UserCancelled);
            }
            Console::progress("Approving for Stake transaction");
            self.contracts
                .ai_token
                .approve(stake)
                .await
                .map_err(|_| ProviderError::Other)?;
            Console::progress("Registering Provider");
            let Ok(register_tx) = self.contracts.prime_network.register_provider(stake).await
            else {
                return Err(ProviderError::Other);
            };
            Console::info("Registration tx", &format!("{register_tx:?}"));
        }
        // Get provider details again - cleanup later
        Console::progress("Getting provider details");
        let _ = self
            .contracts
            .compute_registry
            .get_provider(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        // NOTE(review): this second block appears to duplicate the
        // registration attempt above, presumably to retry when the first
        // attempt is not yet visible on chain — confirm and consider
        // extracting a shared helper.
        let provider_exists = self.check_provider_exists().await?;
        if !provider_exists {
            Console::info("Balance", &format_ether(balance));
            Console::info(
                "ETH Balance",
                &format!("{} ETH", format_ether(U256::from(eth_balance))),
            );
            if balance < stake {
                Console::user_error(&format!(
                    "Insufficient balance for stake: {}",
                    format_ether(stake)
                ));
                return Err(ProviderError::InsufficientBalance);
            }
            if !self.prompt_user_confirmation(&format!(
                "Do you want to approve staking {}?",
                format_ether(stake)
            )) {
                Console::info("Operation cancelled by user", "Staking approval declined");
                return Err(ProviderError::UserCancelled);
            }
            Console::progress("Approving Stake transaction");
            self.contracts.ai_token.approve(stake).await.map_err(|e| {
                error!("Failed to approve stake: {e}");
                ProviderError::Other
            })?;
            Console::progress("Registering Provider");
            let register_tx = match self.contracts.prime_network.register_provider(stake).await {
                Ok(tx) => tx,
                Err(e) => {
                    error!("Registration Error: {e}");
                    return Err(ProviderError::Other);
                }
            };
            Console::info("Registration tx", &format!("{register_tx:?}"));
        }
        // Final read-back: verify the provider now exists and is whitelisted.
        let provider = self
            .contracts
            .compute_registry
            .get_provider(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        let provider_exists = provider.provider_address != Address::default();
        if !provider_exists {
            Console::user_error(
                "Provider could not be registered. Please ensure your balance is high enough.",
            );
            return Err(ProviderError::Other);
        }
        Console::success("Provider registered");
        if !provider.is_whitelisted {
            Console::user_error("Provider is not whitelisted yet.");
            return Err(ProviderError::NotWhitelisted);
        }
        Ok(())
    }
    /// Approves and stakes `additional_stake` more tokens for this provider,
    /// after balance check and user confirmation.
    pub(crate) async fn increase_stake(&self, additional_stake: U256) -> Result<(), ProviderError> {
        Console::title("💰 Increasing Provider Stake");
        let address = self.wallet.wallet.default_signer().address();
        let balance: U256 = self
            .contracts
            .ai_token
            .balance_of(address)
            .await
            .map_err(|_| ProviderError::Other)?;
        Console::info("Current Balance", &format_ether(balance));
        Console::info("Additional stake amount", &format_ether(additional_stake));
        if balance < additional_stake {
            Console::user_error("Insufficient balance for stake increase");
            return Err(ProviderError::Other);
        }
        if !self.prompt_user_confirmation(&format!(
            "Do you want to approve staking {} additional funds?",
            format_ether(additional_stake)
        )) {
            Console::info("Operation cancelled by user", "Staking approval declined");
            return Err(ProviderError::UserCancelled);
        }
        Console::progress("Approving additional stake");
        let approve_tx = self
            .contracts
            .ai_token
            .approve(additional_stake)
            .await
            .map_err(|_| ProviderError::Other)?;
        Console::info("Transaction approved", &format!("{approve_tx:?}"));
        Console::progress("Increasing stake");
        let stake_tx = match self.contracts.prime_network.stake(additional_stake).await {
            Ok(tx) => tx,
            Err(e) => {
                println!("Failed to increase stake: {e:?}");
                return Err(ProviderError::Other);
            }
        };
        Console::info(
            "Stake increase transaction completed: ",
            &format!("{stake_tx:?}"),
        );
        Console::success("Provider stake increased successfully");
        Ok(())
    }
    /// Submits a transaction reclaiming `amount` of previously staked tokens.
    pub(crate) async fn reclaim_stake(&self, amount: U256) -> Result<(), ProviderError> {
        Console::progress("Reclaiming stake");
        let reclaim_tx = match self.contracts.prime_network.reclaim_stake(amount).await {
            Ok(tx) => tx,
            Err(e) => {
                println!("Failed to reclaim stake: {e:?}");
                return Err(ProviderError::Other);
            }
        };
        Console::info(
            "Stake reclaim transaction completed: ",
            &format!("{reclaim_tx:?}"),
        );
        Console::success("Provider stake reclaimed successfully");
        Ok(())
    }
}
/// Errors surfaced by provider registration, staking and monitoring.
#[derive(Debug)]
pub(crate) enum ProviderError {
    NotWhitelisted,
    UserCancelled,
    Other,
    InsufficientBalance,
}

impl ProviderError {
    /// Human-readable description for each variant.
    fn message(&self) -> &'static str {
        match self {
            Self::NotWhitelisted => "Provider is not whitelisted",
            Self::UserCancelled => "Operation cancelled by user",
            Self::Other => "Provider could not be registered",
            Self::InsufficientBalance => "Insufficient balance for stake",
        }
    }
}

impl fmt::Display for ProviderError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.message())
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/operations/heartbeat/service.rs | crates/worker/src/operations/heartbeat/service.rs | use crate::docker::DockerService;
use crate::metrics::store::MetricsStore;
use crate::state::system_state::SystemState;
use crate::TaskHandles;
use log::info;
use reqwest::Client;
use shared::models::api::ApiResponse;
use shared::models::heartbeat::{HeartbeatRequest, HeartbeatResponse};
use shared::security::request_signer::sign_request_with_nonce;
use shared::web3::wallet::Wallet;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::{interval, Duration};
use tokio_util::sync::CancellationToken;
/// Periodically reports node status to the orchestrator and applies the
/// orchestrator's response (current task assignment) to local state.
#[derive(Clone)]
pub(crate) struct HeartbeatService {
    // Shared worker state: running flag, heartbeat endpoint, last-heartbeat time.
    state: Arc<SystemState>,
    // Time between heartbeat ticks.
    interval: Duration,
    // HTTP client used for heartbeat requests (20s timeout, see `new`).
    client: Client,
    // Token that stops the background heartbeat loop.
    cancellation_token: CancellationToken,
    // Shared list of spawned task handles; the loop's handle is pushed here.
    task_handles: TaskHandles,
    // Wallet that signs each heartbeat request.
    node_wallet: Wallet,
    // Docker service whose task state is reported and updated.
    docker_service: Arc<DockerService>,
    // Per-task metrics included in heartbeat payloads.
    metrics_store: Arc<MetricsStore>,
}
/// Errors produced by the heartbeat service.
#[derive(Debug, Clone, thiserror::Error)]
pub(crate) enum HeartbeatError {
    // Covers all request-path failures: missing endpoint, signing, HTTP and parsing.
    #[error("HTTP request failed")]
    RequestFailed,
    // The HTTP client could not be constructed.
    #[error("Service initialization failed")]
    InitFailed,
}
impl HeartbeatService {
    /// Builds the heartbeat service with a 20-second HTTP request timeout.
    ///
    /// Returns `HeartbeatError::InitFailed` if the HTTP client cannot be built.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        interval: Duration,
        cancellation_token: CancellationToken,
        task_handles: TaskHandles,
        node_wallet: Wallet,
        docker_service: Arc<DockerService>,
        metrics_store: Arc<MetricsStore>,
        state: Arc<SystemState>,
    ) -> Result<Arc<Self>, HeartbeatError> {
        let client = Client::builder()
            .timeout(Duration::from_secs(20))
            .build()
            .map_err(|_| HeartbeatError::InitFailed)?; // Client build failure maps to InitFailed
        Ok(Arc::new(Self {
            state,
            interval,
            client,
            cancellation_token,
            task_handles,
            node_wallet,
            docker_service,
            metrics_store,
        }))
    }
    /// Restarts the heartbeat loop if a previous run left an endpoint in the
    /// recovered system state.
    pub(crate) async fn activate_heartbeat_if_endpoint_exists(&self) {
        if let Some(endpoint) = self.state.get_heartbeat_endpoint().await {
            info!("Starting heartbeat from recovered state");
            // NOTE(review): `start` only returns Ok in its current body, so
            // this unwrap looks unreachable — confirm before relying on it.
            self.start(endpoint).await.unwrap();
        }
    }
    /// Starts the periodic heartbeat loop against `endpoint`.
    ///
    /// No-op if the service is already marked running. The spawned task sends
    /// one heartbeat per interval tick and stops when the running flag is
    /// cleared or the cancellation token fires; its handle is stored in
    /// `task_handles`.
    pub(crate) async fn start(&self, endpoint: String) -> Result<(), HeartbeatError> {
        if self.state.is_running().await {
            return Ok(());
        }
        if let Err(e) = self.state.set_running(true, Some(endpoint)).await {
            log::error!("Failed to set running to true: {e:?}");
        }
        let state = self.state.clone();
        let client = self.client.clone();
        let interval_duration = self.interval;
        let cancellation_token = self.cancellation_token.clone();
        let docker_service = self.docker_service.clone();
        let wallet_clone = self.node_wallet.clone();
        let metrics_store = self.metrics_store.clone();
        let handle = tokio::spawn(async move {
            let mut interval = interval(interval_duration);
            // Track the previous attempt's outcome so recovery is logged once.
            let mut had_error = false;
            let mut first_start = true;
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        if !state.is_running().await {
                            break;
                        }
                        match send_heartbeat(&client, state.get_heartbeat_endpoint().await, wallet_clone.clone(), docker_service.clone(), metrics_store.clone(), state.get_p2p_id()).await {
                            Ok(_) => {
                                state.update_last_heartbeat().await;
                                if had_error {
                                    log::info!("Orchestrator sync restored - connection is healthy again");
                                    had_error = false;
                                } else if first_start {
                                    log::info!("Successfully connected to orchestrator");
                                    first_start = false;
                                } else {
                                    log::debug!("Synced with orchestrator");
                                }
                            }
                            Err(e) => {
                                log::error!("{}", &format!("Failed to sync with orchestrator: {e:?}"));
                                had_error = true;
                            }
                        }
                    }
                    _ = cancellation_token.cancelled() => {
                        log::info!("Sync service received cancellation signal");
                        if let Err(e) = state.set_running(false, None).await {
                            log::error!("Failed to set running to false: {e:?}");
                        }
                        break;
                    }
                }
            }
            log::info!("Heartbeat service stopped");
        });
        let mut task_handles = self.task_handles.lock().await;
        task_handles.push(handle);
        Ok(())
    }
    /// Stops the heartbeat loop by clearing the shared running flag; the
    /// spawned loop observes the flag on its next tick and exits.
    #[allow(dead_code)]
    pub(crate) async fn stop(&self) {
        if let Err(e) = self.state.set_running(false, None).await {
            log::error!("Failed to set running to false: {e:?}");
        }
    }
}
async fn send_heartbeat(
client: &Client,
endpoint: Option<String>,
wallet: Wallet,
docker_service: Arc<DockerService>,
metrics_store: Arc<MetricsStore>,
p2p_id: p2p::PeerId,
) -> Result<HeartbeatResponse, HeartbeatError> {
if endpoint.is_none() {
return Err(HeartbeatError::RequestFailed);
}
let current_task_state = docker_service.state.get_current_task().await;
let ts = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
let task_details = if let Some(task) = ¤t_task_state {
docker_service.get_task_details(task).await
} else {
None
};
let request = if let Some(task) = current_task_state {
let metrics_for_task = metrics_store
.get_metrics_for_task(task.id.to_string())
.await;
HeartbeatRequest {
address: wallet.address().to_string(),
task_id: Some(task.id.to_string()),
task_state: Some(task.state.to_string()),
metrics: Some(metrics_for_task),
version: Some(
option_env!("WORKER_VERSION")
.unwrap_or(env!("CARGO_PKG_VERSION"))
.to_string(),
),
timestamp: Some(ts),
p2p_id: Some(p2p_id.to_string()), // TODO: this should always be `Some`
task_details,
}
} else {
HeartbeatRequest {
address: wallet.address().to_string(),
version: Some(
option_env!("WORKER_VERSION")
.unwrap_or(env!("CARGO_PKG_VERSION"))
.to_string(),
),
timestamp: Some(ts),
p2p_id: Some(p2p_id.to_string()), // TODO: this should always be `Some`
..Default::default()
}
};
let signature = sign_request_with_nonce(
"/heartbeat",
&wallet,
Some(&serde_json::to_value(&request).map_err(|e| {
log::error!("Failed to serialize request: {e:?}");
HeartbeatError::RequestFailed
})?),
)
.await
.map_err(|e| {
log::error!("Failed to sign request: {e:?}");
HeartbeatError::RequestFailed
})?;
let mut headers = reqwest::header::HeaderMap::new();
headers.insert("x-address", wallet.address().to_string().parse().unwrap());
headers.insert("x-signature", signature.signature.parse().unwrap());
let response = client
.post(endpoint.unwrap())
.json(&signature.data)
.headers(headers)
.send()
.await
.map_err(|e| {
log::error!("Request failed: {e:?}");
HeartbeatError::RequestFailed
})?;
let response = if response.status().is_success() {
response
} else {
let status = response.status();
let error_text = response
.text()
.await
.unwrap_or_else(|_| "Failed to read error response".to_string());
log::error!("Error response received: status={status}, body={error_text}");
return Err(HeartbeatError::RequestFailed);
};
let response = response
.json::<ApiResponse<HeartbeatResponse>>()
.await
.map_err(|e| {
log::error!("Failed to parse response: {e:?}");
HeartbeatError::RequestFailed
})?;
let heartbeat_response = response.data.clone();
log::debug!("Heartbeat response: {heartbeat_response:?}");
// Get current task before updating
let old_task = docker_service.state.get_current_task().await;
let new_task = match heartbeat_response.current_task {
Some(task) => {
// Only log if task image changed or there was no previous task
if old_task
.as_ref()
.map(|t| t.image != task.image)
.unwrap_or(true)
{
log::info!("Current task is to run image: {:?}", task.image);
}
Some(task)
}
None => None,
};
// Clear metrics for old task if task ID changed
if let (Some(old), Some(new)) = (&old_task, &new_task) {
if old.id != new.id {
log::info!("Clearing metrics for old task: {}", old.id);
metrics_store
.clear_metrics_for_task(&old.id.to_string())
.await;
}
} else if let (Some(old), None) = (&old_task, &new_task) {
// Clear metrics when transitioning from having a task to no task
log::info!("Clearing metrics for completed task: {}", old.id);
metrics_store
.clear_metrics_for_task(&old.id.to_string())
.await;
}
docker_service.state.set_current_task(new_task).await;
let is_running = docker_service.state.get_is_running().await;
if !is_running {
tokio::spawn(async move {
if let Err(e) = docker_service.run().await {
log::error!("❌ Docker service failed: {e}");
}
});
}
Ok(response.data)
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/operations/heartbeat/mod.rs | crates/worker/src/operations/heartbeat/mod.rs | pub(crate) mod service;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/cli/command.rs | crates/worker/src/cli/command.rs | use crate::checks::hardware::HardwareChecker;
use crate::checks::issue::IssueReport;
use crate::checks::software::SoftwareChecker;
use crate::checks::stun::StunCheck;
use crate::console::Console;
use crate::docker::taskbridge::TaskBridge;
use crate::docker::DockerService;
use crate::metrics::store::MetricsStore;
use crate::operations::compute_node::ComputeNodeOperations;
use crate::operations::heartbeat::service::HeartbeatService;
use crate::operations::provider::ProviderOperations;
use crate::services::discovery::DiscoveryService;
use crate::services::discovery_updater::DiscoveryUpdater;
use crate::state::system_state::SystemState;
use crate::TaskHandles;
use alloy::primitives::utils::format_ether;
use alloy::primitives::Address;
use alloy::primitives::U256;
use alloy::signers::local::PrivateKeySigner;
use alloy::signers::Signer;
use clap::{Parser, Subcommand};
use log::{error, info};
use shared::models::node::ComputeRequirements;
use shared::models::node::Node;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::contracts::structs::compute_pool::PoolStatus;
use shared::web3::wallet::Wallet;
use shared::web3::wallet::WalletProvider;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use url::Url;
/// Worker version string: the `WORKER_VERSION` compile-time env var when set,
/// otherwise the crate version from Cargo.
const APP_VERSION: &str = match option_env!("WORKER_VERSION") {
    Some(version) => version,
    None => env!("CARGO_PKG_VERSION"),
};
// Top-level CLI parser; `--version` reports APP_VERSION.
// (Regular `//` comments are used here because `///` doc comments would
// become clap help text and change the program's output.)
#[derive(Parser)]
#[command(author, version = APP_VERSION, about, long_about = None)]
pub struct Cli {
    #[command(subcommand)]
    pub command: Commands,
}
// Worker subcommands. `///` doc comments below are clap help text and are
// therefore part of runtime behavior — left untouched.
#[derive(Subcommand)]
pub enum Commands {
    // Main entry point: initializes wallets/contracts, runs hardware and
    // software checks, then joins the given compute pool (see execute_command).
    Run {
        /// RPC URL
        #[arg(long, default_value = option_env!("WORKER_RPC_URL").unwrap_or("http://localhost:8545"))]
        rpc_url: String,
        /// Port number for the worker to listen on - DEPRECATED
        #[arg(long, default_value = "8080")]
        port: u16,
        /// Port for libp2p service
        #[arg(long, default_value = "4002")]
        libp2p_port: u16,
        /// External IP address for the worker to advertise
        #[arg(long)]
        external_ip: Option<String>,
        /// Compute pool ID
        #[arg(long)]
        compute_pool_id: u32,
        /// Dry run the command without starting the worker
        #[arg(long, default_value = "false")]
        dry_run: bool,
        /// Optional state storage directory overwrite
        #[arg(long)]
        state_dir_overwrite: Option<String>,
        /// Disable state storing
        #[arg(long, default_value = "false")]
        disable_state_storing: bool,
        /// Auto recover from previous state
        #[arg(long, default_value = "false")]
        no_auto_recover: bool,
        /// Discovery service URL
        #[arg(long)]
        discovery_url: Option<String>,
        /// Private key for the provider (not recommended, use environment variable PRIVATE_KEY_PROVIDER instead)
        #[arg(long)]
        private_key_provider: Option<String>,
        /// Private key for the node (not recommended, use environment variable PRIVATE_KEY_NODE instead)
        #[arg(long)]
        private_key_node: Option<String>,
        /// Auto accept transactions
        #[arg(long, default_value = "false")]
        auto_accept: bool,
        /// Retry count until provider has enough balance to stake (0 for unlimited retries)
        #[arg(long, default_value = "0")]
        funding_retry_count: u32,
        /// Skip system requirement checks (for development/testing)
        #[arg(long, default_value = "false")]
        skip_system_checks: bool,
        /// Loki URL
        #[arg(long)]
        loki_url: Option<String>,
        /// Log level
        #[arg(long)]
        log_level: Option<String>,
        /// Storage path for worker data (overrides automatic selection)
        #[arg(long)]
        storage_path: Option<String>,
        /// Disable host network mode
        #[arg(long, default_value = "false")]
        disable_host_network_mode: bool,
        // No help text on the two IPFS flags — intentional or an oversight;
        // adding `///` here would change CLI output, so left as-is.
        #[arg(long, default_value = "false")]
        with_ipfs_upload: bool,
        #[arg(long, default_value = "5001")]
        ipfs_port: u16,
    },
    // Presumably runs the local system checks standalone; its handler is not
    // visible in this chunk — confirm against execute_command.
    Check {},
    /// Generate new wallets for provider and node
    GenerateWallets {},
    /// Generate new wallet for node only
    GenerateNodeWallet {},
    /// Get balance of provider and node
    Balance {
        /// Private key for the provider
        #[arg(long)]
        private_key: Option<String>,
        /// RPC URL
        #[arg(long, default_value = option_env!("WORKER_RPC_URL").unwrap_or("http://localhost:8545"))]
        rpc_url: String,
    },
    /// Sign Message
    SignMessage {
        /// Message to sign
        #[arg(long)]
        message: String,
        /// Private key for the provider
        #[arg(long)]
        private_key_provider: Option<String>,
        /// Private key for the node
        #[arg(long)]
        private_key_node: Option<String>,
    },
    /// Deregister worker from compute pool
    Deregister {
        /// Private key for the provider
        #[arg(long)]
        private_key_provider: Option<String>,
        /// Private key for the node
        #[arg(long)]
        private_key_node: Option<String>,
        /// RPC URL
        #[arg(long, default_value = option_env!("WORKER_RPC_URL").unwrap_or("http://localhost:8545"))]
        rpc_url: String,
        /// Compute pool ID
        #[arg(long)]
        compute_pool_id: u32,
    },
}
pub async fn execute_command(
command: &Commands,
cancellation_token: CancellationToken,
task_handles: TaskHandles,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match command {
Commands::Run {
port: _,
libp2p_port,
external_ip,
compute_pool_id,
dry_run: _,
rpc_url,
discovery_url,
state_dir_overwrite,
disable_state_storing,
no_auto_recover,
private_key_provider,
private_key_node,
auto_accept,
funding_retry_count,
skip_system_checks,
loki_url: _,
log_level: _,
storage_path,
disable_host_network_mode,
with_ipfs_upload,
ipfs_port,
} => {
if *disable_state_storing && !(*no_auto_recover) {
Console::user_error(
"Cannot disable state storing and enable auto recover at the same time. Use --no-auto-recover to disable auto recover.",
);
std::process::exit(1);
}
let state = match SystemState::new(
state_dir_overwrite.clone(),
*disable_state_storing,
*compute_pool_id,
) {
Ok(state) => state,
Err(e) => {
error!("❌ Failed to initialize system state: {e}");
std::process::exit(1);
}
};
let state = Arc::new(state);
let private_key_provider = if let Some(key) = private_key_provider {
Console::warning("Using private key from command line is not recommended. Consider using PRIVATE_KEY_PROVIDER environment variable instead.");
key.clone()
} else {
std::env::var("PRIVATE_KEY_PROVIDER").expect("PRIVATE_KEY_PROVIDER must be set")
};
let private_key_node = if let Some(key) = private_key_node {
Console::warning("Using private key from command line is not recommended. Consider using PRIVATE_KEY_NODE environment variable instead.");
key.clone()
} else {
std::env::var("PRIVATE_KEY_NODE").expect("PRIVATE_KEY_NODE must be set")
};
let mut recover_last_state = !(*no_auto_recover);
let version = APP_VERSION;
Console::section("🚀 PRIME WORKER INITIALIZATION - beta");
Console::info("Version", version);
/*
Initialize Wallet instances
*/
let provider_wallet_instance =
match Wallet::new(&private_key_provider, Url::parse(rpc_url).unwrap()) {
Ok(wallet) => wallet,
Err(err) => {
error!("Failed to create wallet: {err}");
std::process::exit(1);
}
};
let node_wallet_instance =
match Wallet::new(&private_key_node, Url::parse(rpc_url).unwrap()) {
Ok(wallet) => wallet,
Err(err) => {
error!("❌ Failed to create wallet: {err}");
std::process::exit(1);
}
};
/*
Initialize dependencies - services, contracts, operations
*/
let contracts = ContractBuilder::new(provider_wallet_instance.provider())
.with_compute_registry()
.with_ai_token()
.with_prime_network()
.with_compute_pool()
.with_stake_manager()
.build()
.unwrap();
let provider_ops = ProviderOperations::new(
provider_wallet_instance.clone(),
contracts.clone(),
*auto_accept,
);
let provider_ops_cancellation = cancellation_token.clone();
let compute_node_state = state.clone();
let compute_node_ops = ComputeNodeOperations::new(
&provider_wallet_instance,
&node_wallet_instance,
contracts.clone(),
compute_node_state,
);
let discovery_urls = vec![discovery_url
.clone()
.unwrap_or("http://localhost:8089".to_string())];
let discovery_service =
DiscoveryService::new(node_wallet_instance.clone(), discovery_urls, None);
let discovery_state = state.clone();
let discovery_updater =
DiscoveryUpdater::new(discovery_service.clone(), discovery_state.clone());
let pool_id = U256::from(*compute_pool_id);
let pool_info = loop {
match contracts.compute_pool.get_pool_info(pool_id).await {
Ok(pool) if pool.status == PoolStatus::ACTIVE => break Arc::new(pool),
Ok(_) => {
Console::warning("Pool is not active yet. Checking again in 15 seconds.");
tokio::select! {
_ = tokio::time::sleep(tokio::time::Duration::from_secs(15)) => {},
_ = cancellation_token.cancelled() => return Ok(()),
}
}
Err(e) => {
error!("Failed to get pool info: {e}");
return Ok(());
}
}
};
let stun_check = StunCheck::new(Duration::from_secs(5), 0);
let detected_external_ip = match stun_check.get_public_ip().await {
Ok(ip) => ip,
Err(e) => {
error!("❌ Failed to get public IP: {e}");
std::process::exit(1);
}
};
let node_config = Node {
id: node_wallet_instance
.wallet
.default_signer()
.address()
.to_string(),
ip_address: external_ip.clone().unwrap_or(detected_external_ip.clone()),
port: 0,
provider_address: provider_wallet_instance
.wallet
.default_signer()
.address()
.to_string(),
compute_specs: None,
compute_pool_id: *compute_pool_id,
worker_p2p_id: None,
worker_p2p_addresses: None,
};
let issue_tracker = Arc::new(RwLock::new(IssueReport::new()));
let mut hardware_check = HardwareChecker::new(Some(issue_tracker.clone()));
let mut node_config = match hardware_check
.check_hardware(node_config, storage_path.clone())
.await
{
Ok(config) => config,
Err(e) => {
Console::user_error(&format!("❌ Hardware check failed: {e}"));
std::process::exit(1);
}
};
let software_checker = SoftwareChecker::new(Some(issue_tracker.clone()));
if let Err(err) = software_checker.check_software(&node_config).await {
Console::user_error(&format!("❌ Software check failed: {err}"));
std::process::exit(1);
}
if let Some(external_ip) = external_ip {
if *external_ip != detected_external_ip {
Console::warning(
&format!(
"Automatically detected external IP {detected_external_ip} does not match the provided external IP {external_ip}"
),
);
}
}
let issues = issue_tracker.read().await;
issues.print_issues();
if issues.has_critical_issues() {
if !*skip_system_checks {
Console::user_error("❌ Critical issues found. Exiting.");
std::process::exit(1);
} else {
Console::warning("Critical issues found. Ignoring and continuing.");
}
}
let required_specs = match ComputeRequirements::from_str(&pool_info.pool_data_uri) {
Ok(specs) => Some(specs),
Err(e) => {
log::debug!("❌ Could not parse pool compute specs: {e}");
None
}
};
// Check if node meets the pool's compute requirements
if let Some(ref compute_specs) = node_config.compute_specs {
if let Some(ref required_specs) = required_specs {
if !compute_specs.meets(required_specs) {
Console::user_error(
"❌ Your node does not meet the compute requirements for this pool.",
);
info!("Required compute requirements:\n{required_specs}");
if !*skip_system_checks {
std::process::exit(1);
} else {
Console::warning(
"Ignoring compute requirements mismatch and continuing.",
);
}
} else {
Console::success(
"✅ Your node meets the compute requirements for this pool.",
);
}
} else {
Console::success("✅ No specific compute requirements for this pool.");
}
} else {
Console::warning("Cannot verify compute requirements: node specs not available.");
if !*skip_system_checks {
std::process::exit(1);
} else {
Console::warning("Ignoring missing compute specs and continuing.");
}
}
let metrics_store = Arc::new(MetricsStore::new());
let heartbeat_metrics_clone = metrics_store.clone();
let bridge_contracts = contracts.clone();
let bridge_wallet = node_wallet_instance.clone();
let ipfs = if *with_ipfs_upload {
let conn_limits =
rust_ipfs::ConnectionLimits::default().with_max_established(Some(100));
let builder = rust_ipfs::UninitializedIpfsDefault::new()
.set_default_listener()
.with_default()
.set_connection_limits(conn_limits)
.set_listening_addrs(vec![
format!("/ip4/0.0.0.0/tcp/{ipfs_port}")
.parse()
.expect("valid multiaddr"),
format!("/ip4/0.0.0.0/udp/{ipfs_port}/quic-v1")
.parse()
.expect("valid multiaddr"),
])
.listen_as_external_addr()
.with_upnp();
let ipfs = match builder.start().await {
Ok(ipfs) => ipfs,
Err(e) => {
error!("❌ Failed to initialize IPFS node: {e}");
std::process::exit(1);
}
};
if let Err(e) = ipfs.default_bootstrap().await {
error!("❌ Failed to add default bootstrap nodes to IPFS: {e}");
std::process::exit(1);
}
if let Err(e) = ipfs.bootstrap().await {
error!("❌ Failed to bootstrap IPFS node: {e}");
std::process::exit(1);
}
Console::success("IPFS node initialized and bootstrapped successfully");
Some(ipfs)
} else {
None
};
let docker_storage_path = node_config
.compute_specs
.as_ref()
.expect("Hardware check should have populated compute_specs")
.storage_path
.clone();
let task_bridge = match TaskBridge::new(
None,
metrics_store,
Some(bridge_contracts),
Some(node_config.clone()),
Some(bridge_wallet),
docker_storage_path.clone(),
state.clone(),
ipfs,
) {
Ok(bridge) => bridge,
Err(e) => {
error!("❌ Failed to create Task Bridge: {e}");
std::process::exit(1);
}
};
let system_memory = node_config
.compute_specs
.as_ref()
.map(|specs| specs.ram_mb.unwrap_or(0));
let gpu = node_config
.compute_specs
.clone()
.and_then(|specs| specs.gpu.clone());
let docker_service = Arc::new(DockerService::new(
cancellation_token.clone(),
gpu,
system_memory,
task_bridge
.get_socket_path()
.to_str()
.expect("path is valid utf-8 string")
.to_string(),
docker_storage_path,
node_wallet_instance
.wallet
.default_signer()
.address()
.to_string(),
*disable_host_network_mode,
));
let bridge_cancellation_token = cancellation_token.clone();
tokio::spawn(async move {
tokio::select! {
_ = bridge_cancellation_token.cancelled() => {
}
_ = task_bridge.run() => {
}
}
});
let heartbeat_state = state.clone();
let heartbeat_service = HeartbeatService::new(
Duration::from_secs(10),
cancellation_token.clone(),
task_handles.clone(),
node_wallet_instance.clone(),
docker_service.clone(),
heartbeat_metrics_clone.clone(),
heartbeat_state,
);
let gpu_count: u32 = match &node_config.compute_specs {
Some(specs) => specs
.gpu
.as_ref()
.map(|gpu| gpu.count.unwrap_or(0))
.unwrap_or(0),
None => 0,
};
let compute_units = U256::from(std::cmp::max(1, gpu_count * 1000));
Console::section("Syncing with Network");
// Check if provider exists first
let provider_exists = match provider_ops.check_provider_exists().await {
Ok(exists) => exists,
Err(e) => {
error!("❌ Failed to check if provider exists: {e}");
std::process::exit(1);
}
};
let Some(stake_manager) = contracts.stake_manager.as_ref() else {
error!("❌ Stake manager not initialized");
std::process::exit(1);
};
Console::title("Provider Status");
let is_whitelisted = match provider_ops.check_provider_whitelisted().await {
Ok(is_whitelisted) => is_whitelisted,
Err(e) => {
error!("Failed to check provider whitelist status: {e}");
std::process::exit(1);
}
};
if provider_exists && is_whitelisted {
Console::success("Provider is registered and whitelisted");
} else {
let required_stake = match stake_manager
.calculate_stake(compute_units, U256::from(0))
.await
{
Ok(stake) => stake,
Err(e) => {
error!("❌ Failed to calculate required stake: {e}");
std::process::exit(1);
}
};
Console::info("Required stake", &format_ether(required_stake).to_string());
if let Err(e) = provider_ops
.retry_register_provider(
required_stake,
*funding_retry_count,
cancellation_token.clone(),
)
.await
{
error!("❌ Failed to register provider: {e}");
std::process::exit(1);
}
}
let compute_node_exists = match compute_node_ops.check_compute_node_exists().await {
Ok(exists) => exists,
Err(e) => {
error!("❌ Failed to check if compute node exists: {e}");
std::process::exit(1);
}
};
let provider_total_compute = match contracts
.compute_registry
.get_provider_total_compute(
provider_wallet_instance.wallet.default_signer().address(),
)
.await
{
Ok(compute) => compute,
Err(e) => {
error!("❌ Failed to get provider total compute: {e}");
std::process::exit(1);
}
};
let provider_stake = stake_manager
.get_stake(provider_wallet_instance.wallet.default_signer().address())
.await
.unwrap_or_default();
// If we are already registered we do not need additionally compute units
let compute_units = match compute_node_exists {
true => U256::from(0),
false => compute_units,
};
let required_stake = match stake_manager
.calculate_stake(compute_units, provider_total_compute)
.await
{
Ok(stake) => stake,
Err(e) => {
error!("❌ Failed to calculate required stake: {e}");
std::process::exit(1);
}
};
if required_stake > provider_stake {
Console::info(
"Provider stake is less than required stake",
&format!(
"Required: {} tokens, Current: {} tokens",
format_ether(required_stake),
format_ether(provider_stake)
),
);
match provider_ops
.increase_stake(required_stake - provider_stake)
.await
{
Ok(_) => {
Console::success("Successfully increased stake");
}
Err(e) => {
error!("❌ Failed to increase stake: {e}");
std::process::exit(1);
}
}
}
Console::title("Compute Node Status");
if compute_node_exists {
// TODO: What if we have two nodes?
Console::success("Compute node is registered");
recover_last_state = true;
} else {
match compute_node_ops.add_compute_node(compute_units).await {
Ok(added_node) => {
if added_node {
// If we are adding a new compute node we wait for a proper
// invite and do not recover from previous state
recover_last_state = false;
}
}
Err(e) => {
error!("❌ Failed to add compute node: {e}");
std::process::exit(1);
}
}
}
// Start P2P service
Console::title("🔗 Starting P2P Service");
let heartbeat = match heartbeat_service.clone() {
Ok(service) => service,
Err(e) => {
error!("❌ Heartbeat service is not available: {e}");
std::process::exit(1);
}
};
let validators = match contracts.prime_network.get_validator_role().await {
Ok(validators) => validators,
Err(e) => {
error!("Failed to get validator role: {e}");
std::process::exit(1);
}
};
if validators.is_empty() {
error!("❌ No validator roles found on contracts - cannot start worker without validators");
error!("This means the smart contract has no registered validators, which is required for signature validation");
error!("Please ensure validators are properly registered on the PrimeNetwork contract before starting the worker");
std::process::exit(1);
}
let mut allowed_addresses = vec![pool_info.creator, pool_info.compute_manager_key];
allowed_addresses.extend(validators);
let validator_addresses = std::collections::HashSet::from_iter(allowed_addresses);
let p2p_service = match crate::p2p::Service::new(
state.get_p2p_keypair().clone(),
*libp2p_port,
node_wallet_instance.clone(),
validator_addresses,
docker_service.clone(),
heartbeat.clone(),
state.clone(),
contracts.clone(),
provider_wallet_instance.clone(),
cancellation_token.clone(),
) {
Ok(service) => service,
Err(e) => {
error!("❌ Failed to start P2P service: {e}");
std::process::exit(1);
}
};
let peer_id = p2p_service.peer_id();
node_config.worker_p2p_id = Some(peer_id.to_string());
let external_p2p_address =
format!("/ip4/{}/tcp/{}", node_config.ip_address, *libp2p_port);
node_config.worker_p2p_addresses = Some(
p2p_service
.listen_addrs()
.iter()
.map(|addr| addr.to_string())
.chain(vec![external_p2p_address])
.collect(),
);
tokio::task::spawn(p2p_service.run());
Console::success(&format!("P2P service started with ID: {peer_id}",));
let mut attempts = 0;
let max_attempts = 100;
while attempts < max_attempts {
Console::title("📦 Uploading discovery info");
match discovery_service.upload_discovery_info(&node_config).await {
Ok(_) => break,
Err(e) => {
attempts += 1;
let error_msg = e.to_string();
// Check if this is a Cloudflare block
if error_msg.contains("403 Forbidden")
&& (error_msg.contains("Cloudflare")
|| error_msg.contains("Sorry, you have been blocked")
|| error_msg.contains("Attention Required!"))
{
error!(
"Attempt {attempts}: ❌ Discovery service blocked by Cloudflare protection. This may indicate:"
);
error!(" • Your IP address has been flagged by Cloudflare security");
error!(" • Too many requests from your location");
error!(" • Network configuration issues");
error!(" • Discovery service may be under DDoS protection");
error!(
"Please contact support or try from a different network/IP address"
);
} else {
error!("Attempt {attempts}: ❌ Failed to upload discovery info: {e}");
}
if attempts >= max_attempts {
if error_msg.contains("403 Forbidden")
&& (error_msg.contains("Cloudflare")
|| error_msg.contains("Sorry, you have been blocked"))
{
error!("❌ Unable to reach discovery service due to Cloudflare blocking after {max_attempts} attempts");
error!("This is likely a network/IP issue rather than a worker configuration problem");
}
std::process::exit(1);
}
}
}
tokio::time::sleep(tokio::time::Duration::from_secs(10)).await;
}
Console::success("Discovery info uploaded");
Console::section("Starting Worker with Task Bridge");
// Start monitoring compute node status on chain
provider_ops.start_monitoring(provider_ops_cancellation);
let pool_id = state.get_compute_pool_id();
if let Err(err) = compute_node_ops.start_monitoring(cancellation_token.clone(), pool_id)
{
error!("❌ Failed to start node monitoring: {err}");
std::process::exit(1);
}
discovery_updater.start_auto_update(node_config);
if recover_last_state {
info!("Recovering from previous state: {recover_last_state}");
heartbeat.activate_heartbeat_if_endpoint_exists().await;
}
// Keep the worker running and listening for P2P connections
Console::success("Worker is now running and listening for P2P connections...");
// Wait for cancellation signal to gracefully shutdown
cancellation_token.cancelled().await;
Console::info(
"Shutdown signal received",
"Gracefully shutting down worker...",
);
Ok(())
}
Commands::Check {} => {
Console::section("🔍 PRIME WORKER SYSTEM CHECK");
let issues = Arc::new(RwLock::new(IssueReport::new()));
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/cli/mod.rs | crates/worker/src/cli/mod.rs | pub(crate) mod command;
pub use command::{execute_command, Cli};
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/p2p/mod.rs | crates/worker/src/p2p/mod.rs | use anyhow::Context as _;
use anyhow::Result;
use futures::stream::FuturesUnordered;
use p2p::InviteRequestUrl;
use p2p::Node;
use p2p::NodeBuilder;
use p2p::PeerId;
use p2p::Response;
use p2p::{IncomingMessage, Libp2pIncomingMessage, OutgoingMessage};
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::wallet::Wallet;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::SystemTime;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use crate::docker::DockerService;
use crate::operations::heartbeat::service::HeartbeatService;
use crate::state::system_state::SystemState;
use shared::web3::wallet::WalletProvider;
/// Worker-side P2P service: wraps the libp2p node, the stream of incoming
/// messages, and the shared context handed to every message handler.
pub(crate) struct Service {
    // Underlying libp2p node; consumed by `run`, which spawns its event loop.
    node: Node,
    // Requests/responses received from peers; drained by `run`.
    incoming_messages: Receiver<IncomingMessage>,
    // Cooperative shutdown signal for the `run` loop.
    cancellation_token: CancellationToken,
    // Cheaply-cloneable bundle of shared state passed to each handler task.
    context: Context,
}
impl Service {
    /// Builds the underlying libp2p node and assembles the shared handler
    /// context.
    ///
    /// # Errors
    /// Returns an error if the p2p node cannot be constructed (e.g. the
    /// requested listen port cannot be used).
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        keypair: p2p::Keypair,
        port: u16,
        wallet: Wallet,
        validator_addresses: HashSet<alloy::primitives::Address>,
        docker_service: Arc<DockerService>,
        heartbeat_service: Arc<HeartbeatService>,
        system_state: Arc<SystemState>,
        contracts: Contracts<WalletProvider>,
        provider_wallet: Wallet,
        cancellation_token: CancellationToken,
    ) -> Result<Self> {
        let (node, incoming_messages, outgoing_messages) =
            build_p2p_node(keypair, port, cancellation_token.clone())
                .context("failed to build p2p node")?;
        Ok(Self {
            node,
            incoming_messages,
            cancellation_token,
            context: Context::new(
                wallet,
                outgoing_messages,
                validator_addresses,
                docker_service,
                heartbeat_service,
                system_state,
                contracts,
                provider_wallet,
            ),
        })
    }
    /// Local peer ID of the underlying libp2p node.
    pub(crate) fn peer_id(&self) -> PeerId {
        self.node.peer_id()
    }
    /// Addresses the node is currently listening on.
    pub(crate) fn listen_addrs(&self) -> &[p2p::Multiaddr] {
        self.node.listen_addrs()
    }
    /// Drives the service: spawns the node's own event loop, then handles
    /// each incoming message on its own task until the cancellation token
    /// fires. Join errors from handler tasks are logged, not propagated.
    pub(crate) async fn run(self) {
        use futures::StreamExt as _;
        let Self {
            node,
            mut incoming_messages,
            cancellation_token,
            context,
        } = self;
        tokio::task::spawn(node.run());
        // Tracks spawned handler tasks so their join results can be observed.
        let mut message_handlers = FuturesUnordered::new();
        loop {
            tokio::select! {
                _ = cancellation_token.cancelled() => {
                    break;
                }
                Some(message) = incoming_messages.recv() => {
                    // Each message gets its own task so a slow handler does
                    // not block the receive loop.
                    let context = context.clone();
                    let handle = tokio::task::spawn(
                        handle_incoming_message(message, context)
                    );
                    message_handlers.push(handle);
                }
                Some(res) = message_handlers.next() => {
                    if let Err(e) = res {
                        tracing::error!("failed to handle incoming message: {e}");
                    }
                }
            }
        }
    }
}
/// Builds the libp2p node with the protocol behaviours the worker serves
/// (authentication, hardware challenge, invite, task logs, restart).
///
/// Returns the node itself plus the channel endpoints used to receive
/// incoming messages and to send outgoing ones.
fn build_p2p_node(
    keypair: p2p::Keypair,
    port: u16,
    cancellation_token: CancellationToken,
) -> Result<(Node, Receiver<IncomingMessage>, Sender<OutgoingMessage>)> {
    let (node, incoming_message_rx, outgoing_message_tx) = NodeBuilder::new()
        .with_keypair(keypair)
        .with_port(port)
        .with_authentication()
        .with_hardware_challenge()
        .with_invite()
        .with_get_task_logs()
        .with_restart()
        .with_cancellation_token(cancellation_token)
        .try_build()
        .context("failed to build p2p node")?;
    Ok((node, incoming_message_rx, outgoing_message_tx))
}
/// Shared state handed to every message-handler task. Clones are cheap and
/// all interior collections sit behind `Arc<RwLock<…>>`, so every clone
/// observes the same data.
#[derive(Clone)]
struct Context {
    // peers that completed validator authentication; gates privileged requests
    authorized_peers: Arc<RwLock<HashSet<PeerId>>>,
    wallet: Wallet,
    // on-chain addresses allowed to authenticate as validators
    validator_addresses: Arc<HashSet<alloy::primitives::Address>>,
    // for validator authentication requests: challenge issued per peer
    ongoing_auth_challenges: Arc<RwLock<HashMap<PeerId, String>>>, // use request_id?
    // challenge messages issued but not yet consumed, keyed to issue time
    nonce_cache: Arc<RwLock<HashMap<String, SystemTime>>>,
    outgoing_messages: Sender<OutgoingMessage>,
    // for get_task_logs and restart requests
    docker_service: Arc<DockerService>,
    // for invite requests
    heartbeat_service: Arc<HeartbeatService>,
    system_state: Arc<SystemState>,
    contracts: Contracts<WalletProvider>,
    provider_wallet: Wallet,
}
impl Context {
    /// Creates a handler context with empty authorization, challenge, and
    /// nonce tables; everything else is taken over from the caller.
    #[allow(clippy::too_many_arguments)]
    fn new(
        wallet: Wallet,
        outgoing_messages: Sender<OutgoingMessage>,
        validator_addresses: HashSet<alloy::primitives::Address>,
        docker_service: Arc<DockerService>,
        heartbeat_service: Arc<HeartbeatService>,
        system_state: Arc<SystemState>,
        contracts: Contracts<WalletProvider>,
        provider_wallet: Wallet,
    ) -> Self {
        // All mutable tables start empty; peers earn entries at runtime.
        let authorized_peers = Arc::new(RwLock::new(HashSet::new()));
        let ongoing_auth_challenges = Arc::new(RwLock::new(HashMap::new()));
        let nonce_cache = Arc::new(RwLock::new(HashMap::new()));
        Self {
            authorized_peers,
            ongoing_auth_challenges,
            nonce_cache,
            wallet,
            outgoing_messages,
            validator_addresses: Arc::new(validator_addresses),
            docker_service,
            heartbeat_service,
            system_state,
            contracts,
            provider_wallet,
        }
    }
}
/// Entry point for one message from the p2p layer: requests are dispatched
/// to `handle_incoming_request` (which replies over `channel`); responses
/// are routed to `handle_incoming_response`, which only logs them since the
/// worker never issues outbound requests.
async fn handle_incoming_message(message: IncomingMessage, context: Context) -> Result<()> {
    match message.message {
        Libp2pIncomingMessage::Request {
            request_id: _,
            request,
            channel,
        } => {
            tracing::debug!("received incoming request {request:?}");
            handle_incoming_request(message.peer, request, channel, context).await?;
        }
        Libp2pIncomingMessage::Response {
            request_id: _,
            response,
        } => {
            tracing::debug!("received incoming response {response:?}");
            handle_incoming_response(response);
        }
    }
    Ok(())
}
async fn handle_incoming_request(
from: PeerId,
request: p2p::Request,
channel: p2p::ResponseChannel,
context: Context,
) -> Result<()> {
let resp = match request {
p2p::Request::Authentication(req) => {
tracing::debug!("handling ValidatorAuthentication request");
match req {
p2p::AuthenticationRequest::Initiation(req) => {
handle_validator_authentication_initiation_request(from, req, &context)
.await
.context("failed to handle ValidatorAuthenticationInitiationRequest")?
}
p2p::AuthenticationRequest::Solution(req) => {
match handle_validator_authentication_solution_request(from, req, &context)
.await
{
Ok(()) => p2p::AuthenticationSolutionResponse::Granted.into(),
Err(e) => {
tracing::error!(
"failed to handle ValidatorAuthenticationSolutionRequest: {e:?}"
);
p2p::AuthenticationSolutionResponse::Rejected.into()
}
}
}
}
}
p2p::Request::HardwareChallenge(req) => {
tracing::debug!("handling HardwareChallenge request");
handle_hardware_challenge_request(from, req, &context)
.await
.context("failed to handle HardwareChallenge request")?
}
p2p::Request::Invite(req) => {
tracing::debug!("handling Invite request");
match handle_invite_request(from, req, &context).await {
Ok(()) => p2p::InviteResponse::Ok.into(),
Err(e) => p2p::InviteResponse::Error(e.to_string()).into(),
}
}
p2p::Request::GetTaskLogs => {
tracing::debug!("handling GetTaskLogs request");
handle_get_task_logs_request(from, &context).await
}
p2p::Request::RestartTask => {
tracing::debug!("handling Restart request");
handle_restart_request(from, &context).await
}
p2p::Request::General(_) => {
todo!()
}
};
let outgoing_message = resp.into_outgoing_message(channel);
context
.outgoing_messages
.send(outgoing_message)
.await
.context("failed to send ValidatorAuthentication response")?;
Ok(())
}
/// Handles the first step of the validator-authentication handshake.
///
/// The worker signs the validator-supplied message with its own wallet
/// (proving the worker's identity) and issues a fresh random challenge that
/// the validator must sign back in the follow-up Solution request.
async fn handle_validator_authentication_initiation_request(
    from: PeerId,
    req: p2p::AuthenticationInitiationRequest,
    context: &Context,
) -> Result<Response> {
    use rand_v8::Rng as _;
    use shared::security::request_signer::sign_message;
    // generate a fresh cryptographically secure challenge message for this auth attempt
    let challenge_bytes: [u8; 32] = rand_v8::rngs::OsRng.gen();
    let challenge_message = hex::encode(challenge_bytes);
    // prove our identity by signing the message the validator sent us
    let signature = sign_message(&req.message, &context.wallet)
        .await
        .map_err(|e| anyhow::anyhow!("failed to sign message: {e:?}"))?;
    // store the challenge message in nonce cache to prevent replay
    // NOTE(review): entries are only removed when a matching solution
    // arrives, so abandoned handshakes accumulate — consider adding expiry.
    let mut nonce_cache = context.nonce_cache.write().await;
    nonce_cache.insert(challenge_message.clone(), SystemTime::now());
    // store the current challenge for this peer (overwrites any prior one)
    let mut ongoing_auth_challenges = context.ongoing_auth_challenges.write().await;
    ongoing_auth_challenges.insert(from, challenge_message.clone());
    Ok(p2p::AuthenticationInitiationResponse {
        message: challenge_message,
        signature,
    }
    .into())
}
/// Verifies a validator's answer to a previously issued challenge.
///
/// Consumes the peer's outstanding challenge and its nonce-cache entry
/// (making each challenge single-use), recovers the signer address from the
/// submitted signature, and requires it to be an allow-listed validator
/// address. On success the peer is marked authorized for privileged
/// requests (hardware challenge, invite, logs, restart).
async fn handle_validator_authentication_solution_request(
    from: PeerId,
    req: p2p::AuthenticationSolutionRequest,
    context: &Context,
) -> Result<()> {
    use std::str::FromStr as _;
    // remove (not just read) so a second solution for the same challenge fails
    let mut ongoing_auth_challenges = context.ongoing_auth_challenges.write().await;
    let challenge_message = ongoing_auth_challenges
        .remove(&from)
        .ok_or_else(|| anyhow::anyhow!("no ongoing authentication challenge for peer {from}"))?;
    // removing the nonce is the replay protection
    let mut nonce_cache = context.nonce_cache.write().await;
    if nonce_cache.remove(&challenge_message).is_none() {
        anyhow::bail!("challenge message {challenge_message} not found in nonce cache");
    }
    let Ok(signature) = alloy::primitives::Signature::from_str(&req.signature) else {
        anyhow::bail!("failed to parse signature from message");
    };
    let Ok(recovered_address) = signature.recover_address_from_msg(challenge_message) else {
        anyhow::bail!("failed to recover address from signature and message");
    };
    if !context.validator_addresses.contains(&recovered_address) {
        anyhow::bail!("recovered address {recovered_address} is not in the list of authorized validator addresses");
    }
    let mut authorized_peers = context.authorized_peers.write().await;
    authorized_peers.insert(from);
    Ok(())
}
/// Computes the matrix-based hardware challenge for an authorized peer and
/// returns the result together with the current timestamp.
///
/// # Errors
/// Bails if the requesting peer has not completed validator authentication.
async fn handle_hardware_challenge_request(
    from: PeerId,
    request: p2p::HardwareChallengeRequest,
    context: &Context,
) -> Result<Response> {
    let authorized_peers = context.authorized_peers.read().await;
    if !authorized_peers.contains(&from) {
        // TODO: error response variant?
        anyhow::bail!("unauthorized peer {from} attempted to access HardwareChallenge request");
    }
    let challenge_response = p2p::calc_matrix(&request.challenge);
    let response = p2p::HardwareChallengeResponse {
        response: challenge_response,
        timestamp: SystemTime::now(),
    };
    Ok(response.into())
}
/// Returns container task logs to an authorized peer; unauthorized peers
/// receive an error response instead.
async fn handle_get_task_logs_request(from: PeerId, context: &Context) -> Response {
    // Fix: check authorization in a narrow scope so the read guard is
    // released before awaiting the (potentially slow) docker call — the old
    // code held the lock across the await, delaying authorization writers.
    let is_authorized = context.authorized_peers.read().await.contains(&from);
    if !is_authorized {
        return p2p::GetTaskLogsResponse::Error("unauthorized".to_string()).into();
    }
    match context.docker_service.get_logs().await {
        Ok(logs) => p2p::GetTaskLogsResponse::Ok(logs).into(),
        Err(e) => p2p::GetTaskLogsResponse::Error(format!("failed to get task logs: {e:?}")).into(),
    }
}
/// Restarts the current container task for an authorized peer; unauthorized
/// peers receive an error response instead.
async fn handle_restart_request(from: PeerId, context: &Context) -> Response {
    // Fix: check authorization in a narrow scope so the read guard is
    // released before awaiting the (potentially slow) docker restart — the
    // old code held the lock across the await, delaying authorization writers.
    let is_authorized = context.authorized_peers.read().await.contains(&from);
    if !is_authorized {
        return p2p::RestartTaskResponse::Error("unauthorized".to_string()).into();
    }
    match context.docker_service.restart_task().await {
        Ok(()) => p2p::RestartTaskResponse::Ok.into(),
        Err(e) => p2p::RestartTaskResponse::Error(format!("failed to restart task: {e:?}")).into(),
    }
}
/// Logs unexpected responses. The worker only ever *serves* requests over
/// p2p, so receiving any response here indicates a developer error in the
/// protocol wiring rather than a runtime condition.
fn handle_incoming_response(response: p2p::Response) {
    // critical developer error if any of these happen, could panic here
    match response {
        p2p::Response::Authentication(_) => {
            tracing::error!("worker should never receive ValidatorAuthentication responses");
        }
        p2p::Response::HardwareChallenge(_) => {
            tracing::error!("worker should never receive HardwareChallenge responses");
        }
        p2p::Response::Invite(_) => {
            tracing::error!("worker should never receive Invite responses");
        }
        p2p::Response::GetTaskLogs(_) => {
            tracing::error!("worker should never receive GetTaskLogs responses");
        }
        p2p::Response::RestartTask(_) => {
            tracing::error!("worker should never receive Restart responses");
        }
        p2p::Response::General(_) => {
            todo!()
        }
    }
}
/// Handles a compute-pool invite from an authorized peer.
///
/// Flow: validate the invite (peer authorization, not already in a pool,
/// pool ID match, well-formed 65-byte signature, pool not pending), submit
/// the on-chain join call with retries, then start heartbeating against the
/// master endpoint carried in the invite.
///
/// # Errors
/// Any validation or on-chain failure is returned; the caller surfaces it
/// to the peer as an `InviteResponse::Error`.
async fn handle_invite_request(
    from: PeerId,
    req: p2p::InviteRequest,
    context: &Context,
) -> Result<()> {
    use crate::console::Console;
    use shared::web3::contracts::helpers::utils::retry_call;
    use shared::web3::contracts::structs::compute_pool::PoolStatus;
    // NOTE(review): this read guard stays held across all awaits below,
    // including the on-chain call — tokio's RwLock permits that, but it
    // delays authorization writers; confirm this is acceptable.
    let authorized_peers = context.authorized_peers.read().await;
    if !authorized_peers.contains(&from) {
        return Err(anyhow::anyhow!(
            "unauthorized peer {from} attempted to send invite"
        ));
    }
    // reject if we are already heartbeating (i.e. already in a pool)
    if context.system_state.is_running().await {
        anyhow::bail!("heartbeat is currently running and in a compute pool");
    }
    if req.pool_id != context.system_state.get_compute_pool_id() {
        anyhow::bail!(
            "pool ID mismatch: expected {}, got {}",
            context.system_state.get_compute_pool_id(),
            req.pool_id
        );
    }
    // invite payload is a hex-encoded signature; first 65 bytes are used
    let invite_bytes = hex::decode(&req.invite).context("failed to decode invite hex")?;
    if invite_bytes.len() < 65 {
        anyhow::bail!("invite data is too short, expected at least 65 bytes");
    }
    let contracts = &context.contracts;
    let pool_id = alloy::primitives::U256::from(req.pool_id);
    let bytes_array: [u8; 65] = match invite_bytes[..65].try_into() {
        Ok(array) => array,
        Err(_) => {
            anyhow::bail!("failed to convert invite bytes to 65 byte array");
        }
    };
    let provider_address = context.provider_wallet.wallet.default_signer().address();
    let pool_info = match contracts.compute_pool.get_pool_info(pool_id).await {
        Ok(info) => info,
        Err(err) => {
            anyhow::bail!("failed to get pool info: {err:?}");
        }
    };
    // a pool that has not started yet cannot be joined
    if let PoolStatus::PENDING = pool_info.status {
        anyhow::bail!("invalid invite; pool is pending");
    }
    // join call is built for a single node: this worker's wallet address
    let node_address = vec![context.wallet.wallet.default_signer().address()];
    let signatures = vec![alloy::primitives::FixedBytes::from(&bytes_array)];
    let call = contracts
        .compute_pool
        .build_join_compute_pool_call(
            pool_id,
            provider_address,
            node_address,
            vec![req.nonce],
            vec![req.expiration],
            signatures,
        )
        .map_err(|e| anyhow::anyhow!("failed to build join compute pool call: {e:?}"))?;
    let provider = &context.provider_wallet.provider;
    // submit on-chain with up to 3 retries
    match retry_call(call, 3, provider.clone(), None).await {
        Ok(result) => {
            Console::section("WORKER JOINED COMPUTE POOL");
            Console::success(&format!(
                "Successfully registered on chain with tx: {result}"
            ));
            Console::info(
                "Status",
                "Worker is now part of the compute pool and ready to receive tasks",
            );
        }
        Err(err) => {
            anyhow::bail!("failed to join compute pool: {err:?}");
        }
    }
    // derive the master heartbeat endpoint from the invite's URL variant
    let heartbeat_endpoint = match req.url {
        InviteRequestUrl::MasterIpPort(ip, port) => {
            format!("http://{ip}:{port}/heartbeat")
        }
        InviteRequestUrl::MasterUrl(url) => format!("{url}/heartbeat"),
    };
    context
        .heartbeat_service
        .start(heartbeat_endpoint)
        .await
        .context("failed to start heartbeat service")?;
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/stun.rs | crates/worker/src/checks/stun.rs | use std::sync::Arc;
use std::time::Duration;
use tokio::net::UdpSocket;
use tokio::sync::mpsc;
use tokio::time::timeout;
use stun::agent::*;
use stun::client::*;
use stun::message::*;
use stun::xoraddr::*;
use tracing::{debug, error, info};
/// Detects this machine's public IP by querying public STUN servers.
pub(crate) struct StunCheck {
    // Per-server wait for the STUN binding response.
    pub timeout: Duration,
    // Local UDP port to bind (0 = OS-assigned ephemeral port).
    pub port: u16,
}
impl StunCheck {
    /// Creates a STUN check with the given per-server response timeout and
    /// local UDP port (0 lets the OS pick an ephemeral port).
    pub(crate) fn new(timeout: Duration, port: u16) -> Self {
        Self { timeout, port }
    }
    /// Performs one STUN binding request against `server` and returns the
    /// XOR-mapped (public) address as a string.
    ///
    /// # Errors
    /// Fails on bind/DNS/socket errors, on timeout waiting for the STUN
    /// response, or when the server answers with anything other than a
    /// binding success response.
    async fn get_ip_from_stun_server_example_pattern(
        &self,
        server: &str,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
        let bind_addr = format!("0.0.0.0:{}", self.port);
        let conn = UdpSocket::bind(bind_addr).await?;
        let local_addr = conn.local_addr()?;
        debug!("STUN local address for {}: {}", server, local_addr);
        let server_addr = tokio::net::lookup_host(server)
            .await?
            .next()
            .ok_or_else(|| format!("DNS resolution failed for {server}"))?;
        debug!("STUN server {} resolved to {}", server, server_addr);
        conn.connect(server_addr).await?;
        debug!("STUN UDP socket connected to {}", server_addr);
        let (handler_tx, mut handler_rx) = mpsc::unbounded_channel();
        let mut client = ClientBuilder::new().with_conn(Arc::new(conn)).build()?;
        let mut msg = Message::new();
        msg.build(&[Box::<TransactionId>::default(), Box::new(BINDING_REQUEST)])?;
        client.send(&msg, Some(Arc::new(handler_tx))).await?;
        debug!("STUN request sent to {}", server);
        let response_event = match timeout(self.timeout, handler_rx.recv()).await {
            Ok(Some(event)) => {
                debug!("STUN event received from {}", server);
                event
            }
            Ok(None) => {
                // Fix: close best-effort — propagating a close failure here
                // would mask the real error (was `client.close().await?`).
                let _ = client.close().await;
                return Err(
                    format!("STUN handler channel closed unexpectedly for {server}").into(),
                );
            }
            Err(_) => {
                let _ = client.close().await;
                return Err(format!("Timeout waiting for STUN response from {server}").into());
            }
        };
        let response_msg = match response_event.event_body {
            Ok(msg) => msg,
            Err(e) => {
                let _ = client.close().await;
                return Err(format!("Error in STUN event body from {server}: {e}").into());
            }
        };
        if response_msg.typ != MessageType::new(METHOD_BINDING, CLASS_SUCCESS_RESPONSE) {
            if response_msg.typ == MessageType::new(METHOD_BINDING, CLASS_ERROR_RESPONSE) {
                error!(
                    "STUN error response from {}: {:?}",
                    server, response_msg.typ
                );
            }
            let _ = client.close().await;
            return Err(format!(
                "Received unexpected STUN message type from {}: {:?}",
                server, response_msg.typ
            )
            .into());
        }
        let mut xor_addr = XorMappedAddress::default();
        // Fix: the old `xor_addr.get_from(&response_msg)?` returned without
        // closing the client on a parse failure.
        if let Err(e) = xor_addr.get_from(&response_msg) {
            let _ = client.close().await;
            return Err(e.into());
        }
        let public_ip = xor_addr.ip;
        info!(
            "STUN public IP {} ({}) obtained from {}",
            public_ip, xor_addr, server
        );
        client.close().await?;
        Ok(public_ip.to_string())
    }
    /// Tries a list of public STUN servers in order and returns the first
    /// successfully detected public IP.
    ///
    /// # Errors
    /// Returns the last server's error if every server fails.
    pub(crate) async fn get_public_ip(
        &self,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
        let stun_servers = [
            "stun.l.google.com:19302",
            "stun.stunprotocol.org:3478",
            "stun.cloudflare.com:3478",
            "stun.ekiga.net:3478",
            "stun.ideasip.com:3478",
        ];
        let mut last_error: Option<Box<dyn std::error::Error + Send + Sync>> = None;
        for server in stun_servers {
            match self.get_ip_from_stun_server_example_pattern(server).await {
                Ok(ip) => return Ok(ip),
                Err(e) => {
                    error!("STUN failed attempt with {}: {}", server, e);
                    last_error = Some(e);
                }
            }
        }
        Err(last_error.unwrap_or_else(|| {
            "Failed to get public IP from any STUN server (no specific error)".into()
        }))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;
    // Integration-style test: requires outbound UDP to public STUN servers,
    // so it will fail in offline or sandboxed environments.
    #[tokio::test]
    async fn test_get_public_ip() {
        let stun_check = StunCheck::new(Duration::from_secs(5), 0);
        let public_ip = stun_check.get_public_ip().await.unwrap();
        println!("Public IP: {}", public_ip);
        assert!(!public_ip.is_empty());
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/issue.rs | crates/worker/src/checks/issue.rs | use crate::console::Console;
use std::fmt;
use std::sync::{Arc, RwLock};
/// How serious an issue is: warnings are advisory, errors block operation
/// (see `IssueReport::has_critical_issues`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum Severity {
    Warning,
    Error,
}
/// Categories of problems the system checks can report; each maps to a
/// fixed `Severity` via `IssueType::severity`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum IssueType {
    NoGpu,                        // GPU required for compute
    DockerNotInstalled,           // Docker required for containers
    ContainerToolkitNotInstalled, // Container toolkit required for GPU
    InsufficientStorage,          // Minimum storage needed
    InsufficientMemory,           // Minimum RAM needed
    InsufficientCpu,              // Minimum CPU cores needed
    NetworkConnectivityIssue,     // Network performance issues
    NoStoragePath,                // No storage path found
    PortUnavailable,              // Port is unavailable
}
impl IssueType {
    /// Severity for each category: hard blockers (missing GPU, Docker,
    /// container toolkit, storage path, or port) are errors; resource and
    /// network shortfalls are only warnings.
    pub(crate) const fn severity(&self) -> Severity {
        match self {
            Self::NoGpu
            | Self::DockerNotInstalled
            | Self::ContainerToolkitNotInstalled
            | Self::NoStoragePath
            | Self::PortUnavailable => Severity::Error,
            Self::NetworkConnectivityIssue
            | Self::InsufficientCpu
            | Self::InsufficientMemory
            | Self::InsufficientStorage => Severity::Warning,
        }
    }
}
/// One reported problem: a category plus a human-readable description.
#[derive(Debug, Clone)]
pub(crate) struct Issue {
    issue_type: IssueType,
    message: String,
}
impl Issue {
pub(crate) fn new(issue_type: IssueType, message: impl Into<String>) -> Self {
Self {
issue_type,
message: message.into(),
}
}
pub(crate) const fn severity(&self) -> Severity {
self.issue_type.severity()
}
pub(crate) fn print(&self) {
match self.severity() {
Severity::Error => Console::user_error(&format!("{self}")),
Severity::Warning => Console::warning(&format!("{self}")),
}
}
}
impl fmt::Display for Issue {
    /// Renders as `"<IssueType:?>: <message>"`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!("{:?}: {}", self.issue_type, self.message))
    }
}
/// Thread-safe accumulator of issues found by the system checks. Clones
/// share the same underlying list via `Arc`.
#[derive(Debug, Default, Clone)]
pub(crate) struct IssueReport {
    issues: Arc<RwLock<Vec<Issue>>>,
}
impl IssueReport {
pub(crate) fn new() -> Self {
Self::default()
}
pub(crate) fn add_issue(&self, issue_type: IssueType, message: impl Into<String>) {
if let Ok(mut issues) = self.issues.write() {
issues.push(Issue::new(issue_type, message));
}
}
pub(crate) fn print_issues(&self) {
if let Ok(issues) = self.issues.read() {
if issues.is_empty() {
Console::success("No issues found");
return;
}
Console::section("System Check Issues");
for issue in issues.iter().filter(|i| i.severity() == Severity::Error) {
issue.print();
}
for issue in issues.iter().filter(|i| i.severity() == Severity::Warning) {
issue.print();
}
}
}
pub(crate) fn has_critical_issues(&self) -> bool {
if let Ok(issues) = self.issues.read() {
return issues
.iter()
.any(|issue| matches!(issue.severity(), Severity::Error));
}
false
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/mod.rs | crates/worker/src/checks/mod.rs | pub(crate) mod hardware;
pub(crate) mod issue;
pub(crate) mod software;
pub(crate) mod stun;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/gpu.rs | crates/worker/src/checks/hardware/gpu.rs | use crate::console::Console;
use lazy_static::lazy_static;
use nvml_wrapper::Nvml;
use shared::models::node::GpuSpecs;
use std::sync::Mutex;
#[allow(dead_code)]
const BYTES_TO_MB: u64 = 1024 * 1024;
// Use lazy_static to initialize NVML once and reuse it
lazy_static! {
static ref NVML: Mutex<Option<Nvml>> = Mutex::new(None);
}
#[derive(Debug)]
#[allow(dead_code)]
struct GpuDevice {
name: String,
memory: u64,
driver_version: String,
count: u32,
indices: Vec<u32>,
}
pub(crate) fn detect_gpu() -> Vec<GpuSpecs> {
Console::title("GPU Detection");
let gpu_devices = get_gpu_status();
if gpu_devices.is_empty() {
Console::user_error("No GPU devices detected");
return vec![];
}
gpu_devices
.into_iter()
.map(|device| GpuSpecs {
count: Some(device.count),
model: Some(device.name.to_lowercase()),
memory_mb: Some((device.memory / BYTES_TO_MB) as u32),
indices: Some(device.indices),
})
.collect()
}
fn get_gpu_status() -> Vec<GpuDevice> {
let mut nvml_guard = NVML.lock().unwrap();
// Read WORKER_VISIBLE_DEVICES environment variable
let visible_devices: Option<Vec<u32>> =
std::env::var("WORKER_VISIBLE_DEVICES").ok().and_then(|s| {
if s.trim().is_empty() {
None
} else {
Some(
s.split(',')
.filter_map(|idx| idx.trim().parse::<u32>().ok())
.collect(),
)
}
});
// Initialize NVML if not already initialized
if nvml_guard.is_none() {
// Try to load the NVIDIA management library dynamically
let lib_paths = [
"libnvidia-ml.so.1", // Standard Linux path
"/usr/lib/x86_64-linux-gnu/libnvidia-ml.so.1", // Explicit path as fallback
"/usr/lib/libnvidia-ml.so.1", // CUDA installation path
];
let mut success = false;
for path in lib_paths {
match Nvml::builder().lib_path(std::ffi::OsStr::new(path)).init() {
Ok(nvml) => {
*nvml_guard = Some(nvml);
success = true;
break;
}
Err(_) => continue,
}
}
if !success {
Console::user_error(
"Failed to initialize NVML: could not load NVIDIA management library (libnvidia-ml.so.1)",
);
return vec![];
}
}
let nvml = nvml_guard.as_ref().unwrap();
// Get device count
let device_count = match nvml.device_count() {
Ok(count) => count as usize,
Err(e) => {
Console::user_error(&format!("Failed to get device count: {e}"));
return vec![];
}
};
if device_count == 0 {
Console::user_error("No GPU devices detected");
return vec![];
}
let mut device_map: std::collections::HashMap<String, GpuDevice> =
std::collections::HashMap::new();
for i in 0..device_count {
let device_index = i as u32;
// Skip this device if it's not in the visible devices list
if let Some(ref visible) = visible_devices {
if !visible.contains(&device_index) {
continue;
}
}
match nvml.device_by_index(i as u32) {
Ok(device) => {
let name = device.name().unwrap_or_else(|_| "Unknown".to_string());
let memory = device.memory_info().map(|m| m.total).unwrap_or(0);
let driver_version = nvml
.sys_driver_version()
.unwrap_or_else(|_| "Unknown".to_string());
if let Some(existing_device) = device_map.get_mut(&name) {
existing_device.count += 1;
existing_device.indices.push(i as u32);
} else {
device_map.insert(
name.clone(),
GpuDevice {
name,
memory,
driver_version,
count: 1,
indices: vec![i as u32],
},
);
}
}
Err(e) => {
Console::user_error(&format!("Failed to get device {i}: {e}"));
}
}
}
device_map.into_values().collect()
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/memory.rs | crates/worker/src/checks/hardware/memory.rs | use crate::console::Console;
use sysinfo::System;
const BYTES_TO_GB: u64 = 1024 * 1024 * 1024;
pub(crate) fn get_memory_info(sys: &System) -> (u64, u64) {
let total_memory = sys.total_memory();
let free_memory = sys.available_memory();
(total_memory, free_memory)
}
pub(crate) fn convert_to_mb(memory: u64) -> u64 {
memory / (1024 * 1024)
}
pub(crate) fn print_memory_info(total_memory: u64, free_memory: u64) {
let total_gb = (total_memory + BYTES_TO_GB / 2) / BYTES_TO_GB;
let free_gb = (free_memory + BYTES_TO_GB / 2) / BYTES_TO_GB;
Console::title("Memory Information:");
Console::info("Total Memory", &format!("{total_gb:.1} GB"));
Console::info("Free Memory", &format!("{free_gb:.1} GB"));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_print_memory_info() {
// Since this is just printing, we'll test that it doesn't panic
print_memory_info(8 * 1024 * 1024 * 1024, 4 * 1024 * 1024 * 1024); // 8GB total, 4GB free
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/storage_path.rs | crates/worker/src/checks/hardware/storage_path.rs | use super::storage;
use super::storage::APP_DIR_NAME;
use crate::{
checks::issue::{IssueReport, IssueType},
console::Console,
};
use log::info;
use std::sync::Arc;
use tokio::sync::RwLock;
pub(crate) struct StoragePathDetector {
issues: Arc<RwLock<IssueReport>>,
}
impl StoragePathDetector {
pub(crate) fn new(issues: Arc<RwLock<IssueReport>>) -> Self {
Self { issues }
}
pub(crate) async fn detect_storage_path(
&self,
storage_path_override: Option<String>,
) -> Result<(String, Option<u64>), Box<dyn std::error::Error>> {
if let Some(override_path) = storage_path_override {
self.validate_override_path(&override_path)?;
let available_space = if cfg!(target_os = "linux") {
storage::get_available_space(&override_path)
} else {
None
};
Ok((override_path, available_space))
} else if cfg!(target_os = "linux") {
self.detect_linux_storage_path().await
} else {
self.detect_cross_platform_storage_path().await
}
}
fn validate_override_path(&self, path: &str) -> Result<(), Box<dyn std::error::Error>> {
if !std::path::Path::new(path).exists() {
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::NotFound,
format!("Storage path override does not exist: {path}"),
)));
}
if let Err(e) = std::fs::metadata(path) {
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::PermissionDenied,
format!("Storage path override is not accessible: {path} ({e})"),
)));
}
Ok(())
}
async fn detect_linux_storage_path(
&self,
) -> Result<(String, Option<u64>), Box<dyn std::error::Error>> {
// First try automatic storage detection
if let Some(mount_point) = storage::find_largest_storage() {
info!(
"Automatically found largest storage mount point: {}",
mount_point.path
);
return Ok((mount_point.path, Some(mount_point.available_space)));
}
// Try fallback paths
let fallback_paths = vec![
format!("/var/lib/{}", APP_DIR_NAME),
format!("/opt/{}", APP_DIR_NAME),
format!("/home/{}", APP_DIR_NAME),
];
// Add user home directory option
let mut all_paths = fallback_paths;
if let Ok(home) = std::env::var("HOME") {
all_paths.push(format!("{home}/{APP_DIR_NAME}"));
}
for path in all_paths {
if std::path::Path::new(&path)
.parent()
.is_some_and(|p| p.exists())
{
Console::warning(&format!(
"No suitable storage mount found, using fallback path: {path}"
));
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(
IssueType::NoStoragePath,
"No suitable storage mount found, using fallback path",
);
// Get available space for the fallback path
let available_space = storage::get_available_space(&path);
return Ok((path, available_space));
}
}
// Last resort - current directory
let current_dir = std::env::current_dir()
.map(|p| {
p.join(format!("{APP_DIR_NAME}-data"))
.to_string_lossy()
.to_string()
})
.unwrap_or_else(|_| format!("./{APP_DIR_NAME}-data"));
Console::warning(&format!("Using current directory fallback: {current_dir}"));
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(IssueType::NoStoragePath, "Using current directory fallback");
// Get available space for current directory fallback
let available_space = storage::get_available_space(¤t_dir);
Ok((current_dir, available_space))
}
async fn detect_cross_platform_storage_path(
&self,
) -> Result<(String, Option<u64>), Box<dyn std::error::Error>> {
// For non-Linux systems, try user directory first
let default_path = std::env::var("HOME")
.or_else(|_| std::env::var("USERPROFILE"))
.map(|home| format!("{home}/{APP_DIR_NAME}"))
.unwrap_or_else(|_| {
std::env::current_dir()
.map(|p| {
p.join(format!("{APP_DIR_NAME}-data"))
.to_string_lossy()
.to_string()
})
.unwrap_or_else(|_| format!("./{APP_DIR_NAME}-data"))
});
Console::info(
"Storage Path",
&format!("Using platform-appropriate storage path: {default_path}"),
);
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(
IssueType::NoStoragePath,
"Using platform-appropriate storage path",
);
// Non-Linux systems don't have available space detection
Ok((default_path, None))
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/interconnect.rs | crates/worker/src/checks/hardware/interconnect.rs | use rand::RngCore;
use reqwest::Client;
use std::time::Instant;
pub(crate) struct InterconnectCheck;
impl InterconnectCheck {
pub(crate) async fn check_speeds() -> Result<(f64, f64), Box<dyn std::error::Error>> {
let client = Client::new();
// Download test: Request a 10 MB file using the query parameter.
// Cloudflare's speed test endpoint is not officially documented or guaranteed
// Consider using a more reliable speed test service or implementing our own test server
let download_bytes = 10 * 1024 * 1024; // 10 MB
let download_url = format!("https://speed.cloudflare.com/__down?bytes={download_bytes}");
let start = Instant::now();
let response = client.get(&download_url).send().await?;
// Verify we got a successful response
if !response.status().is_success() {
return Err(format!("Speed test failed with status: {}", response.status()).into());
}
let data = response.bytes().await?;
// Verify we got the expected amount of data
if data.len() != download_bytes {
return Err(format!(
"Received {} bytes but expected {} bytes",
data.len(),
download_bytes
)
.into());
}
let elapsed = start.elapsed().as_secs_f64();
let download_speed_mbps = (data.len() as f64 * 8.0) / (elapsed * 1_000_000.0);
// Upload test: Generate 10 MB of random data.
let upload_url = "https://speed.cloudflare.com/__up";
let upload_size = 5 * 1024 * 1024; // 5 MB
let mut rng = rand::rng();
let mut upload_data = vec![0u8; upload_size];
rng.fill_bytes(&mut upload_data);
let start = Instant::now();
let upload_result = tokio::time::timeout(
std::time::Duration::from_secs(30), // 30 second timeout
client
.post(upload_url)
.header("Content-Type", "application/octet-stream")
.body(upload_data)
.send(),
)
.await;
let upload_speed_mbps = match upload_result {
Ok(response) => match response {
Ok(_) => {
let elapsed = start.elapsed().as_secs_f64();
(upload_size as f64 * 8.0) / (elapsed * 1_000_000.0)
}
Err(_) => 0.0,
},
Err(_) => {
println!("Upload speed test timed out after 30 seconds");
0.0
}
};
Ok((download_speed_mbps, upload_speed_mbps))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_check_speeds() {
let result = InterconnectCheck::check_speeds().await;
println!("Test Result: {:?}", result);
// Verify the result is Ok and contains expected tuple structure
assert!(result.is_ok());
let (download_speed, upload_speed) = result.unwrap();
// Verify speeds are positive numbers
assert!(download_speed > 0.0, "Download speed should be positive");
assert!(upload_speed > 0.0, "Upload speed should be positive");
// Verify speeds are within reasonable bounds (0.1 Mbps to 10000 Mbps)
assert!(download_speed >= 0.1, "Download speed too low");
assert!(
download_speed <= 10000.0,
"Download speed unreasonably high"
);
assert!(upload_speed >= 0.1, "Upload speed too low");
assert!(upload_speed <= 10000.0, "Upload speed unreasonably high");
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/storage.rs | crates/worker/src/checks/hardware/storage.rs | use crate::console::Console;
#[cfg(target_os = "linux")]
use libc::{statvfs, statvfs as statvfs_t};
use std::env;
#[cfg(target_os = "linux")]
use std::ffi::CString;
#[cfg(target_os = "linux")]
use std::fs;
pub(crate) const BYTES_TO_GB: f64 = 1024.0 * 1024.0 * 1024.0;
pub(crate) const APP_DIR_NAME: &str = "prime-worker";
#[derive(Clone)]
pub(crate) struct MountPoint {
pub path: String,
pub available_space: u64,
}
#[cfg(unix)]
pub(crate) fn get_storage_info() -> Result<(f64, f64), std::io::Error> {
let mut stat: libc::statvfs = unsafe { std::mem::zeroed() };
// Use current directory instead of hardcoded "."
let current_dir = env::current_dir()?;
let path_str = current_dir.to_string_lossy();
if unsafe { libc::statvfs(path_str.as_ptr() as *const i8, &mut stat) } != 0 {
return Err(std::io::Error::last_os_error());
}
#[cfg(target_os = "macos")]
{
#[allow(clippy::useless_conversion)]
let blocks = u64::from(stat.f_blocks);
let frsize = stat.f_frsize;
#[allow(clippy::useless_conversion)]
let bavail = u64::from(stat.f_bavail);
let total_gb = (blocks * frsize) as f64 / BYTES_TO_GB;
let free_gb = (bavail * frsize) as f64 / BYTES_TO_GB;
Ok((total_gb, free_gb))
}
#[cfg(target_os = "linux")]
{
let total_gb = (stat.f_blocks * stat.f_frsize) as f64 / BYTES_TO_GB;
let free_gb = (stat.f_bavail * stat.f_frsize) as f64 / BYTES_TO_GB;
Ok((total_gb, free_gb))
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
{
#[allow(clippy::useless_conversion)]
let blocks = u64::from(stat.f_blocks);
#[allow(clippy::useless_conversion)]
let frsize = u64::from(stat.f_frsize);
#[allow(clippy::useless_conversion)]
let bavail = u64::from(stat.f_bavail);
let total_gb = (blocks * frsize) as f64 / BYTES_TO_GB;
let free_gb = (bavail * frsize) as f64 / BYTES_TO_GB;
Ok((total_gb, free_gb))
}
}
#[cfg(not(unix))]
pub fn get_storage_info() -> Result<(f64, f64), std::io::Error> {
Err(std::io::Error::new(
std::io::ErrorKind::Unsupported,
"Storage detection not supported on this platform",
))
}
#[cfg(target_os = "linux")]
pub(crate) fn find_largest_storage() -> Option<MountPoint> {
const VALID_FS: [&str; 4] = ["ext4", "xfs", "btrfs", "zfs"];
const MIN_SPACE: u64 = 1_000_000_000; // 1GB minimum
let mut mount_points = Vec::new();
let username = std::env::var("USER").unwrap_or_else(|_| "ubuntu".to_string());
if let Ok(mounts) = fs::read_to_string("/proc/mounts") {
for line in mounts.lines() {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 3 {
continue;
}
let mount_path = parts[1];
let fstype = parts[2];
// Skip if not a valid filesystem type
if !VALID_FS.contains(&fstype) {
continue;
}
// Skip system/special mounts and Docker volumes
if mount_path.starts_with("/proc")
|| mount_path.starts_with("/sys")
|| mount_path.starts_with("/dev")
|| mount_path.starts_with("/run")
|| mount_path.starts_with("/var/lib/docker/volumes")
{
continue;
}
// Check available space on this mount point
let mut stats: statvfs_t = unsafe { std::mem::zeroed() };
let Ok(path_c) = CString::new(mount_path) else {
continue;
};
if unsafe { statvfs(path_c.as_ptr(), &mut stats) } != 0 {
continue;
}
let available_space = stats.f_bsize * stats.f_bavail;
if available_space <= MIN_SPACE {
continue;
}
// Try to find the best writable location on this mount point
if let Some(best_path) = find_best_writable_path(mount_path, &username) {
mount_points.push(MountPoint {
path: best_path,
available_space,
});
}
}
}
// Return the mount point with the most available space
mount_points.into_iter().max_by_key(|m| m.available_space)
}
/// Find the best writable path on a given mount point
#[cfg(target_os = "linux")]
fn find_best_writable_path(mount_path: &str, username: &str) -> Option<String> {
// List of potential base directories to try, in order of preference
let potential_bases = vec![
// User home directory (highest preference if on this mount)
format!("{}/home/{}", mount_path.trim_end_matches('/'), username),
// Standard application directories
format!("{}/opt", mount_path.trim_end_matches('/')),
format!("{}/var/lib", mount_path.trim_end_matches('/')),
// Cloud/ephemeral storage (common in cloud environments)
format!("{}/workspace", mount_path.trim_end_matches('/')),
format!("{}/ephemeral", mount_path.trim_end_matches('/')),
format!("{}/tmp", mount_path.trim_end_matches('/')),
// Mount root as last resort
mount_path.trim_end_matches('/').to_string(),
];
for base_dir in potential_bases {
// First check if the base directory exists and is writable
if !test_directory_writable(&base_dir) {
continue;
}
// Try to create our app directory within this base
let app_path = if base_dir == mount_path.trim_end_matches('/') {
// If using mount root, create the app directory directly
format!("{base_dir}/{APP_DIR_NAME}")
} else {
// Otherwise, nest it properly
format!("{base_dir}/{APP_DIR_NAME}")
};
// Test if we can create and write to our app directory
if test_or_create_app_directory(&app_path) {
return Some(app_path);
}
}
None
}
/// Test if a directory is writable
#[cfg(target_os = "linux")]
fn test_directory_writable(path: &str) -> bool {
// Check if directory exists
if !std::path::Path::new(path).is_dir() {
return false;
}
// Test write access using libc::access
match CString::new(path) {
Ok(path_c) => {
let result = unsafe { libc::access(path_c.as_ptr(), libc::W_OK) };
result == 0
}
Err(_) => false,
}
}
/// Test if we can create and use our app directory
#[cfg(target_os = "linux")]
fn test_or_create_app_directory(path: &str) -> bool {
let path_buf = std::path::Path::new(path);
// If directory doesn't exist, try to create it
if !path_buf.exists() && std::fs::create_dir_all(path_buf).is_err() {
return false;
}
// Verify it's a directory
if !path_buf.is_dir() {
return false;
}
// Test actual write permissions by creating a temporary file
let test_file = path_buf.join(".write_test");
match std::fs::write(&test_file, b"test") {
Ok(_) => {
// Clean up test file
let _ = std::fs::remove_file(&test_file);
true
}
Err(_) => false,
}
}
#[cfg(not(target_os = "linux"))]
pub fn find_largest_storage() -> Option<MountPoint> {
None
}
#[cfg(target_os = "linux")]
pub(crate) fn get_available_space(path: &str) -> Option<u64> {
let mut stats: statvfs_t = unsafe { std::mem::zeroed() };
if let Ok(path_c) = CString::new(path) {
if unsafe { statvfs(path_c.as_ptr(), &mut stats) } == 0 {
let available = stats.f_bsize * stats.f_bavail;
return Some(available);
}
}
None
}
#[cfg(not(target_os = "linux"))]
pub fn get_available_space(_path: &str) -> Option<u64> {
None
}
#[allow(dead_code)]
pub(crate) fn print_storage_info() {
match get_storage_info() {
Ok((total, free)) => {
Console::title("Storage Information:");
Console::info("Total Storage", &format!("{total:.1} GB"));
Console::info("Free Storage", &format!("{free:.1} GB"));
}
Err(e) => log::error!("Storage Error: {e}"),
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
#[cfg(unix)]
fn test_storage_info() {
// Ensure we're in a directory we can read
let test_dir = env::temp_dir();
env::set_current_dir(test_dir).expect("Failed to change to temp directory");
let (total, free) = get_storage_info().unwrap();
assert!(total > 0.0, "Total storage should be greater than 0");
assert!(free >= 0.0, "Free storage should be non-negative");
assert!(total >= free, "Total storage should be >= free storage");
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/mod.rs | crates/worker/src/checks/hardware/mod.rs | pub(crate) mod gpu;
pub(crate) mod hardware_check;
pub(crate) mod interconnect;
pub(crate) mod memory;
pub(crate) mod storage;
pub(crate) mod storage_path;
pub(crate) use hardware_check::HardwareChecker;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/hardware/hardware_check.rs | crates/worker/src/checks/hardware/hardware_check.rs | use super::{
gpu::detect_gpu,
interconnect::InterconnectCheck,
memory::{convert_to_mb, get_memory_info, print_memory_info},
storage::{get_storage_info, BYTES_TO_GB},
storage_path::StoragePathDetector,
};
use crate::{
checks::issue::{IssueReport, IssueType},
console::Console,
};
use shared::models::node::{ComputeSpecs, CpuSpecs, GpuSpecs, Node};
use std::sync::Arc;
use sysinfo::{self, System};
use tokio::sync::RwLock;
pub(crate) struct HardwareChecker {
sys: System,
issues: Arc<RwLock<IssueReport>>,
}
impl HardwareChecker {
pub(crate) fn new(issues: Option<Arc<RwLock<IssueReport>>>) -> Self {
let mut sys = System::new_all();
sys.refresh_all();
Self {
sys,
issues: issues.unwrap_or_else(|| Arc::new(RwLock::new(IssueReport::new()))),
}
}
pub(crate) async fn check_hardware(
&mut self,
node_config: Node,
storage_path_override: Option<String>,
) -> Result<Node, Box<dyn std::error::Error>> {
let mut node_config = node_config;
self.collect_system_info(&mut node_config, storage_path_override)
.await?;
self.print_system_info(&node_config);
Ok(node_config)
}
async fn collect_system_info(
&mut self,
node_config: &mut Node,
storage_path_override: Option<String>,
) -> Result<(), Box<dyn std::error::Error>> {
Console::section("Hardware Checks");
let issue_tracker = self.issues.write().await;
if self.sys.cpus().is_empty() {
issue_tracker.add_issue(
IssueType::InsufficientCpu,
"Failed to detect CPU information",
);
return Err(Box::new(std::io::Error::other(
"Failed to detect CPU information",
)));
}
let cpu_specs = self.collect_cpu_specs()?;
let gpu_specs = self.collect_gpu_specs()?;
let (ram_mb, storage_gb) = self.collect_memory_specs()?;
// Check minimum requirements
if cpu_specs.cores.unwrap_or(0) < 4 {
issue_tracker.add_issue(IssueType::InsufficientCpu, "Minimum 4 CPU cores required");
}
if ram_mb < 8192 {
// 8GB minimum
issue_tracker.add_issue(IssueType::InsufficientMemory, "Minimum 8GB RAM required");
}
if gpu_specs.is_none() {
issue_tracker.add_issue(IssueType::NoGpu, "No GPU detected");
}
// Drop the write lock before calling async method
drop(issue_tracker);
// Detect storage path using dedicated detector
let storage_path_detector = StoragePathDetector::new(self.issues.clone());
let (storage_path, available_space) = storage_path_detector
.detect_storage_path(storage_path_override)
.await?;
if available_space.is_some() && available_space.unwrap() < 1000 {
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(
IssueType::InsufficientStorage,
"Minimum 1000GB storage required",
);
}
let storage_gb_value = match available_space {
Some(space) => (space as f64 / BYTES_TO_GB) as u32,
None => storage_gb,
};
// Check network speeds
Console::title("Network Speed Test:");
Console::progress("Starting network speed test...");
match InterconnectCheck::check_speeds().await {
Ok((download_speed, upload_speed)) => {
Console::info("Download Speed", &format!("{download_speed:.2} Mbps"));
Console::info("Upload Speed", &format!("{upload_speed:.2} Mbps"));
if download_speed < 50.0 || upload_speed < 50.0 {
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(
IssueType::NetworkConnectivityIssue,
"Network speed below recommended 50Mbps",
);
}
}
Err(_) => {
let issue_tracker = self.issues.write().await;
issue_tracker.add_issue(
IssueType::NetworkConnectivityIssue,
"Failed to perform network speed test",
);
Console::warning("Failed to perform network speed test");
}
}
node_config.compute_specs = Some(ComputeSpecs {
cpu: Some(cpu_specs),
ram_mb: Some(ram_mb),
storage_gb: Some(storage_gb_value),
storage_path,
gpu: gpu_specs,
});
Ok(())
}
fn collect_cpu_specs(&self) -> Result<CpuSpecs, Box<dyn std::error::Error>> {
Ok(CpuSpecs {
cores: Some(self.sys.cpus().len() as u32),
model: Some(self.sys.cpus()[0].brand().to_string()),
})
}
fn collect_gpu_specs(&self) -> Result<Option<GpuSpecs>, Box<dyn std::error::Error>> {
let gpu_specs = detect_gpu();
if gpu_specs.is_empty() {
return Ok(None);
}
let main_gpu = gpu_specs
.into_iter()
.max_by_key(|gpu| gpu.count.unwrap_or(0));
Ok(main_gpu)
}
fn collect_memory_specs(&self) -> Result<(u32, u32), Box<dyn std::error::Error>> {
let (total_memory, _) = get_memory_info(&self.sys);
let (total_storage, _) = get_storage_info()?;
// Convert bytes to MB for RAM and GB for storage
let ram_mb = convert_to_mb(total_memory);
let storage_gb = total_storage;
Ok((ram_mb as u32, storage_gb as u32))
}
fn print_system_info(&self, node_config: &Node) {
// Print CPU Info
Console::title("CPU Information:");
if let Some(compute_specs) = &node_config.compute_specs {
if let Some(cpu) = &compute_specs.cpu {
Console::info("Cores", &cpu.cores.unwrap_or(0).to_string());
Console::info(
"Model",
cpu.model.as_ref().unwrap_or(&"Unknown".to_string()),
);
}
}
// Print Memory Info
if let Some(compute_specs) = &node_config.compute_specs {
let (total_memory, free_memory) = get_memory_info(&self.sys);
print_memory_info(total_memory, free_memory);
// Print Storage Info
if let Some(storage_gb) = &compute_specs.storage_gb {
Console::title("Storage Information:");
Console::info("Total Storage", &format!("{storage_gb} GB"));
}
Console::info(
"Storage Path for docker mounts",
&compute_specs.storage_path,
);
}
// Print GPU Info
if let Some(compute_specs) = &node_config.compute_specs {
if let Some(gpu) = &compute_specs.gpu {
Console::title("GPU Information:");
Console::info("Count", &gpu.count.unwrap_or(0).to_string());
Console::info(
"Model",
gpu.model.as_ref().unwrap_or(&"Unknown".to_string()),
);
// Convert memory from MB to GB and round
let memory_gb = if let Some(memory_mb) = gpu.memory_mb {
memory_mb as f64 / 1024.0
} else {
0.0
};
Console::info("Memory", &format!("{memory_gb:.0} GB"));
}
} else {
Console::warning("No compute specs available");
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/software/software_check.rs | crates/worker/src/checks/software/software_check.rs | use super::{docker::check_docker_installed, port::check_port_available};
use crate::checks::issue::IssueReport;
use crate::console::Console;
use shared::models::node::Node;
use std::sync::Arc;
use tokio::sync::RwLock;
pub(crate) struct SoftwareChecker {
issues: Arc<RwLock<IssueReport>>,
}
impl SoftwareChecker {
pub(crate) fn new(issues: Option<Arc<RwLock<IssueReport>>>) -> Self {
Self {
issues: issues.unwrap_or_else(|| Arc::new(RwLock::new(IssueReport::new()))),
}
}
pub(crate) async fn check_software(
&self,
node_config: &Node,
) -> Result<(), Box<dyn std::error::Error>> {
// Check Docker installation and connectivity
Console::title("Docker:");
check_docker_installed(&self.issues).await?;
// Check port availability
Console::title("Port:");
check_port_available(&self.issues, node_config.port).await?;
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/software/docker.rs | crates/worker/src/checks/software/docker.rs | use crate::checks::issue::{IssueReport, IssueType};
use crate::console::Console;
use bollard::container::ListContainersOptions;
use bollard::Docker;
use std::sync::Arc;
use tokio::sync::RwLock;
pub(crate) async fn check_docker_installed(
issues: &Arc<RwLock<IssueReport>>,
) -> Result<(), Box<dyn std::error::Error>> {
let issue_tracker = issues.read().await;
let docker_path = std::process::Command::new("which")
.arg("docker")
.output()
.map_err(|e| {
issue_tracker.add_issue(
IssueType::DockerNotInstalled,
format!("Failed to execute 'which docker': {e}"),
);
e
})?;
if !docker_path.status.success() {
issue_tracker.add_issue(IssueType::DockerNotInstalled, "Docker is not installed");
return Ok(());
}
let docker_info = std::process::Command::new("docker").output().map_err(|e| {
issue_tracker.add_issue(
IssueType::DockerNotInstalled,
format!(
"Failed to execute 'docker ps': {e}. You may need to add your user to the docker group."
)
);
e
})?;
if !docker_info.status.success() {
issue_tracker.add_issue(
IssueType::DockerNotInstalled,
"Docker daemon is not running",
);
return Ok(());
}
// Check if Docker API is accessible with proper permissions
match Docker::connect_with_unix_defaults() {
Ok(docker) => {
// Try to list containers to verify permissions
match docker
.list_containers::<String>(Some(ListContainersOptions {
all: true,
..Default::default()
}))
.await
{
Ok(_) => Console::success("Docker API accessible"),
Err(e) => {
issue_tracker.add_issue(
IssueType::DockerNotInstalled,
format!("Docker API permission denied: {e}. You may need to add your user to the docker group. To fix this, run: 'sudo usermod -aG docker $USER' and then log out and back in."),
);
}
}
}
Err(e) => {
issue_tracker.add_issue(
IssueType::DockerNotInstalled,
format!("Failed to connect to Docker API: {e}. You may need to add your user to the docker group."),
);
}
}
Console::success("Docker ready");
// Check if NVIDIA Container Toolkit is installed using which command
let nvidia_toolkit = std::process::Command::new("which")
.arg("nvidia-ctk")
.output()
.map_err(|e| {
issue_tracker.add_issue(
IssueType::ContainerToolkitNotInstalled,
format!("Failed to check for nvidia-ctk: {e}"),
);
e
})?;
if nvidia_toolkit.status.success() {
// If which succeeds, check if it's working properly
let version_check = std::process::Command::new("nvidia-ctk")
.arg("--version")
.output()
.map_err(|e| {
issue_tracker.add_issue(
IssueType::ContainerToolkitNotInstalled,
format!("Failed to run nvidia-ctk: {e}"),
);
e
})?;
if version_check.status.success() {
Console::success("NVIDIA toolkit ready");
} else {
issue_tracker.add_issue(
IssueType::ContainerToolkitNotInstalled,
"NVIDIA toolkit not configured properly",
);
}
} else {
issue_tracker.add_issue(
IssueType::ContainerToolkitNotInstalled,
"NVIDIA toolkit not found",
);
}
Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/software/port.rs | crates/worker/src/checks/software/port.rs | use crate::checks::issue::{IssueReport, IssueType};
use crate::console::Console;
use anyhow::Result;
use std::net::TcpListener;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Probe whether `port` can be bound on all interfaces (`0.0.0.0`).
///
/// Succeeds when the port is free; the probe listener is dropped right away so
/// the port is released for the actual consumer. Returns the bind error when
/// the port is already taken.
fn try_bind_port(port: u16) -> Result<()> {
    // Binding is the probe itself: success means available, failure means taken.
    let probe = TcpListener::bind(("0.0.0.0", port))?;
    drop(probe);
    Ok(())
}
/// Checks that `port` can be bound locally, recording a `PortUnavailable`
/// issue in the shared report when it cannot.
///
/// Always returns `Ok(())`; an unavailable port is reported, not propagated.
pub(crate) async fn check_port_available(
    issues: &Arc<RwLock<IssueReport>>,
    port: u16,
) -> Result<()> {
    let report = issues.read().await;
    if let Err(bind_err) = try_bind_port(port) {
        report.add_issue(
            IssueType::PortUnavailable,
            format!("Port {port} is not available: {bind_err}"),
        );
    } else {
        Console::success("Port is available");
    }
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/checks/software/mod.rs | crates/worker/src/checks/software/mod.rs | pub(crate) mod docker;
pub(crate) mod port;
pub(crate) mod software_check;
pub(crate) use software_check::SoftwareChecker;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/lib.rs | crates/orchestrator/src/lib.rs | mod api;
mod discovery;
mod metrics;
mod models;
mod node;
mod p2p;
mod plugins;
mod scheduler;
mod status_update;
mod store;
mod utils;
pub use api::server::start_server;
pub use discovery::monitor::DiscoveryMonitor;
pub use metrics::sync_service::MetricsSyncService;
pub use metrics::webhook_sender::MetricsWebhookSender;
pub use metrics::MetricsContext;
pub use node::invite::NodeInviter;
pub use p2p::Service as P2PService;
pub use plugins::node_groups::NodeGroupConfiguration;
pub use plugins::node_groups::NodeGroupsPlugin;
pub use plugins::webhook::WebhookConfig;
pub use plugins::webhook::WebhookPlugin;
pub use plugins::SchedulerPlugin;
pub use plugins::StatusUpdatePlugin;
pub use scheduler::Scheduler;
pub use status_update::NodeStatusUpdater;
pub use store::core::RedisStore;
pub use store::core::StoreContext;
pub use utils::loop_heartbeats::LoopHeartbeats;
/// Selects which components this orchestrator process enables.
///
/// The HTTP server is started in every mode (see `main`); the mode gates the
/// background processing tasks (discovery monitor, node inviter, status
/// updater, metrics sync) and the webhook sender.
// NOTE(review): deriving `clap::Parser` alongside `ValueEnum` on a plain value
// enum looks unusual — `main` parses `--mode` as a String and matches manually,
// so confirm the `Parser` derive is actually needed here.
#[derive(clap::Parser, Clone, Copy, clap::ValueEnum, Debug, PartialEq)]
pub enum ServerMode {
    /// API endpoints only; background processor tasks are not spawned.
    ApiOnly,
    /// Background processors enabled (the HTTP server itself still runs).
    ProcessorOnly,
    /// API plus all background processors.
    Full,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/main.rs | crates/orchestrator/src/main.rs | use anyhow::Result;
use clap::Parser;
use log::debug;
use log::error;
use log::info;
use log::LevelFilter;
use shared::utils::google_cloud::GcsStorageProvider;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::sync::Arc;
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;
use url::Url;
use orchestrator::{
start_server, DiscoveryMonitor, LoopHeartbeats, MetricsContext, MetricsSyncService,
MetricsWebhookSender, NodeGroupConfiguration, NodeGroupsPlugin, NodeInviter, NodeStatusUpdater,
P2PService, RedisStore, Scheduler, SchedulerPlugin, ServerMode, StatusUpdatePlugin,
StoreContext, WebhookConfig, WebhookPlugin,
};
/// Command-line arguments for the orchestrator binary.
#[derive(Parser)]
struct Args {
    /// Server mode ("api", "processor" or "full")
    // Fixed: this was a plain `//` comment, so clap emitted no help text for
    // `--mode`, unlike every other flag. `///` makes it a doc comment that clap
    // turns into the flag's help string.
    #[arg(long, default_value = "full")]
    mode: String,
    /// RPC URL
    #[arg(short = 'r', long, default_value = "http://localhost:8545")]
    rpc_url: String,
    /// Owner key
    #[arg(short = 'k', long)]
    coordinator_key: String,
    /// Compute pool id
    #[arg(long, default_value = "0")]
    compute_pool_id: u32,
    /// Domain id
    #[arg(short = 'd', long, default_value = "0")]
    domain_id: u32,
    /// External ip - advertised to workers
    #[arg(short = 'e', long)]
    host: Option<String>,
    /// Port
    #[arg(short = 'p', long, default_value = "8090")]
    port: u16,
    /// External url - advertised to workers
    #[arg(short = 'u', long)]
    url: Option<String>,
    /// Discovery refresh interval
    #[arg(short = 'i', long, default_value = "10")]
    discovery_refresh_interval: u64,
    /// Redis store url
    #[arg(short = 's', long, default_value = "redis://localhost:6380")]
    redis_store_url: String,
    /// Discovery URLs (comma-separated)
    #[arg(long, default_value = "http://localhost:8089", value_delimiter = ',')]
    discovery_urls: Vec<String>,
    /// Admin api key
    #[arg(short = 'a', long, default_value = "admin")]
    admin_api_key: String,
    /// Disable instance ejection from chain
    #[arg(long)]
    disable_ejection: bool,
    /// Hourly s3 upload limit
    #[arg(long, default_value = "2")]
    hourly_s3_upload_limit: i64,
    /// S3 bucket name
    #[arg(long)]
    bucket_name: Option<String>,
    /// Log level
    #[arg(short = 'l', long, default_value = "info")]
    log_level: String,
    /// Node group management interval
    #[arg(long, default_value = "10")]
    node_group_management_interval: u64,
    /// Max healthy nodes with same endpoint
    #[arg(long, default_value = "1")]
    max_healthy_nodes_with_same_endpoint: u32,
    /// Libp2p port
    #[arg(long, default_value = "4004")]
    libp2p_port: u16,
}
/// Orchestrator entry point: parses CLI args, wires up logging, wallet,
/// Redis-backed stores, the p2p service and plugins, then runs the HTTP
/// server alongside the background processor tasks until one exits or
/// Ctrl-C is received.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    // Translate the CLI log-level string into an env_logger filter; anything
    // unrecognized is a hard startup error.
    let log_level = match args.log_level.as_str() {
        "error" => LevelFilter::Error,
        "warn" => LevelFilter::Warn,
        "info" => LevelFilter::Info,
        "debug" => LevelFilter::Debug,
        "trace" => LevelFilter::Trace,
        _ => anyhow::bail!("invalid log level: {}", args.log_level),
    };
    // Noisy p2p-related crates are capped at WARN regardless of the global level.
    env_logger::Builder::new()
        .filter_level(log_level)
        .format_timestamp(None)
        .filter_module("iroh", log::LevelFilter::Warn)
        .filter_module("iroh_net", log::LevelFilter::Warn)
        .filter_module("iroh_quinn", log::LevelFilter::Warn)
        .filter_module("iroh_base", log::LevelFilter::Warn)
        .filter_module("tracing::span", log::LevelFilter::Warn)
        .init();
    let server_mode = match args.mode.as_str() {
        "api" => ServerMode::ApiOnly,
        "processor" => ServerMode::ProcessorOnly,
        "full" => ServerMode::Full,
        _ => anyhow::bail!("invalid server mode: {}", args.mode),
    };
    debug!("Log level: {log_level}");
    debug!("Server mode: {server_mode:?}");
    let metrics_context = Arc::new(MetricsContext::new(args.compute_pool_id.to_string()));
    let heartbeats = Arc::new(LoopHeartbeats::new(&server_mode));
    let compute_pool_id = args.compute_pool_id;
    let domain_id = args.domain_id;
    let coordinator_key = args.coordinator_key;
    let rpc_url: Url = args.rpc_url.parse().unwrap();
    // All long-running background loops are spawned into this set so they can
    // be joined/aborted together at shutdown.
    let mut tasks: JoinSet<Result<()>> = JoinSet::new();
    let wallet = Wallet::new(&coordinator_key, rpc_url).unwrap_or_else(|err| {
        error!("Error creating wallet: {err:?}");
        std::process::exit(1);
    });
    let store = Arc::new(RedisStore::new(&args.redis_store_url));
    let store_context = Arc::new(StoreContext::new(store.clone()));
    let keypair = p2p::Keypair::generate_ed25519();
    let cancellation_token = CancellationToken::new();
    // The p2p service hands back channels that are later given to the node
    // inviter and the API server; failure to start it is fatal.
    let (p2p_service, invite_tx, get_task_logs_tx, restart_task_tx) = {
        match P2PService::new(
            keypair,
            args.libp2p_port,
            cancellation_token.clone(),
            wallet.clone(),
        ) {
            Ok(res) => {
                info!("p2p service initialized successfully");
                res
            }
            Err(e) => {
                error!("failed to initialize p2p service: {e}");
                std::process::exit(1);
            }
        }
    };
    tokio::task::spawn(p2p_service.run());
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let group_store_context = store_context.clone();
    let mut scheduler_plugins: Vec<SchedulerPlugin> = Vec::new();
    let mut status_update_plugins: Vec<StatusUpdatePlugin> = vec![];
    let mut node_groups_plugin: Option<Arc<NodeGroupsPlugin>> = None;
    let mut webhook_plugins: Vec<WebhookPlugin> = vec![];
    // Webhook plugins are configured via a JSON array in WEBHOOK_CONFIGS; a
    // parse failure is logged but does not abort startup.
    let configs = std::env::var("WEBHOOK_CONFIGS").unwrap_or_default();
    if !configs.is_empty() {
        match serde_json::from_str::<Vec<WebhookConfig>>(&configs) {
            Ok(configs) => {
                for config in configs {
                    let plugin = WebhookPlugin::new(config);
                    let plugin_clone = plugin.clone();
                    webhook_plugins.push(plugin_clone);
                    status_update_plugins.push(plugin.into());
                    info!("Plugin: Webhook plugin initialized");
                }
            }
            Err(e) => {
                error!("Failed to parse webhook configs from environment: {e}");
            }
        }
    } else {
        info!("No webhook configurations provided");
    }
    let webhook_sender_store = store_context.clone();
    let webhook_plugins_clone = webhook_plugins.clone();
    // The metrics webhook sender only runs when webhooks are configured and the
    // process is doing background work (i.e. not api-only).
    if !webhook_plugins_clone.is_empty() && server_mode != ServerMode::ApiOnly {
        tasks.spawn(async move {
            let mut webhook_sender = MetricsWebhookSender::new(
                webhook_sender_store.clone(),
                webhook_plugins_clone.clone(),
                compute_pool_id,
            );
            if let Err(e) = webhook_sender.run().await {
                error!("Error running webhook sender: {e}");
            }
            Ok(())
        });
    }
    // Load node group configurations from environment variable
    let node_group_configs = std::env::var("NODE_GROUP_CONFIGS").unwrap_or_default();
    if !node_group_configs.is_empty() {
        match serde_json::from_str::<Vec<NodeGroupConfiguration>>(&node_group_configs) {
            Ok(configs) if !configs.is_empty() => {
                let node_groups_heartbeats = heartbeats.clone();
                let group_plugin = Arc::new(NodeGroupsPlugin::new(
                    configs,
                    store.clone(),
                    group_store_context.clone(),
                    Some(node_groups_heartbeats.clone()),
                    Some(webhook_plugins.clone()),
                ));
                // Register the plugin as a task observer
                group_store_context
                    .task_store
                    .add_observer(group_plugin.clone())
                    .await;
                let status_group_plugin = group_plugin.clone();
                let group_plugin_for_server = group_plugin.clone();
                node_groups_plugin = Some(group_plugin_for_server);
                scheduler_plugins.push(group_plugin.into());
                status_update_plugins.push(status_group_plugin.into());
                info!("Plugin: Node group plugin initialized");
            }
            Ok(_) => {
                info!(
                    "No node group configurations provided in environment, skipping plugin setup"
                );
            }
            Err(e) => {
                // Unlike webhook configs, a malformed node-group config is fatal.
                error!("Failed to parse node group configurations from environment: {e}");
                std::process::exit(1);
            }
        }
    }
    let scheduler = Scheduler::new(store_context.clone(), scheduler_plugins);
    // Only spawn processor tasks if in ProcessorOnly or Full mode
    if matches!(server_mode, ServerMode::ProcessorOnly | ServerMode::Full) {
        // Start metrics sync service to centralize metrics from Redis to Prometheus
        let metrics_sync_store_context = store_context.clone();
        let metrics_sync_context = metrics_context.clone();
        let metrics_sync_node_groups = node_groups_plugin.clone();
        tasks.spawn(async move {
            let sync_service = MetricsSyncService::new(
                metrics_sync_store_context,
                metrics_sync_context,
                server_mode,
                10,
                metrics_sync_node_groups,
            );
            sync_service.run().await
        });
        if let Some(group_plugin) = node_groups_plugin.clone() {
            tasks.spawn(async move {
                group_plugin
                    .run_group_management_loop(args.node_group_management_interval)
                    .await
            });
        }
        // Create status_update_plugins for discovery monitor
        let mut discovery_status_update_plugins: Vec<StatusUpdatePlugin> = vec![];
        // Add webhook plugins to discovery status update plugins
        for plugin in &webhook_plugins {
            discovery_status_update_plugins.push(plugin.into());
        }
        // Add node groups plugin if available
        if let Some(group_plugin) = node_groups_plugin.clone() {
            discovery_status_update_plugins.push(group_plugin.into());
        }
        let discovery_store_context = store_context.clone();
        let discovery_heartbeats = heartbeats.clone();
        tasks.spawn({
            let wallet = wallet.clone();
            async move {
                let monitor = DiscoveryMonitor::new(
                    wallet,
                    compute_pool_id,
                    args.discovery_refresh_interval,
                    args.discovery_urls,
                    discovery_store_context.clone(),
                    discovery_heartbeats.clone(),
                    args.max_healthy_nodes_with_same_endpoint,
                    discovery_status_update_plugins,
                );
                monitor.run().await
            }
        });
        let inviter_store_context = store_context.clone();
        let inviter_heartbeats = heartbeats.clone();
        let wallet = wallet.clone();
        let inviter = match NodeInviter::new(
            wallet,
            compute_pool_id,
            domain_id,
            args.host.as_deref(),
            Some(&args.port),
            args.url.as_deref(),
            inviter_store_context.clone(),
            inviter_heartbeats.clone(),
            invite_tx,
        ) {
            Ok(inviter) => {
                info!("Node inviter initialized successfully");
                inviter
            }
            Err(e) => {
                error!("Failed to initialize node inviter: {e}");
                std::process::exit(1);
            }
        };
        tasks.spawn(async move { inviter.run().await });
        // Create status_update_plugins for status updater
        let mut status_updater_plugins: Vec<StatusUpdatePlugin> = vec![];
        // Add webhook plugins to status updater plugins
        for plugin in &webhook_plugins {
            status_updater_plugins.push(plugin.into());
        }
        // Add node groups plugin if available
        if let Some(group_plugin) = node_groups_plugin.clone() {
            status_updater_plugins.push(group_plugin.into());
        }
        let status_update_store_context = store_context.clone();
        let status_update_heartbeats = heartbeats.clone();
        let status_update_metrics = metrics_context.clone();
        tasks.spawn({
            let contracts = contracts.clone();
            async move {
                let status_updater = NodeStatusUpdater::new(
                    status_update_store_context.clone(),
                    15,
                    None,
                    contracts,
                    compute_pool_id,
                    args.disable_ejection,
                    status_update_heartbeats.clone(),
                    status_updater_plugins,
                    status_update_metrics,
                );
                status_updater.run().await
            }
        });
    }
    let port = args.port;
    let server_store_context = store_context.clone();
    // Storage is enabled only when both a bucket name (CLI) and S3 credentials
    // (env) are present and non-empty.
    let s3_credentials = std::env::var("S3_CREDENTIALS").ok();
    let storage_provider: Option<Arc<dyn shared::utils::StorageProvider>> =
        match (args.bucket_name.as_ref(), s3_credentials) {
            (Some(bucket_name), Some(s3_credentials))
                if !bucket_name.is_empty() && !s3_credentials.is_empty() =>
            {
                let gcs_storage = GcsStorageProvider::new(bucket_name, &s3_credentials)
                    .await
                    .unwrap_or_else(|_| panic!("Failed to create GCS storage provider"));
                Some(Arc::new(gcs_storage) as Arc<dyn shared::utils::StorageProvider>)
            }
            _ => {
                info!("Bucket name or S3 credentials not provided, storage provider disabled");
                None
            }
        };
    // Always start server regardless of mode
    // Run until the server exits, any background task finishes (normally or
    // with an error), or a Ctrl-C arrives — whichever happens first.
    tokio::select! {
        res = start_server(
            "0.0.0.0",
            port,
            server_store_context.clone(),
            args.admin_api_key,
            storage_provider,
            heartbeats.clone(),
            store.clone(),
            args.hourly_s3_upload_limit,
            Some(contracts),
            compute_pool_id,
            server_mode,
            scheduler,
            node_groups_plugin,
            metrics_context,
            get_task_logs_tx,
            restart_task_tx,
        ) => {
            if let Err(e) = res {
                error!("Server error: {e}");
            }
        }
        Some(res) = tasks.join_next() => {
            if let Err(e) = res? {
                error!("Task error: {e}");
            }
        }
        _ = tokio::signal::ctrl_c() => {
            error!("Shutdown signal received");
        }
    }
    // TODO: use cancellation token to gracefully shutdown tasks
    cancellation_token.cancel();
    tasks.shutdown().await;
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/discovery/mod.rs | crates/orchestrator/src/discovery/mod.rs | pub(crate) mod monitor;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/discovery/monitor.rs | crates/orchestrator/src/discovery/monitor.rs | use crate::models::node::NodeStatus;
use crate::models::node::OrchestratorNode;
use crate::plugins::StatusUpdatePlugin;
use crate::store::core::StoreContext;
use crate::utils::loop_heartbeats::LoopHeartbeats;
use alloy::primitives::Address;
use alloy::primitives::U256;
use anyhow::Error;
use anyhow::Result;
use chrono::Utc;
use log::{error, info};
use shared::models::api::ApiResponse;
use shared::models::node::DiscoveryNode;
use shared::security::request_signer::sign_request_with_nonce;
use shared::web3::wallet::Wallet;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::interval;
/// Periodically pulls validated nodes from one or more discovery services and
/// reconciles them with the orchestrator's local node store.
pub struct DiscoveryMonitor {
    // Wallet used to sign discovery requests (x-address / x-signature headers).
    coordinator_wallet: Wallet,
    // Pool whose nodes are fetched via `/api/pool/{id}`.
    compute_pool_id: u32,
    // Sweep interval in seconds.
    interval_s: u64,
    // Discovery base URLs; results from all of them are merged and deduplicated.
    discovery_urls: Vec<String>,
    // Handle to the node store used for reads and status updates.
    store_context: Arc<StoreContext>,
    // Liveness reporting for this loop (updated once per sweep).
    heartbeats: Arc<LoopHeartbeats>,
    // Shared HTTP client reused across all discovery calls.
    http_client: reqwest::Client,
    // Cap on healthy nodes allowed to share one ip:port before a new node is skipped.
    max_healthy_nodes_with_same_endpoint: u32,
    // Plugins notified whenever a node's status changes.
    status_change_handlers: Vec<StatusUpdatePlugin>,
}
impl DiscoveryMonitor {
    /// Builds a monitor. A fresh `reqwest::Client` is created here and reused
    /// for every discovery request the monitor makes.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        coordinator_wallet: Wallet,
        compute_pool_id: u32,
        interval_s: u64,
        discovery_urls: Vec<String>,
        store_context: Arc<StoreContext>,
        heartbeats: Arc<LoopHeartbeats>,
        max_healthy_nodes_with_same_endpoint: u32,
        status_change_handlers: Vec<StatusUpdatePlugin>,
    ) -> Self {
        Self {
            coordinator_wallet,
            compute_pool_id,
            interval_s,
            discovery_urls,
            store_context,
            heartbeats,
            http_client: reqwest::Client::new(),
            max_healthy_nodes_with_same_endpoint,
            status_change_handlers,
        }
    }
    /// Runs every registered status-change handler for `node`. Handler errors
    /// are logged and swallowed so one failing plugin cannot block the others.
    async fn handle_status_change(&self, node: &OrchestratorNode, old_status: NodeStatus) {
        for handler in &self.status_change_handlers {
            if let Err(e) = handler.handle_status_change(node, &old_status).await {
                error!("Status change handler failed: {e}");
            }
        }
    }
    /// Persists `new_status` for `node_address` and notifies the status-change
    /// handlers with the node's previous status. Errors if the node is not in
    /// the store.
    async fn update_node_status(
        &self,
        node_address: &Address,
        new_status: NodeStatus,
    ) -> Result<(), Error> {
        // Get the current node to know the old status
        let old_status = match self.store_context.node_store.get_node(node_address).await? {
            Some(node) => node.status,
            None => return Err(anyhow::anyhow!("Node not found: {}", node_address)),
        };
        // Update the status in the store
        self.store_context
            .node_store
            .update_node_status(node_address, new_status.clone())
            .await?;
        // Get the updated node and trigger status change handlers
        if let Some(updated_node) = self.store_context.node_store.get_node(node_address).await? {
            self.handle_status_change(&updated_node, old_status).await;
        }
        Ok(())
    }
    /// Main loop: every `interval_s` seconds, fetch and sync nodes from
    /// discovery, then report liveness. Sync errors are logged; the loop
    /// never exits on its own.
    pub async fn run(&self) -> Result<(), Error> {
        let mut interval = interval(Duration::from_secs(self.interval_s));
        loop {
            interval.tick().await;
            match self.get_nodes().await {
                Ok(nodes) => {
                    info!(
                        "Successfully synced {} nodes from discovery service",
                        nodes.len()
                    );
                }
                Err(e) => {
                    error!("Error syncing nodes from discovery service: {e}");
                }
            }
            self.heartbeats.update_monitor();
        }
    }
    /// Fetches validated nodes for this pool from one discovery base URL.
    ///
    /// Best-effort by design: signing, request, body-read and parse failures
    /// are logged and reported as an empty `Ok(Vec)` so one bad discovery
    /// service does not abort the sweep. Only header-construction failures
    /// (the `?` on `HeaderValue::from_str`) propagate as `Err`.
    async fn fetch_nodes_from_single_discovery(
        &self,
        discovery_url: &str,
    ) -> Result<Vec<DiscoveryNode>, Error> {
        let discovery_route = format!("/api/pool/{}", self.compute_pool_id);
        let address = self.coordinator_wallet.address().to_string();
        let signature =
            match sign_request_with_nonce(&discovery_route, &self.coordinator_wallet, None).await {
                Ok(sig) => sig,
                Err(e) => {
                    error!("Failed to sign discovery request: {e}");
                    return Ok(Vec::new());
                }
            };
        let mut headers = reqwest::header::HeaderMap::new();
        headers.insert(
            "x-address",
            reqwest::header::HeaderValue::from_str(&address)?,
        );
        headers.insert(
            "x-signature",
            reqwest::header::HeaderValue::from_str(&signature.signature)?,
        );
        let response = match self
            .http_client
            .get(format!("{discovery_url}{discovery_route}"))
            .query(&[("nonce", signature.nonce)])
            .headers(headers)
            .send()
            .await
        {
            Ok(resp) => resp,
            Err(e) => {
                error!("Failed to fetch nodes from discovery service {discovery_url}: {e}");
                return Ok(Vec::new());
            }
        };
        let response_text = match response.text().await {
            Ok(text) => text,
            Err(e) => {
                error!("Failed to read discovery response from {discovery_url}: {e}");
                return Ok(Vec::new());
            }
        };
        let parsed_response: ApiResponse<Vec<DiscoveryNode>> =
            match serde_json::from_str(&response_text) {
                Ok(resp) => resp,
                Err(e) => {
                    error!("Failed to parse discovery response from {discovery_url}: {e}");
                    return Ok(Vec::new());
                }
            };
        let nodes = parsed_response.data;
        // Only validated nodes are considered; everything else is dropped here.
        let nodes = nodes
            .into_iter()
            .filter(|node| node.is_validated)
            .collect::<Vec<DiscoveryNode>>();
        Ok(nodes)
    }
    /// Fetches from every configured discovery URL, merges the results and
    /// deduplicates by node id (first occurrence wins). Returns an empty list
    /// if every URL failed.
    pub async fn fetch_nodes_from_discovery(&self) -> Result<Vec<DiscoveryNode>, Error> {
        let mut all_nodes = Vec::new();
        let mut any_success = false;
        for discovery_url in &self.discovery_urls {
            match self.fetch_nodes_from_single_discovery(discovery_url).await {
                Ok(nodes) => {
                    info!(
                        "Successfully fetched {} nodes from {}",
                        nodes.len(),
                        discovery_url
                    );
                    all_nodes.extend(nodes);
                    any_success = true;
                }
                Err(e) => {
                    error!("Failed to fetch nodes from {discovery_url}: {e}");
                }
            }
        }
        if !any_success {
            error!("Failed to fetch nodes from all discovery services");
            return Ok(Vec::new());
        }
        // Remove duplicates based on node ID
        let mut unique_nodes = Vec::new();
        let mut seen_ids = std::collections::HashSet::new();
        for node in all_nodes {
            if seen_ids.insert(node.node.id.clone()) {
                unique_nodes.push(node);
            }
        }
        info!(
            "Total unique nodes after deduplication: {}",
            unique_nodes.len()
        );
        Ok(unique_nodes)
    }
    /// Counts OTHER healthy nodes in the store that share `ip_address:port`
    /// with the given node (the node itself is excluded).
    async fn count_healthy_nodes_with_same_endpoint(
        &self,
        node_address: Address,
        ip_address: &str,
        port: u16,
    ) -> Result<u32, Error> {
        let nodes = self.store_context.node_store.get_nodes().await?;
        Ok(nodes
            .iter()
            .filter(|other_node| {
                other_node.address != node_address
                    && other_node.ip_address == ip_address
                    && other_node.port == port
                    && other_node.status == NodeStatus::Healthy
            })
            .count() as u32)
    }
    /// Reconciles one discovery record with the local store.
    ///
    /// For an existing node this applies, in order: endpoint-collision
    /// handling, whitelist-driven ejection/recovery, on-chain inactivity (with
    /// a 5-minute grace period), IP and location updates, dead-node
    /// re-discovery, and zero-balance marking. A node unknown to the store is
    /// added as new (unless its endpoint is already saturated by healthy
    /// nodes) with `first_seen` stamped. The branch order matters: later
    /// checks read the status written by earlier ones.
    async fn sync_single_node_with_discovery(
        &self,
        discovery_node: &DiscoveryNode,
    ) -> Result<(), Error> {
        let node_address = discovery_node.node.id.parse::<Address>()?;
        // Check if there's any healthy node with the same IP and port
        let count_healthy_nodes_with_same_endpoint = self
            .count_healthy_nodes_with_same_endpoint(
                node_address,
                &discovery_node.node.ip_address,
                discovery_node.node.port,
            )
            .await?;
        match self.store_context.node_store.get_node(&node_address).await {
            Ok(Some(existing_node)) => {
                // If there's a healthy node with same IP and port, and this node isn't healthy, mark it dead
                if count_healthy_nodes_with_same_endpoint > 0
                    && existing_node.status != NodeStatus::Healthy
                {
                    info!(
                        "Node {} shares endpoint {}:{} with a healthy node, marking as dead",
                        node_address, discovery_node.node.ip_address, discovery_node.node.port
                    );
                    if let Err(e) = self
                        .update_node_status(&node_address, NodeStatus::Dead)
                        .await
                    {
                        error!("Error updating node status: {e}");
                    }
                    return Ok(());
                }
                if discovery_node.is_validated && !discovery_node.is_provider_whitelisted {
                    info!(
                        "Node {node_address} is validated but not provider whitelisted, marking as ejected"
                    );
                    if let Err(e) = self
                        .update_node_status(&node_address, NodeStatus::Ejected)
                        .await
                    {
                        error!("Error updating node status: {e}");
                    }
                }
                // If a node is already in ejected state (and hence cannot recover) but the provider
                // gets whitelisted, we need to mark it as dead so it can actually recover again
                if discovery_node.is_validated
                    && discovery_node.is_provider_whitelisted
                    && existing_node.status == NodeStatus::Ejected
                {
                    info!(
                        "Node {node_address} is validated and provider whitelisted. Local store status was ejected, marking as dead so node can recover"
                    );
                    if let Err(e) = self
                        .update_node_status(&node_address, NodeStatus::Dead)
                        .await
                    {
                        error!("Error updating node status: {e}");
                    }
                }
                if !discovery_node.is_active && existing_node.status == NodeStatus::Healthy {
                    // Node is active False but we have it in store and it is healthy
                    // This means that the node likely got kicked by e.g. the validator
                    // Add a grace period check to avoid immediately marking nodes that just became healthy
                    let should_mark_inactive =
                        if let Some(last_status_change) = existing_node.last_status_change {
                            let grace_period = chrono::Duration::minutes(5); // 5 minute grace period
                            let now = chrono::Utc::now();
                            now.signed_duration_since(last_status_change) > grace_period
                        } else {
                            // If no last_status_change, assume it's been healthy for a while
                            true
                        };
                    if should_mark_inactive {
                        info!(
                            "Node {node_address} is no longer active on chain, marking as ejected"
                        );
                        // Non-whitelisted providers go to Ejected (terminal);
                        // whitelisted ones go to Dead so they can recover.
                        if !discovery_node.is_provider_whitelisted {
                            if let Err(e) = self
                                .update_node_status(&node_address, NodeStatus::Ejected)
                                .await
                            {
                                error!("Error updating node status: {e}");
                            }
                        } else if let Err(e) = self
                            .update_node_status(&node_address, NodeStatus::Dead)
                            .await
                        {
                            error!("Error updating node status: {e}");
                        }
                    } else {
                        info!(
                            "Node {node_address} is no longer active on chain but recently became healthy, waiting before marking inactive"
                        );
                    }
                }
                if existing_node.ip_address != discovery_node.node.ip_address {
                    info!(
                        "Node {} IP changed from {} to {}",
                        node_address, existing_node.ip_address, discovery_node.node.ip_address
                    );
                    let mut node = existing_node.clone();
                    node.ip_address = discovery_node.node.ip_address.clone();
                    let _ = self.store_context.node_store.add_node(node.clone()).await;
                }
                // Location is only ever filled in, never overwritten once set.
                if existing_node.location.is_none() && discovery_node.location.is_some() {
                    info!(
                        "Node {} location changed from None to {:?}",
                        node_address, discovery_node.location
                    );
                    if let Some(location) = &discovery_node.location {
                        let _ = self
                            .store_context
                            .node_store
                            .update_node_location(&node_address, location)
                            .await;
                    }
                }
                if existing_node.status == NodeStatus::Dead {
                    // A dead node that was re-registered on discovery after it
                    // died gets another chance as Discovered.
                    if let (Some(last_change), Some(last_updated)) = (
                        existing_node.last_status_change,
                        discovery_node.last_updated,
                    ) {
                        if last_change < last_updated {
                            info!("Node {node_address} is dead but has been updated on discovery, marking as discovered");
                            if existing_node.compute_specs != discovery_node.compute_specs {
                                info!(
                                    "Node {node_address} compute specs changed, marking as discovered"
                                );
                                let mut node = existing_node.clone();
                                node.compute_specs = discovery_node.compute_specs.clone();
                                let _ = self.store_context.node_store.add_node(node.clone()).await;
                            }
                            if let Err(e) = self
                                .update_node_status(&node_address, NodeStatus::Discovered)
                                .await
                            {
                                error!("Error updating node status: {e}");
                            }
                        }
                    }
                }
                if let Some(balance) = discovery_node.latest_balance {
                    if balance == U256::ZERO {
                        info!("Node {node_address} has zero balance, marking as low balance");
                        if let Err(e) = self
                            .update_node_status(&node_address, NodeStatus::LowBalance)
                            .await
                        {
                            error!("Error updating node status: {e}");
                        }
                    }
                }
            }
            Ok(None) => {
                // Don't add new node if there's already a healthy node with same IP and port
                if count_healthy_nodes_with_same_endpoint
                    >= self.max_healthy_nodes_with_same_endpoint
                {
                    info!(
                        "Skipping new node {} as endpoint {}:{} is already used by a healthy node",
                        node_address, discovery_node.node.ip_address, discovery_node.node.port
                    );
                    return Ok(());
                }
                info!("Discovered new validated node: {node_address}");
                let mut node = OrchestratorNode::from(discovery_node.clone());
                node.first_seen = Some(Utc::now());
                let _ = self.store_context.node_store.add_node(node.clone()).await;
            }
            Err(e) => {
                error!("Error syncing node with discovery: {e}");
                return Err(e);
            }
        }
        Ok(())
    }
    /// One sweep: fetch all discovery nodes, sync each into the store
    /// (per-node errors logged, not propagated), and return them converted to
    /// `OrchestratorNode`s.
    async fn get_nodes(&self) -> Result<Vec<OrchestratorNode>, Error> {
        let discovery_nodes = self.fetch_nodes_from_discovery().await?;
        for discovery_node in &discovery_nodes {
            if let Err(e) = self.sync_single_node_with_discovery(discovery_node).await {
                error!("Error syncing node with discovery: {e}");
            }
        }
        Ok(discovery_nodes
            .into_iter()
            .map(OrchestratorNode::from)
            .collect())
    }
}
#[cfg(test)]
mod tests {
use alloy::primitives::Address;
use shared::models::node::{ComputeSpecs, Node};
use url::Url;
use super::*;
use crate::models::node::NodeStatus;
use crate::store::core::{RedisStore, StoreContext};
use crate::ServerMode;
#[tokio::test]
async fn test_sync_single_node_with_discovery() {
let node_address = "0x1234567890123456789012345678901234567890";
let discovery_node = DiscoveryNode {
is_validated: true,
is_provider_whitelisted: true,
is_active: false,
node: Node {
id: node_address.to_string(),
provider_address: node_address.to_string(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
compute_pool_id: 1,
compute_specs: Some(ComputeSpecs {
ram_mb: Some(1024),
storage_gb: Some(10),
..Default::default()
}),
..Default::default()
},
is_blacklisted: false,
..Default::default()
};
let mut orchestrator_node = OrchestratorNode::from(discovery_node.clone());
orchestrator_node.status = NodeStatus::Ejected;
orchestrator_node.address = discovery_node.node.id.parse::<Address>().unwrap();
orchestrator_node.first_seen = Some(Utc::now());
orchestrator_node.compute_specs = Some(ComputeSpecs {
gpu: None,
cpu: None,
ram_mb: Some(1024),
storage_gb: Some(10),
..Default::default()
});
let store = Arc::new(RedisStore::new_test());
let mut con = store
.client
.get_connection()
.expect("Should connect to test Redis instance");
redis::cmd("PING")
.query::<String>(&mut con)
.expect("Redis should be responsive");
redis::cmd("FLUSHALL")
.query::<String>(&mut con)
.expect("Redis should be flushed");
let store_context = Arc::new(StoreContext::new(store.clone()));
let discovery_store_context = store_context.clone();
let _ = store_context
.node_store
.add_node(orchestrator_node.clone())
.await;
let fake_wallet = Wallet::new(
"0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97",
Url::parse("http://localhost:8545").unwrap(),
)
.unwrap();
let mode = ServerMode::Full;
let discovery_monitor = DiscoveryMonitor::new(
fake_wallet,
1,
10,
vec!["http://localhost:8080".to_string()],
discovery_store_context,
Arc::new(LoopHeartbeats::new(&mode)),
1,
vec![],
);
let store_context_clone = store_context.clone();
let node_from_store = store_context_clone
.node_store
.get_node(&orchestrator_node.address)
.await
.unwrap();
assert!(node_from_store.is_some());
if let Some(node) = node_from_store {
assert_eq!(node.status, NodeStatus::Ejected);
}
discovery_monitor
.sync_single_node_with_discovery(&discovery_node)
.await
.unwrap();
let node_after_sync = &store_context
.node_store
.get_node(&orchestrator_node.address)
.await
.unwrap();
assert!(node_after_sync.is_some());
if let Some(node) = node_after_sync {
assert_eq!(node.status, NodeStatus::Dead);
}
}
#[tokio::test]
async fn test_first_seen_timestamp_set_on_new_node() {
let node_address = "0x2234567890123456789012345678901234567890";
let discovery_node = DiscoveryNode {
is_validated: true,
is_provider_whitelisted: true,
is_active: true,
node: Node {
id: node_address.to_string(),
provider_address: node_address.to_string(),
ip_address: "192.168.1.100".to_string(),
port: 8080,
compute_pool_id: 1,
..Default::default()
},
is_blacklisted: false,
..Default::default()
};
let store = Arc::new(RedisStore::new_test());
let mut con = store
.client
.get_connection()
.expect("Should connect to test Redis instance");
redis::cmd("PING")
.query::<String>(&mut con)
.expect("Redis should be responsive");
redis::cmd("FLUSHALL")
.query::<String>(&mut con)
.expect("Redis should be flushed");
let store_context = Arc::new(StoreContext::new(store.clone()));
let fake_wallet = Wallet::new(
"0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97",
Url::parse("http://localhost:8545").unwrap(),
)
.unwrap();
let mode = ServerMode::Full;
let discovery_monitor = DiscoveryMonitor::new(
fake_wallet,
1,
10,
vec!["http://localhost:8080".to_string()],
store_context.clone(),
Arc::new(LoopHeartbeats::new(&mode)),
1,
vec![],
);
let time_before = Utc::now();
// Sync a new node that doesn't exist in the store
discovery_monitor
.sync_single_node_with_discovery(&discovery_node)
.await
.unwrap();
let time_after = Utc::now();
// Verify the node was added with first_seen timestamp
let node_from_store = store_context
.node_store
.get_node(&discovery_node.node.id.parse::<Address>().unwrap())
.await
.unwrap();
assert!(node_from_store.is_some());
let node = node_from_store.unwrap();
// Verify first_seen is set
assert!(node.first_seen.is_some());
let first_seen = node.first_seen.unwrap();
// Verify the timestamp is within the expected range
assert!(first_seen >= time_before && first_seen <= time_after);
// Verify other fields are set correctly
assert_eq!(node.status, NodeStatus::Discovered);
assert_eq!(node.ip_address, "192.168.1.100");
// Test case: Sync the same node again to verify first_seen is preserved
// Simulate some time passing
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
// Update discovery data to simulate a change (e.g., IP address change)
let updated_discovery_node = DiscoveryNode {
is_validated: true,
is_provider_whitelisted: true,
is_active: true,
node: Node {
id: node_address.to_string(),
provider_address: node_address.to_string(),
ip_address: "192.168.1.101".to_string(), // Changed IP
port: 8080,
compute_pool_id: 1,
..Default::default()
},
is_blacklisted: false,
..Default::default()
};
// Sync the node again
discovery_monitor
.sync_single_node_with_discovery(&updated_discovery_node)
.await
.unwrap();
// Verify the node was updated but first_seen is preserved
let node_after_resync = store_context
.node_store
.get_node(&discovery_node.node.id.parse::<Address>().unwrap())
.await
.unwrap()
.unwrap();
// Verify first_seen is still the same (preserved)
assert_eq!(node_after_resync.first_seen, Some(first_seen));
// Verify IP was updated
assert_eq!(node_after_resync.ip_address, "192.168.1.101");
// Status should remain the same
assert_eq!(node_after_resync.status, NodeStatus::Discovered);
}
#[tokio::test]
async fn test_sync_node_with_same_endpoint() {
    // Verifies the endpoint-conflict guard in sync_single_node_with_discovery:
    // a second node advertising the same ip:port as an existing node must be
    // rejected, while the same ip with a different port must be accepted.
    let store = Arc::new(RedisStore::new_test());
    let mut con = store
        .client
        .get_connection()
        .expect("Should connect to test Redis instance");
    redis::cmd("PING")
        .query::<String>(&mut con)
        .expect("Redis should be responsive");
    // Start from a clean database so state from earlier tests cannot interfere.
    redis::cmd("FLUSHALL")
        .query::<String>(&mut con)
        .expect("Redis should be flushed");
    let store_context = Arc::new(StoreContext::new(store.clone()));
    // Create first node (will be healthy)
    let node1_address = "0x1234567890123456789012345678901234567890";
    let node1 = DiscoveryNode {
        is_validated: true,
        is_provider_whitelisted: true,
        is_active: true,
        node: Node {
            id: node1_address.to_string(),
            provider_address: node1_address.to_string(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            compute_pool_id: 1,
            compute_specs: Some(ComputeSpecs {
                ram_mb: Some(1024),
                storage_gb: Some(10),
                ..Default::default()
            }),
            ..Default::default()
        },
        is_blacklisted: false,
        ..Default::default()
    };
    let mut orchestrator_node1 = OrchestratorNode::from(node1.clone());
    orchestrator_node1.status = NodeStatus::Healthy;
    orchestrator_node1.address = node1.node.id.parse::<Address>().unwrap();
    let _ = store_context
        .node_store
        .add_node(orchestrator_node1.clone())
        .await;
    // Create second node with same IP and port
    let node2_address = "0x2234567890123456789012345678901234567890";
    let mut node2 = node1.clone();
    node2.node.id = node2_address.to_string();
    node2.node.provider_address = node2_address.to_string();
    // NOTE(review): throwaway key; the RPC URL presumably is never contacted
    // on this code path — confirm if the test is run without a local chain.
    let fake_wallet = Wallet::new(
        "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97",
        Url::parse("http://localhost:8545").unwrap(),
    )
    .unwrap();
    let mode = ServerMode::Full;
    let discovery_monitor = DiscoveryMonitor::new(
        fake_wallet,
        1,
        10,
        vec!["http://localhost:8080".to_string()],
        store_context.clone(),
        Arc::new(LoopHeartbeats::new(&mode)),
        1,
        vec![],
    );
    // Try to sync the second node
    discovery_monitor
        .sync_single_node_with_discovery(&node2)
        .await
        .unwrap();
    // Verify second node was not added
    let node2_result = store_context
        .node_store
        .get_node(&node2_address.parse::<Address>().unwrap())
        .await
        .unwrap();
    assert!(
        node2_result.is_none(),
        "Node with same endpoint should not be added"
    );
    // Create third node with same IP but different port (should be allowed)
    let node3_address = "0x3234567890123456789012345678901234567890";
    let mut node3 = node1.clone();
    node3.node.id = node3_address.to_string();
    node3.node.provider_address = node3_address.to_string();
    node3.node.port = 8081; // Different port
    // Try to sync the third node
    discovery_monitor
        .sync_single_node_with_discovery(&node3)
        .await
        .unwrap();
    // Verify third node was added (different port)
    let node3_result = store_context
        .node_store
        .get_node(&node3_address.parse::<Address>().unwrap())
        .await
        .unwrap();
    assert!(
        node3_result.is_some(),
        "Node with different port should be added"
    );
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/node/mod.rs | crates/orchestrator/src/node/mod.rs | pub(crate) mod invite;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/node/invite.rs | crates/orchestrator/src/node/invite.rs | use crate::models::node::NodeStatus;
use crate::models::node::OrchestratorNode;
use crate::p2p::InviteRequest as InviteRequestWithMetadata;
use crate::store::core::StoreContext;
use crate::utils::loop_heartbeats::LoopHeartbeats;
use alloy::primitives::utils::keccak256 as keccak;
use alloy::primitives::U256;
use alloy::signers::Signer;
use anyhow::{bail, Result};
use futures::stream;
use futures::StreamExt;
use log::{debug, error, info, warn};
use p2p::InviteRequest;
use p2p::InviteRequestUrl;
use shared::web3::wallet::Wallet;
use std::sync::Arc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tokio::sync::mpsc::Sender;
use tokio::time::{interval, Duration};
// Concurrency constants
const DEFAULT_INVITE_CONCURRENT_COUNT: usize = 32; // Max concurrent count of nodes being invited
/// Periodically invites discovered worker nodes into a compute pool by
/// signing invite payloads and handing them to the p2p layer via a channel.
pub struct NodeInviter {
    wallet: Wallet,    // signs invite digests
    pool_id: u32,      // compute pool the workers are invited to
    domain_id: u32,    // domain bound into the signed digest
    url: InviteRequestUrl, // master endpoint advertised to workers
    store_context: Arc<StoreContext>, // node + heartbeat stores
    heartbeats: Arc<LoopHeartbeats>,  // liveness reporting for this loop
    invite_tx: Sender<InviteRequestWithMetadata>, // channel to the p2p service
}
impl NodeInviter {
#[allow(clippy::too_many_arguments)]
pub fn new<'a>(
wallet: Wallet,
pool_id: u32,
domain_id: u32,
host: Option<&'a str>,
port: Option<&'a u16>,
url: Option<&'a str>,
store_context: Arc<StoreContext>,
heartbeats: Arc<LoopHeartbeats>,
invite_tx: Sender<InviteRequestWithMetadata>,
) -> Result<Self> {
let url = if let Some(url) = url {
InviteRequestUrl::MasterUrl(url.to_string())
} else {
let Some(host) = host else {
bail!("either host or url must be provided");
};
let Some(port) = port else {
bail!("either port or url must be provided");
};
InviteRequestUrl::MasterIpPort(host.to_string(), *port)
};
Ok(Self {
wallet,
pool_id,
domain_id,
url,
store_context,
heartbeats,
invite_tx,
})
}
pub async fn run(&self) -> Result<()> {
let mut interval = interval(Duration::from_secs(10));
loop {
interval.tick().await;
debug!("Running NodeInviter to process uninvited nodes...");
if let Err(e) = self.process_uninvited_nodes().await {
error!("Error processing uninvited nodes: {e}");
}
self.heartbeats.update_inviter();
}
}
async fn generate_invite(
&self,
node: &OrchestratorNode,
nonce: [u8; 32],
expiration: [u8; 32],
) -> Result<[u8; 65]> {
let domain_id: [u8; 32] = U256::from(self.domain_id).to_be_bytes();
let pool_id: [u8; 32] = U256::from(self.pool_id).to_be_bytes();
let digest = keccak(
[
&domain_id,
&pool_id,
node.address.as_slice(),
&nonce,
&expiration,
]
.concat(),
);
let signature = self
.wallet
.signer
.sign_message(digest.as_slice())
.await?
.as_bytes()
.to_owned();
Ok(signature)
}
async fn send_invite(&self, node: &OrchestratorNode) -> Result<(), anyhow::Error> {
if node.worker_p2p_id.is_none() || node.worker_p2p_addresses.is_none() {
return Err(anyhow::anyhow!("Node does not have p2p information"));
}
let p2p_id = node.worker_p2p_id.as_ref().unwrap();
let p2p_addresses = node.worker_p2p_addresses.as_ref().unwrap();
// Generate random nonce and expiration
let nonce: [u8; 32] = rand::random();
let expiration: [u8; 32] = U256::from(
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| anyhow::anyhow!("System time error: {}", e))?
.as_secs()
+ 1000,
)
.to_be_bytes();
let invite_signature = self.generate_invite(node, nonce, expiration).await?;
let payload = InviteRequest {
invite: hex::encode(invite_signature),
pool_id: self.pool_id,
url: self.url.clone(),
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| anyhow::anyhow!("System time error: {}", e))?
.as_secs(),
expiration,
nonce,
};
info!("Sending invite to node: {p2p_id}");
let (response_tx, response_rx) = tokio::sync::oneshot::channel();
let invite = InviteRequestWithMetadata {
worker_wallet_address: node.address,
worker_p2p_id: p2p_id.clone(),
worker_addresses: p2p_addresses.clone(),
invite: payload,
response_tx,
};
self.invite_tx
.send(invite)
.await
.map_err(|_| anyhow::anyhow!("failed to send invite request"))?;
match response_rx.await {
Ok(_) => {
info!("Successfully invited node");
if let Err(e) = self
.store_context
.node_store
.update_node_status(&node.address, NodeStatus::WaitingForHeartbeat)
.await
{
error!("Error updating node status: {e}");
}
if let Err(e) = self
.store_context
.heartbeat_store
.clear_unhealthy_counter(&node.address)
.await
{
error!("Error clearing unhealthy counter: {e}");
}
Ok(())
}
Err(e) => {
error!("Error sending invite to node: {e:?}");
Err(anyhow::anyhow!("Error sending invite to node: {:?}", e))
}
}
}
async fn process_uninvited_nodes(&self) -> Result<()> {
let nodes = self.store_context.node_store.get_uninvited_nodes().await?;
let invited_nodes = stream::iter(nodes.into_iter().map(|node| async move {
info!("Processing node {:?}", node.address);
match self.send_invite(&node).await {
Ok(_) => {
info!("Successfully processed node {:?}", node.address);
Ok(())
}
Err(e) => {
error!("Failed to process node {:?}: {}", node.address, e);
Err((node, e))
}
}
}))
.buffer_unordered(DEFAULT_INVITE_CONCURRENT_COUNT)
.collect::<Vec<_>>()
.await;
let failed_nodes: Vec<_> = invited_nodes.into_iter().filter_map(Result::err).collect();
if !failed_nodes.is_empty() {
warn!(
"Failed to process {} nodes: {:?}",
failed_nodes.len(),
failed_nodes
.iter()
.map(|(node, _)| node.address)
.collect::<Vec<_>>()
);
}
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/mod.rs | crates/orchestrator/src/store/mod.rs | pub(crate) mod core;
mod domains;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/domains/task_store.rs | crates/orchestrator/src/store/domains/task_store.rs | use crate::store::core::RedisStore;
use crate::NodeGroupsPlugin;
use anyhow::Result;
use futures::future;
use log::error;
use redis::AsyncCommands;
use shared::models::task::Task;
use std::sync::Arc;
use tokio::sync::Mutex;
// Redis key layout for task storage.
const TASK_KEY_PREFIX: &str = "orchestrator:task:"; // per-task value key prefix
const TASK_LIST_KEY: &str = "orchestrator:tasks"; // insertion-ordered list of task ids
const TASK_NAME_INDEX_KEY: &str = "orchestrator:task_names"; // set of names for O(1) existence checks
/// Redis-backed storage for orchestrator tasks, with synchronous observer
/// notification on task creation and deletion.
pub struct TaskStore {
    redis: Arc<RedisStore>,
    // Plugins notified on task lifecycle events; behind an async mutex
    // because observers can be registered at runtime.
    observers: Arc<Mutex<Vec<Arc<NodeGroupsPlugin>>>>,
}
impl TaskStore {
    pub fn new(redis: Arc<RedisStore>) -> Self {
        Self {
            redis,
            observers: Arc::new(Mutex::new(vec![])),
        }
    }

    /// Registers a plugin to be notified synchronously on task
    /// creation and deletion.
    pub async fn add_observer(&self, observer: Arc<NodeGroupsPlugin>) {
        let mut observers = self.observers.lock().await;
        observers.push(observer);
    }

    /// Persists a task and updates both indexes (id list + name set), then
    /// notifies observers. Observer errors are logged, not propagated.
    pub async fn add_task(&self, task: Task) -> Result<()> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        // Store the task with its ID as key
        // (inline-capture format! style, consistent with the rest of this impl)
        let task_key = format!("{TASK_KEY_PREFIX}{}", task.id);
        let _: () = con.set(&task_key, task.clone()).await?;
        // Add task ID to list of all tasks
        let _: () = con.rpush(TASK_LIST_KEY, task.id.to_string()).await?;
        // Add task name to set for fast lookup
        let _: () = con.sadd(TASK_NAME_INDEX_KEY, &task.name).await?;
        // Notify observers synchronously
        let observers = self.observers.lock().await.clone();
        for observer in observers.iter() {
            if let Err(e) = observer.on_task_created(&task) {
                error!("Error notifying observer: {e}");
            }
        }
        Ok(())
    }

    /// Returns every stored task, newest first (`created_at` descending).
    /// Ids whose task value fails to load are silently skipped.
    pub async fn get_all_tasks(&self) -> Result<Vec<Task>> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        // Get all task IDs
        let task_ids: Vec<String> = con.lrange(TASK_LIST_KEY, 0, -1).await?;
        // Get each task by ID in parallel
        let task_futures: Vec<_> = task_ids
            .into_iter()
            .map(|id| {
                let task_key = format!("{TASK_KEY_PREFIX}{id}");
                let mut con = con.clone();
                async move {
                    let task: Option<Task> = con.get(&task_key).await.ok().flatten();
                    task
                }
            })
            .collect();
        let task_results = future::join_all(task_futures).await;
        let mut tasks: Vec<Task> = task_results.into_iter().flatten().collect();
        tasks.sort_by(|a, b| b.created_at.cmp(&a.created_at));
        Ok(tasks)
    }

    /// Deletes one task by id, removing it from all indexes, then notifies
    /// observers (with `None` when the task was already gone).
    pub async fn delete_task(&self, id: String) -> Result<()> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        // Fetch first so observers can see what was deleted.
        let task = self.get_task(&id).await?;
        // Delete task from individual storage
        let task_key = format!("{TASK_KEY_PREFIX}{id}");
        let _: () = con.del(&task_key).await?;
        // Remove task ID from list
        let _: () = con.lrem(TASK_LIST_KEY, 0, id).await?;
        // Remove task name from set if task exists
        if let Some(ref task) = task {
            let _: () = con.srem(TASK_NAME_INDEX_KEY, &task.name).await?;
        }
        // Notify observers synchronously
        let observers = self.observers.lock().await.clone();
        for observer in observers.iter() {
            if let Err(e) = observer.on_task_deleted(task.clone()) {
                error!("Error notifying observer: {e}");
            }
        }
        Ok(())
    }

    /// Removes every task and both indexes, notifying observers once per
    /// deleted task.
    pub async fn delete_all_tasks(&self) -> Result<()> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        // Get all tasks first for observer notifications
        let tasks = self.get_all_tasks().await?;
        // Delete all individual task keys
        for task in &tasks {
            let task_key = format!("{TASK_KEY_PREFIX}{}", task.id);
            let _: () = con.del(&task_key).await?;
        }
        // Clear the task list
        let _: () = con.del(TASK_LIST_KEY).await?;
        // Clear the task names index
        let _: () = con.del(TASK_NAME_INDEX_KEY).await?;
        // Notify observers synchronously
        let observers = self.observers.lock().await.clone();
        for task in tasks {
            for observer in observers.iter() {
                if let Err(e) = observer.on_task_deleted(Some(task.clone())) {
                    error!("Error notifying observer: {e}");
                }
            }
        }
        Ok(())
    }

    /// Looks up a single task by id; `Ok(None)` when it does not exist.
    pub async fn get_task(&self, id: &str) -> Result<Option<Task>> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        let task_key = format!("{TASK_KEY_PREFIX}{id}");
        let task: Option<Task> = con.get(&task_key).await?;
        Ok(task)
    }

    /// O(1) check whether any stored task carries this name.
    pub async fn task_name_exists(&self, name: &str) -> Result<bool> {
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        let exists: bool = con.sismember(TASK_NAME_INDEX_KEY, name).await?;
        Ok(exists)
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/domains/metrics_store.rs | crates/orchestrator/src/store/domains/metrics_store.rs | use crate::store::core::RedisStore;
use alloy::primitives::Address;
use anyhow::{anyhow, Result};
use log::error;
use redis::AsyncCommands;
use shared::models::metric::MetricEntry;
use std::collections::HashMap;
use std::sync::Arc;
const ORCHESTRATOR_NODE_METRICS_STORE: &str = "orchestrator:node_metrics";
/// Redis-backed store for per-node task metrics: one hash per node address,
/// with fields keyed `task_id:label` mapping to an f64 value.
pub struct MetricsStore {
    redis: Arc<RedisStore>,
}
impl MetricsStore {
pub fn new(redis: Arc<RedisStore>) -> Self {
Self { redis }
}
fn clean_label(&self, label: &str) -> String {
label.replace(':', "")
}
pub async fn store_metrics(
&self,
metrics: Option<Vec<MetricEntry>>,
sender_address: Address,
) -> Result<()> {
let Some(metrics) = metrics else {
return Ok(());
};
if metrics.is_empty() {
return Ok(());
}
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let node_key = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:{sender_address}");
for entry in metrics {
let task_id = if entry.key.task_id.is_empty() {
"manual".to_string()
} else {
entry.key.task_id.clone()
};
let cleaned_label = self.clean_label(&entry.key.label);
let metric_key = format!("{task_id}:{cleaned_label}");
// Check for dashboard-progress metrics to maintain max value behavior
let should_update = if entry.key.label.contains("dashboard-progress") {
let existing_value: Option<String> = con.hget(&node_key, &metric_key).await?;
match existing_value {
Some(val) => match val.parse::<f64>() {
Ok(old_val) => entry.value > old_val,
Err(_) => true, // Overwrite if old value is not a valid float
},
None => true,
}
} else {
true
};
if should_update {
if let Err(err) = con
.hset::<_, _, _, ()>(&node_key, &metric_key, entry.value)
.await
{
error!("Could not update metric value in redis: {err}");
}
}
}
Ok(())
}
pub async fn store_manual_metrics(&self, label: String, value: f64) -> Result<()> {
self.store_metrics(
Some(vec![MetricEntry {
key: shared::models::metric::MetricKey {
task_id: "".to_string(),
label,
},
value,
}]),
Address::ZERO,
)
.await
}
pub async fn delete_metric(&self, task_id: &str, label: &str, address: &str) -> Result<bool> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let cleaned_label = self.clean_label(label);
let Ok(node_address) = address.parse::<Address>() else {
return Ok(false);
};
let node_key = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:{node_address}");
let metric_key = format!("{task_id}:{cleaned_label}");
match con.hdel::<_, _, i32>(&node_key, &metric_key).await {
Ok(deleted) => Ok(deleted == 1),
Err(err) => {
error!("Could not delete metric from redis: {err}");
Err(anyhow!("Failed to delete metric from redis: {}", err))
}
}
}
pub async fn get_metrics_for_node(
&self,
node_address: Address,
) -> Result<HashMap<String, HashMap<String, f64>>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let node_key = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:{node_address}");
// Get all metrics for this node using O(1) HGETALL operation
let node_metrics: HashMap<String, f64> = con.hgetall(&node_key).await?;
let mut result: HashMap<String, HashMap<String, f64>> = HashMap::new();
for (metric_key, value) in node_metrics {
if let Some((task_id, metric_name)) = metric_key.split_once(':') {
result
.entry(task_id.to_string())
.or_default()
.insert(metric_name.to_string(), value);
}
}
Ok(result)
}
pub async fn get_metric_keys_for_node(&self, node_address: Address) -> Result<Vec<String>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let node_key = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:{node_address}");
// Use HKEYS to get all field names for this node (O(1) operation)
let keys: Vec<String> = con.hkeys(&node_key).await?;
Ok(keys)
}
#[cfg(test)]
pub async fn get_aggregate_metrics_for_task(
&self,
task_id: &str,
) -> Result<HashMap<String, f64>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let pattern = format!("{}:*", ORCHESTRATOR_NODE_METRICS_STORE);
// Scan all node keys
let mut iter: redis::AsyncIter<String> = con.scan_match(&pattern).await?;
let mut all_node_keys = Vec::new();
while let Some(key) = iter.next_item().await {
all_node_keys.push(key);
}
drop(iter);
let mut result: HashMap<String, f64> = HashMap::new();
// For each node, get metrics for this specific task
for node_key in all_node_keys {
let node_metrics: HashMap<String, f64> = con.hgetall(&node_key).await?;
for (metric_key, value) in node_metrics {
if let Some((t_id, metric_name)) = metric_key.split_once(':') {
if t_id == task_id {
*result.entry(metric_name.to_string()).or_insert(0.0) += value;
}
}
}
}
Ok(result)
}
pub async fn get_aggregate_metrics_for_all_tasks(&self) -> Result<HashMap<String, f64>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let pattern = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:*");
// Scan all node keys
let mut iter: redis::AsyncIter<String> = con.scan_match(&pattern).await?;
let mut all_node_keys = Vec::new();
while let Some(key) = iter.next_item().await {
all_node_keys.push(key);
}
drop(iter);
let mut result: HashMap<String, f64> = HashMap::new();
// For each node, aggregate all metrics
for node_key in all_node_keys {
let node_metrics: HashMap<String, f64> = con.hgetall(&node_key).await?;
for (metric_key, value) in node_metrics {
if let Some((_task_id, metric_name)) = metric_key.split_once(':') {
*result.entry(metric_name.to_string()).or_insert(0.0) += value;
}
}
}
Ok(result)
}
pub async fn get_all_metrics(
&self,
) -> Result<HashMap<String, HashMap<String, HashMap<String, f64>>>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let pattern = format!("{ORCHESTRATOR_NODE_METRICS_STORE}:*");
// Scan all node keys
let mut iter: redis::AsyncIter<String> = con.scan_match(&pattern).await?;
let mut all_node_keys = Vec::new();
while let Some(key) = iter.next_item().await {
all_node_keys.push(key);
}
drop(iter);
let mut result: HashMap<String, HashMap<String, HashMap<String, f64>>> = HashMap::new();
// For each node, organize metrics by task and metric name
for node_key in all_node_keys {
// Extract node address from key
if let Some(node_addr) = node_key.split(':').next_back() {
let node_metrics: HashMap<String, f64> = con.hgetall(&node_key).await?;
for (metric_key, value) in node_metrics {
if let Some((task_id, metric_name)) = metric_key.split_once(':') {
result
.entry(task_id.to_string())
.or_default()
.entry(metric_name.to_string())
.or_default()
.insert(node_addr.to_string(), value);
}
}
}
}
Ok(result)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::api::tests::helper::create_test_app_state;
    use shared::models::metric::MetricEntry;
    use shared::models::metric::MetricKey;
    use std::str::FromStr;

    // Values with the same label sum across tasks in the all-task aggregate,
    // while the per-task aggregate only sees its own task.
    #[tokio::test]
    async fn test_store_metrics() {
        let app_state = create_test_app_state().await;
        let metrics_store = app_state.store_context.metrics_store.clone();
        let metrics = vec![MetricEntry {
            key: MetricKey {
                task_id: "task_1".to_string(),
                label: "cpu_usage".to_string(),
            },
            value: 1.0,
        }];
        metrics_store
            .store_metrics(Some(metrics), Address::ZERO)
            .await
            .unwrap();
        let metrics = vec![MetricEntry {
            key: MetricKey {
                task_id: "task_0".to_string(),
                label: "cpu_usage".to_string(),
            },
            value: 2.0,
        }];
        metrics_store
            .store_metrics(Some(metrics), Address::ZERO)
            .await
            .unwrap();
        // Per-task view: only task_1's 1.0.
        let metrics = metrics_store
            .get_aggregate_metrics_for_task("task_1")
            .await
            .unwrap();
        assert_eq!(metrics.get("cpu_usage"), Some(&1.0));
        // All-task view: 1.0 + 2.0 across both tasks.
        let metrics = metrics_store
            .get_aggregate_metrics_for_all_tasks()
            .await
            .unwrap();
        assert_eq!(metrics.get("cpu_usage"), Some(&3.0));
    }

    // Metrics are partitioned per sender address: node 0 never sees
    // task_2, which was only reported by node 1.
    #[tokio::test]
    async fn test_get_metrics_for_node() {
        let app_state = create_test_app_state().await;
        let metrics_store = app_state.store_context.metrics_store.clone();
        let node_addr_0 = Address::ZERO;
        let node_addr_1 = Address::from_str("0x1234567890123456789012345678901234567890").unwrap();
        let metrics1 = vec![MetricEntry {
            key: MetricKey {
                task_id: "task_1".to_string(),
                label: "cpu_usage".to_string(),
            },
            value: 1.0,
        }];
        metrics_store
            .store_metrics(Some(metrics1.clone()), node_addr_0)
            .await
            .unwrap();
        metrics_store
            .store_metrics(Some(metrics1), node_addr_1)
            .await
            .unwrap();
        let metrics2 = vec![MetricEntry {
            key: MetricKey {
                task_id: "task_2".to_string(),
                label: "cpu_usage".to_string(),
            },
            value: 1.0,
        }];
        metrics_store
            .store_metrics(Some(metrics2), node_addr_1)
            .await
            .unwrap();
        let metrics = metrics_store
            .get_metrics_for_node(node_addr_0)
            .await
            .unwrap();
        assert_eq!(metrics.get("task_1").unwrap().get("cpu_usage"), Some(&1.0));
        assert_eq!(metrics.get("task_2"), None);
        let metrics_1 = metrics_store
            .get_metrics_for_node(node_addr_1)
            .await
            .unwrap();
        assert_eq!(
            metrics_1.get("task_1").unwrap().get("cpu_usage"),
            Some(&1.0)
        );
        assert_eq!(
            metrics_1.get("task_2").unwrap().get("cpu_usage"),
            Some(&1.0)
        );
    }

    // dashboard-progress labels keep the maximum value ever reported;
    // all other labels take the latest write unconditionally.
    #[tokio::test]
    async fn test_store_metrics_value_overwrite() {
        let app_state = create_test_app_state().await;
        let metrics_store = app_state.store_context.metrics_store.clone();
        let node_addr = Address::ZERO;
        // Test dashboard-progress metric maintains max value
        metrics_store
            .store_metrics(
                Some(vec![MetricEntry {
                    key: MetricKey {
                        task_id: "task_1".to_string(),
                        label: "dashboard-progress/test/value".to_string(),
                    },
                    value: 2.0,
                }]),
                node_addr,
            )
            .await
            .unwrap();
        metrics_store
            .store_metrics(
                Some(vec![MetricEntry {
                    key: MetricKey {
                        task_id: "task_1".to_string(),
                        label: "dashboard-progress/test/value".to_string(),
                    },
                    value: 1.0,
                }]),
                node_addr,
            )
            .await
            .unwrap();
        // Lower value (1.0) must not replace the stored maximum (2.0).
        let metrics = metrics_store.get_metrics_for_node(node_addr).await.unwrap();
        assert_eq!(
            metrics
                .get("task_1")
                .unwrap()
                .get("dashboard-progress/test/value"),
            Some(&2.0)
        );
        metrics_store
            .store_metrics(
                Some(vec![MetricEntry {
                    key: MetricKey {
                        task_id: "task_1".to_string(),
                        label: "dashboard-progress/test/value".to_string(),
                    },
                    value: 3.0,
                }]),
                node_addr,
            )
            .await
            .unwrap();
        // Higher value (3.0) does replace it.
        let metrics = metrics_store.get_metrics_for_node(node_addr).await.unwrap();
        assert_eq!(
            metrics
                .get("task_1")
                .unwrap()
                .get("dashboard-progress/test/value"),
            Some(&3.0)
        );
        // Test non-dashboard metric gets overwritten regardless of value
        metrics_store
            .store_metrics(
                Some(vec![MetricEntry {
                    key: MetricKey {
                        task_id: "task_1".to_string(),
                        label: "cpu_usage".to_string(),
                    },
                    value: 2.0,
                }]),
                node_addr,
            )
            .await
            .unwrap();
        metrics_store
            .store_metrics(
                Some(vec![MetricEntry {
                    key: MetricKey {
                        task_id: "task_1".to_string(),
                        label: "cpu_usage".to_string(),
                    },
                    value: 1.0,
                }]),
                node_addr,
            )
            .await
            .unwrap();
        let metrics = metrics_store.get_metrics_for_node(node_addr).await.unwrap();
        assert_eq!(metrics.get("task_1").unwrap().get("cpu_usage"), Some(&1.0));
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/domains/node_store.rs | crates/orchestrator/src/store/domains/node_store.rs | use crate::models::node::NodeStatus;
use crate::models::node::OrchestratorNode;
use crate::store::core::RedisStore;
use alloy::primitives::Address;
use anyhow::Result;
use chrono::{DateTime, Utc};
use log::info;
use redis::AsyncCommands;
use shared::models::heartbeat::TaskDetails;
use shared::models::node::NodeLocation;
use shared::models::task::TaskState;
use std::collections::HashMap;
use std::sync::Arc;
// Per-node hashes are stored under `orchestrator:node:<address>`.
const ORCHESTRATOR_BASE_KEY: &str = "orchestrator:node";
// Set of all known node addresses; used to enumerate nodes.
const ORCHESTRATOR_NODE_INDEX: &str = "orchestrator:node_index";
/// Redis-backed store for orchestrator nodes: one hash per node plus a
/// set index of all known addresses.
pub struct NodeStore {
    redis: Arc<RedisStore>,
}
impl NodeStore {
pub fn new(redis: Arc<RedisStore>) -> Self {
Self { redis }
}
    // convert orchestrator node to redis hash fields
    //
    // Mandatory fields (address, ip_address, port, status) are always written;
    // each Option field is written only when present. Field names must stay in
    // sync with `hash_fields_to_node`. Complex values are JSON-encoded so they
    // round-trip losslessly; errors here mean a serialization failure.
    fn node_to_hash_fields(node: &OrchestratorNode) -> Result<Vec<(String, String)>> {
        let mut fields = vec![
            ("address".to_string(), node.address.to_string()),
            ("ip_address".to_string(), node.ip_address.clone()),
            ("port".to_string(), node.port.to_string()),
        ];
        let status_json = serde_json::to_string(&node.status)
            .map_err(|e| anyhow::anyhow!("Failed to serialize status: {}", e))?;
        fields.push(("status".to_string(), status_json));
        if let Some(task_id) = &node.task_id {
            fields.push(("task_id".to_string(), task_id.clone()));
        }
        if let Some(task_state) = &node.task_state {
            let task_state_json = serde_json::to_string(task_state)
                .map_err(|e| anyhow::anyhow!("Failed to serialize task_state: {}", e))?;
            fields.push(("task_state".to_string(), task_state_json));
        }
        if let Some(task_details) = &node.task_details {
            let task_details_json = serde_json::to_string(task_details)
                .map_err(|e| anyhow::anyhow!("Failed to serialize task_details: {}", e))?;
            fields.push(("task_details".to_string(), task_details_json));
        }
        if let Some(version) = &node.version {
            fields.push(("version".to_string(), version.clone()));
        }
        if let Some(p2p_id) = &node.p2p_id {
            fields.push(("p2p_id".to_string(), p2p_id.clone()));
        }
        // Timestamps are stored as RFC 3339 strings.
        if let Some(last_status_change) = &node.last_status_change {
            fields.push((
                "last_status_change".to_string(),
                last_status_change.to_rfc3339(),
            ));
        }
        if let Some(first_seen) = &node.first_seen {
            fields.push(("first_seen".to_string(), first_seen.to_rfc3339()));
        }
        if let Some(compute_specs) = &node.compute_specs {
            let compute_specs_json = serde_json::to_string(compute_specs)
                .map_err(|e| anyhow::anyhow!("Failed to serialize compute_specs: {}", e))?;
            fields.push(("compute_specs".to_string(), compute_specs_json));
        }
        if let Some(worker_p2p_id) = &node.worker_p2p_id {
            fields.push(("worker_p2p_id".to_string(), worker_p2p_id.clone()));
        }
        if let Some(worker_p2p_addresses) = &node.worker_p2p_addresses {
            let worker_p2p_addresses_json = serde_json::to_string(worker_p2p_addresses)
                .map_err(|e| anyhow::anyhow!("Failed to serialize worker_p2p_addresses: {}", e))?;
            fields.push((
                "worker_p2p_addresses".to_string(),
                worker_p2p_addresses_json,
            ));
        }
        if let Some(location) = &node.location {
            let location_json = serde_json::to_string(location)
                .map_err(|e| anyhow::anyhow!("Failed to serialize location: {}", e))?;
            fields.push(("location".to_string(), location_json));
        }
        Ok(fields)
    }
    // Helper method to convert Redis hash fields to OrchestratorNode
    //
    // Inverse of `node_to_hash_fields`. The four mandatory fields produce an
    // error when missing or malformed; every optional field that is absent or
    // fails to parse is silently dropped to `None` (via `.ok()`), so partially
    // corrupted hashes still load.
    fn hash_fields_to_node(fields: HashMap<String, String>) -> Result<OrchestratorNode> {
        let address = fields
            .get("address")
            .ok_or_else(|| anyhow::anyhow!("Missing address field"))?
            .parse()
            .map_err(|_| anyhow::anyhow!("Invalid address format"))?;
        let ip_address = fields
            .get("ip_address")
            .ok_or_else(|| anyhow::anyhow!("Missing ip_address field"))?
            .clone();
        let port = fields
            .get("port")
            .ok_or_else(|| anyhow::anyhow!("Missing port field"))?
            .parse()
            .map_err(|_| anyhow::anyhow!("Invalid port format"))?;
        let status = fields
            .get("status")
            .ok_or_else(|| anyhow::anyhow!("Missing status field"))
            .and_then(|s| {
                serde_json::from_str(s).map_err(|e| anyhow::anyhow!("Invalid status format: {}", e))
            })?;
        let task_id = fields.get("task_id").cloned();
        let task_state = fields
            .get("task_state")
            .and_then(|s| serde_json::from_str(s).ok());
        let task_details = fields
            .get("task_details")
            .and_then(|s| serde_json::from_str(s).ok());
        let version = fields.get("version").cloned();
        let p2p_id = fields.get("p2p_id").cloned();
        // Timestamps were stored as RFC 3339; normalize back to UTC.
        let last_status_change = fields
            .get("last_status_change")
            .and_then(|s| DateTime::parse_from_rfc3339(s).ok())
            .map(|dt| dt.with_timezone(&Utc));
        let first_seen = fields
            .get("first_seen")
            .and_then(|s| DateTime::parse_from_rfc3339(s).ok())
            .map(|dt| dt.with_timezone(&Utc));
        let compute_specs = fields
            .get("compute_specs")
            .and_then(|s| serde_json::from_str(s).ok());
        let worker_p2p_id = fields.get("worker_p2p_id").cloned();
        let worker_p2p_addresses = fields
            .get("worker_p2p_addresses")
            .and_then(|s| serde_json::from_str(s).ok());
        let location = fields
            .get("location")
            .and_then(|s| serde_json::from_str(s).ok());
        Ok(OrchestratorNode {
            address,
            ip_address,
            port,
            status,
            task_id,
            task_state,
            task_details,
            version,
            p2p_id,
            last_status_change,
            first_seen,
            compute_specs,
            worker_p2p_id,
            worker_p2p_addresses,
            location,
        })
    }
pub async fn get_nodes(&self) -> Result<Vec<OrchestratorNode>> {
let mut con = self.redis.client.get_multiplexed_async_connection().await?;
let addresses: Vec<String> = con.smembers(ORCHESTRATOR_NODE_INDEX).await?;
if addresses.is_empty() {
return Ok(Vec::new());
}
let mut nodes = Vec::new();
// Use pipeline for efficient bulk hash retrieval
let mut pipe = redis::pipe();
for address in &addresses {
let key = format!("{ORCHESTRATOR_BASE_KEY}:{address}");
pipe.hgetall(&key);
}
let hash_results: Vec<HashMap<String, String>> = pipe.query_async(&mut con).await?;
for fields in hash_results {
if !fields.is_empty() {
match Self::hash_fields_to_node(fields) {
Ok(node) => nodes.push(node),
Err(e) => {
info!("Failed to deserialize node: {e}");
}
}
}
}
#[allow(clippy::match_same_arms)]
nodes.sort_by(|a, b| match (&a.status, &b.status) {
(NodeStatus::Healthy, NodeStatus::Healthy) => std::cmp::Ordering::Equal,
(NodeStatus::Healthy, _) => std::cmp::Ordering::Less,
(_, NodeStatus::Healthy) => std::cmp::Ordering::Greater,
(NodeStatus::Discovered, NodeStatus::Discovered) => std::cmp::Ordering::Equal,
(NodeStatus::Discovered, _) => std::cmp::Ordering::Less,
(_, NodeStatus::Discovered) => std::cmp::Ordering::Greater,
(NodeStatus::Dead, NodeStatus::Dead) => std::cmp::Ordering::Equal,
(NodeStatus::Dead, _) => std::cmp::Ordering::Greater,
(_, NodeStatus::Dead) => std::cmp::Ordering::Less,
_ => std::cmp::Ordering::Equal,
});
Ok(nodes)
}
/// Register a node: add its address to the node index set and store its
/// fields as a Redis hash. Both writes run in one atomic MULTI/EXEC
/// transaction so a node is never indexed without its hash (or vice versa).
pub async fn add_node(&self, node: OrchestratorNode) -> Result<()> {
    let mut con = self.redis.client.get_multiplexed_async_connection().await?;
    // Always use hash format for new nodes
    let fields = Self::node_to_hash_fields(&node)?;
    // Key construction kept consistent with the other methods in this store.
    let address = node.address;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{address}");
    // Use Redis transaction (MULTI/EXEC) to ensure atomic execution of both operations
    let mut pipe = redis::pipe();
    pipe.atomic()
        .sadd(ORCHESTRATOR_NODE_INDEX, address.to_string())
        .hset_multiple(&key, &fields);
    let _: () = pipe.query_async(&mut con).await?;
    Ok(())
}
/// Look up a single node by address.
///
/// Returns `Ok(None)` both when the node is unknown and when its stored hash
/// can no longer be deserialized (the latter is logged and treated as absent).
pub async fn get_node(&self, address: &Address) -> Result<Option<OrchestratorNode>> {
    let mut con = self.redis.client.get_multiplexed_async_connection().await?;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{address}");
    let fields: HashMap<String, String> = con.hgetall(&key).await?;
    // HGETALL on a missing key yields an empty map, not an error.
    if fields.is_empty() {
        return Ok(None);
    }
    match Self::hash_fields_to_node(fields) {
        Ok(node) => Ok(Some(node)),
        Err(e) => {
            // Corrupt entry: log and report the node as absent instead of failing.
            info!("Failed to deserialize node {address}: {e}");
            Ok(None)
        }
    }
}
/// Return only the nodes whose status is `Discovered` (i.e. seen via
/// discovery but not yet invited). Uses the same pipelined bulk-read
/// pattern as `get_nodes`, filtering on status while deserializing.
pub async fn get_uninvited_nodes(&self) -> Result<Vec<OrchestratorNode>> {
    let mut con = self.redis.client.get_multiplexed_async_connection().await?;
    let addresses: Vec<String> = con.smembers(ORCHESTRATOR_NODE_INDEX).await?;
    if addresses.is_empty() {
        return Ok(Vec::new());
    }
    let mut nodes = Vec::new();
    // Use pipeline for efficient bulk hash retrieval
    let mut pipe = redis::pipe();
    for address in &addresses {
        let key = format!("{ORCHESTRATOR_BASE_KEY}:{address}");
        pipe.hgetall(&key);
    }
    let hash_results: Vec<HashMap<String, String>> = pipe.query_async(&mut con).await?;
    for fields in hash_results {
        // Empty hash: key expired/deleted between index read and pipeline run.
        if !fields.is_empty() {
            match Self::hash_fields_to_node(fields) {
                Ok(node) if matches!(node.status, NodeStatus::Discovered) => {
                    nodes.push(node);
                }
                Ok(_) => {} // Node exists but not in Discovered status
                Err(e) => {
                    // Skip undecodable entries rather than failing the listing.
                    info!("Failed to deserialize node: {e}");
                }
            }
        }
    }
    Ok(nodes)
}
/// Persist a node's new status together with the time of the change.
///
/// Both hash fields are written in one atomic MULTI/EXEC pipeline so a
/// reader never observes a status without its matching `last_status_change`.
pub async fn update_node_status(
    &self,
    node_address: &Address,
    status: NodeStatus,
) -> Result<()> {
    let mut connection = self.redis.client.get_multiplexed_async_connection().await?;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{node_address}");
    // Serialize the status enum in the same JSON form the readers expect.
    let serialized_status = serde_json::to_string(&status)?;
    let changed_at = chrono::Utc::now().to_rfc3339();
    let mut pipeline = redis::pipe();
    pipeline.atomic();
    pipeline.hset(&key, "status", serialized_status);
    pipeline.hset(&key, "last_status_change", changed_at);
    let _: () = pipeline.query_async(&mut connection).await?;
    Ok(())
}
/// Overwrite only the `version` field of a node's hash; all other fields
/// are left untouched.
pub async fn update_node_version(&self, node_address: &Address, version: &str) -> Result<()> {
    let mut connection = self.redis.client.get_multiplexed_async_connection().await?;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{node_address}");
    let _: () = connection.hset(&key, "version", version).await?;
    Ok(())
}
/// Overwrite only the `location` field of a node's hash with the
/// JSON-serialized location; all other fields are left untouched.
pub async fn update_node_location(
    &self,
    node_address: &Address,
    location: &NodeLocation,
) -> Result<()> {
    // Serialize before touching Redis so a serde failure costs no round trip.
    let serialized_location = serde_json::to_string(location)?;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{node_address}");
    let mut connection = self.redis.client.get_multiplexed_async_connection().await?;
    let _: () = connection.hset(&key, "location", serialized_location).await?;
    Ok(())
}
/// Overwrite only the `p2p_id` field of a node's hash; all other fields
/// are left untouched.
pub async fn update_node_p2p_id(&self, node_address: &Address, p2p_id: &str) -> Result<()> {
    let mut connection = self.redis.client.get_multiplexed_async_connection().await?;
    let key = format!("{ORCHESTRATOR_BASE_KEY}:{node_address}");
    let _: () = connection.hset(&key, "p2p_id", p2p_id).await?;
    Ok(())
}
/// Update the task-related fields of a node's hash.
///
/// Semantics of the argument combinations:
/// - `current_task` AND `task_state` both `Some`: writes `task_id` and the
///   JSON-serialized `task_state`; `task_details` is written when provided
///   and deleted otherwise.
/// - Any other combination: ALL three task fields are cleared — note this
///   includes the case where only one of the first two is `Some`.
/// All writes happen in a single atomic MULTI/EXEC pipeline.
pub async fn update_node_task(
    &self,
    node_address: Address,
    current_task: Option<String>,
    task_state: Option<String>,
    task_details: Option<TaskDetails>,
) -> Result<()> {
    let mut con = self.redis.client.get_multiplexed_async_connection().await?;
    let node_key = format!("{ORCHESTRATOR_BASE_KEY}:{node_address}");
    // Build the update pipeline based on what fields need to be updated
    let mut pipe = redis::pipe();
    pipe.atomic();
    match (current_task, task_state, task_details) {
        (Some(task), Some(state), details) => {
            // Update task-related fields
            pipe.hset(&node_key, "task_id", task);
            // The incoming state is a raw string; normalize it through the
            // TaskState enum before storing.
            let task_state_enum = TaskState::from(state.as_str());
            let task_state_json = serde_json::to_string(&task_state_enum)?;
            pipe.hset(&node_key, "task_state", task_state_json);
            if let Some(details) = details {
                let details_json = serde_json::to_string(&details)?;
                pipe.hset(&node_key, "task_details", details_json);
            } else {
                // No details supplied: remove any stale details from a prior task.
                pipe.hdel(&node_key, "task_details");
            }
        }
        _ => {
            // Clear all task-related fields
            pipe.hdel(&node_key, vec!["task_id", "task_state", "task_details"]);
        }
    }
    let _: () = pipe.query_async(&mut con).await?;
    Ok(())
}
}
#[cfg(test)]
mod tests {
    use crate::api::tests::helper::create_test_app_state;
    use crate::models::node::NodeStatus;
    use crate::models::node::OrchestratorNode;
    use alloy::primitives::Address;
    use std::str::FromStr;
    // Verifies that get_uninvited_nodes returns only nodes whose status is
    // Discovered, excluding e.g. Healthy nodes.
    #[tokio::test]
    async fn test_get_uninvited_nodes() {
        let app_state = create_test_app_state().await;
        let node_store = &app_state.store_context.node_store;
        let uninvited_node = OrchestratorNode {
            address: Address::from_str("0x0000000000000000000000000000000000000001").unwrap(),
            ip_address: "192.168.1.1".to_string(),
            port: 8080,
            status: NodeStatus::Discovered,
            ..Default::default()
        };
        let healthy_node = OrchestratorNode {
            address: Address::from_str("0x0000000000000000000000000000000000000002").unwrap(),
            ip_address: "192.168.1.2".to_string(),
            port: 8081,
            status: NodeStatus::Healthy,
            ..Default::default()
        };
        node_store.add_node(uninvited_node.clone()).await.unwrap();
        node_store.add_node(healthy_node.clone()).await.unwrap();
        let uninvited_nodes = node_store.get_uninvited_nodes().await.unwrap();
        assert_eq!(uninvited_nodes.len(), 1);
        assert_eq!(uninvited_nodes[0].address, uninvited_node.address);
    }
    // Verifies get_nodes ordering: Healthy before Discovered before Dead.
    // Nodes are inserted in reverse order to prove sorting is applied.
    #[tokio::test]
    async fn test_node_sorting() {
        let app_state = create_test_app_state().await;
        let node_store = &app_state.store_context.node_store;
        let nodes = vec![
            OrchestratorNode {
                address: Address::from_str("0x0000000000000000000000000000000000000003").unwrap(),
                ip_address: "192.168.1.3".to_string(),
                port: 8082,
                status: NodeStatus::Dead,
                ..Default::default()
            },
            OrchestratorNode {
                address: Address::from_str("0x0000000000000000000000000000000000000002").unwrap(),
                ip_address: "192.168.1.2".to_string(),
                port: 8081,
                status: NodeStatus::Discovered,
                ..Default::default()
            },
            OrchestratorNode {
                address: Address::from_str("0x0000000000000000000000000000000000000001").unwrap(),
                ip_address: "192.168.1.1".to_string(),
                port: 8080,
                status: NodeStatus::Healthy,
                ..Default::default()
            },
        ];
        for node in nodes {
            node_store.add_node(node).await.unwrap();
        }
        let nodes = node_store.get_nodes().await.unwrap();
        assert_eq!(nodes.len(), 3);
        // Healthy node (…01) must come first.
        assert_eq!(
            nodes[0].address,
            Address::from_str("0x0000000000000000000000000000000000000001").unwrap()
        );
        // Discovered node (…02) second.
        assert_eq!(
            nodes[1].address,
            Address::from_str("0x0000000000000000000000000000000000000002").unwrap()
        );
        // Dead node (…03) last.
        assert_eq!(
            nodes[2].address,
            Address::from_str("0x0000000000000000000000000000000000000003").unwrap()
        );
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/domains/heartbeat_store.rs | crates/orchestrator/src/store/domains/heartbeat_store.rs | use crate::store::core::RedisStore;
use alloy::primitives::Address;
use anyhow::{anyhow, Result};
use redis::AsyncCommands;
use shared::models::heartbeat::HeartbeatRequest;
use std::str::FromStr;
use std::sync::Arc;
const ORCHESTRATOR_UNHEALTHY_COUNTER_KEY: &str = "orchestrator:unhealthy_counter";
const ORCHESTRATOR_HEARTBEAT_KEY: &str = "orchestrator:heartbeat";
/// Redis-backed store for worker heartbeats and per-node unhealthy counters.
pub struct HeartbeatStore {
    // Shared Redis client wrapper used for all operations.
    redis: Arc<RedisStore>,
}
impl HeartbeatStore {
    /// Create a store backed by the shared Redis client.
    pub fn new(redis: Arc<RedisStore>) -> Self {
        Self { redis }
    }
    /// Record a heartbeat: stores the serialized payload under the node's
    /// heartbeat key with a 90-second expiry, so entries age out on their
    /// own if the node stops reporting.
    pub async fn beat(&self, payload: &HeartbeatRequest) -> Result<()> {
        let address =
            Address::from_str(&payload.address).map_err(|_| anyhow!("Failed to parse address"))?;
        let key = format!("{ORCHESTRATOR_HEARTBEAT_KEY}:{address}");
        let payload_string =
            serde_json::to_string(payload).map_err(|_| anyhow!("Failed to serialize payload"))?;
        let mut con = self.redis.client.get_multiplexed_async_connection().await?;
        con.set_options::<_, _, ()>(
            &key,
            payload_string,
            redis::SetOptions::default().with_expiration(redis::SetExpiry::EX(90)),
        )
        .await
        .map_err(|_| anyhow!("Failed to set options"))?;
        Ok(())
    }
    /// Fetch the last heartbeat for a node, if any.
    /// NOTE(review): a stored value that fails to deserialize is silently
    /// mapped to `None` rather than surfaced as an error.
    pub async fn get_heartbeat(&self, address: &Address) -> Result<Option<HeartbeatRequest>> {
        let mut con = self
            .redis
            .client
            .get_multiplexed_async_connection()
            .await
            .map_err(|_| anyhow!("Failed to get connection"))?;
        let key = format!("{ORCHESTRATOR_HEARTBEAT_KEY}:{address}");
        let value: Option<String> = con
            .get(key)
            .await
            .map_err(|_| anyhow!("Failed to get value"))?;
        Ok(value.and_then(|v| serde_json::from_str(&v).ok()))
    }
    /// Read the unhealthy counter for a node; a missing key counts as 0.
    pub async fn get_unhealthy_counter(&self, address: &Address) -> Result<u32> {
        let mut con = self
            .redis
            .client
            .get_multiplexed_async_connection()
            .await
            .map_err(|_| anyhow!("Failed to get connection"))?;
        let key = format!("{ORCHESTRATOR_UNHEALTHY_COUNTER_KEY}:{address}");
        let value: Option<String> = con
            .get(key)
            .await
            .map_err(|_| anyhow!("Failed to get value"))?;
        match value {
            Some(value) => value
                .parse::<u32>()
                .map_err(|_| anyhow!("Failed to parse counter value")),
            None => Ok(0),
        }
    }
    /// Test helper: force the unhealthy counter to a specific value.
    #[cfg(test)]
    pub async fn set_unhealthy_counter(&self, address: &Address, counter: u32) -> Result<()> {
        let mut con = self
            .redis
            .client
            .get_multiplexed_async_connection()
            .await
            .map_err(|_| anyhow!("Failed to get connection"))?;
        // Key construction kept consistent with the non-test methods above.
        let key = format!("{ORCHESTRATOR_UNHEALTHY_COUNTER_KEY}:{address}");
        con.set(key, counter.to_string())
            .await
            .map_err(|_| anyhow!("Failed to set value"))
    }
    /// Atomically increment the node's unhealthy counter (Redis INCR;
    /// creates the key at 1 if it does not exist).
    pub async fn increment_unhealthy_counter(&self, address: &Address) -> Result<()> {
        let mut con = self
            .redis
            .client
            .get_multiplexed_async_connection()
            .await
            .map_err(|_| anyhow!("Failed to get connection"))?;
        let key = format!("{ORCHESTRATOR_UNHEALTHY_COUNTER_KEY}:{address}");
        con.incr(key, 1)
            .await
            .map_err(|_| anyhow!("Failed to increment value"))
    }
    /// Delete the node's unhealthy counter, resetting it to the implicit 0.
    pub async fn clear_unhealthy_counter(&self, address: &Address) -> Result<()> {
        let mut con = self
            .redis
            .client
            .get_multiplexed_async_connection()
            .await
            .map_err(|_| anyhow!("Failed to get connection"))?;
        let key = format!("{ORCHESTRATOR_UNHEALTHY_COUNTER_KEY}:{address}");
        con.del(key)
            .await
            .map_err(|_| anyhow!("Failed to delete value"))
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/domains/mod.rs | crates/orchestrator/src/store/domains/mod.rs | pub(super) mod heartbeat_store;
pub(super) mod metrics_store;
pub(super) mod node_store;
pub(super) mod task_store;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/core/mod.rs | crates/orchestrator/src/store/core/mod.rs | pub(crate) mod context;
pub(crate) mod redis;
pub use context::StoreContext;
pub use redis::RedisStore;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/core/context.rs | crates/orchestrator/src/store/core/context.rs | use crate::store::core::RedisStore;
use crate::store::domains::heartbeat_store::HeartbeatStore;
use crate::store::domains::metrics_store::MetricsStore;
use crate::store::domains::node_store::NodeStore;
use crate::store::domains::task_store::TaskStore;
use std::sync::Arc;
/// Aggregates all Redis-backed domain stores behind shared `Arc` handles so
/// they can be cloned cheaply into request handlers and background tasks.
pub struct StoreContext {
    pub node_store: Arc<NodeStore>,
    pub heartbeat_store: Arc<HeartbeatStore>,
    pub task_store: Arc<TaskStore>,
    pub metrics_store: Arc<MetricsStore>,
}
impl StoreContext {
    /// Build all domain stores on top of a single shared Redis client.
    pub fn new(store: Arc<RedisStore>) -> Self {
        Self {
            node_store: Arc::new(NodeStore::new(store.clone())),
            heartbeat_store: Arc::new(HeartbeatStore::new(store.clone())),
            task_store: Arc::new(TaskStore::new(store.clone())),
            // Last consumer takes ownership — no final clone needed.
            metrics_store: Arc::new(MetricsStore::new(store)),
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/store/core/redis.rs | crates/orchestrator/src/store/core/redis.rs | #[cfg(test)]
use log::debug;
use log::info;
use redis::Client;
#[cfg(test)]
use redis_test::server::RedisServer;
#[cfg(test)]
use std::sync::Arc;
#[cfg(test)]
use std::thread;
#[cfg(test)]
use std::time::Duration;
/// Shared Redis client wrapper. In test builds it additionally owns an
/// embedded Redis server instance used by the test helpers.
#[derive(Clone)]
pub struct RedisStore {
    pub client: Client,
    // Held only to keep the embedded test server alive for the lifetime of
    // the store; never accessed directly.
    #[allow(dead_code)]
    #[cfg(test)]
    server: Arc<RedisServer>,
}
impl RedisStore {
    /// Open a Redis client for the given URL.
    ///
    /// # Panics
    /// Panics if the client cannot be created for `redis_url`.
    ///
    /// NOTE(review): in test builds this also spawns a fresh embedded
    /// `RedisServer` purely to populate the `server` field — confirm this is
    /// intended when `new` (rather than `new_test`) is called under test.
    pub fn new(redis_url: &str) -> Self {
        match Client::open(redis_url) {
            Ok(client) => {
                info!("Successfully connected to Redis at {redis_url}");
                Self {
                    client,
                    #[cfg(test)]
                    server: Arc::new(RedisServer::new()),
                }
            }
            Err(e) => {
                panic!("Redis connection error: {e}");
            }
        }
    }
    /// Start an embedded Redis server for tests, connect to it, and flush it.
    ///
    /// NOTE(review): the connect loop retries forever; if the embedded server
    /// never becomes reachable this will hang the test instead of failing.
    #[cfg(test)]
    pub fn new_test() -> Self {
        let server = RedisServer::new();
        // Get the server address
        let (host, port) = match server.client_addr() {
            redis::ConnectionAddr::Tcp(host, port) => (host.clone(), *port),
            _ => panic!("Expected TCP connection"),
        };
        let redis_url = format!("redis://{}:{}", host, port);
        debug!("Starting test Redis server at {}", redis_url);
        // Add a small delay to ensure server is ready
        thread::sleep(Duration::from_millis(100));
        // Try to connect with retry logic
        let client = loop {
            if let Ok(client) = Client::open(redis_url.clone()) {
                // Verify connection works
                if let Ok(mut conn) = client.get_connection() {
                    if redis::cmd("PING").query::<String>(&mut conn).is_ok() {
                        // Start each test from an empty database.
                        let _ = redis::cmd("FLUSHALL").query::<String>(&mut conn);
                        break client;
                    }
                }
            }
            thread::sleep(Duration::from_millis(100));
        };
        Self {
            client,
            server: Arc::new(server),
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/metrics/sync_service.rs | crates/orchestrator/src/metrics/sync_service.rs | use crate::metrics::MetricsContext;
use crate::plugins::node_groups::NodeGroupsPlugin;
use crate::store::core::StoreContext;
use crate::ServerMode;
use log::{debug, error, info};
use shared::models::task::Task;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::interval;
/// Background service that periodically rebuilds Prometheus metrics from the
/// state stored in Redis (compute-task metrics and orchestrator statistics).
pub struct MetricsSyncService {
    store_context: Arc<StoreContext>,
    metrics_context: Arc<MetricsContext>,
    // Sync only runs for ProcessorOnly/Full modes; see `run`.
    server_mode: ServerMode,
    sync_interval: Duration,
    // Optional: when present, metrics are enriched with node-group labels.
    node_groups_plugin: Option<Arc<NodeGroupsPlugin>>,
}
impl MetricsSyncService {
pub fn new(
store_context: Arc<StoreContext>,
metrics_context: Arc<MetricsContext>,
server_mode: ServerMode,
sync_interval_seconds: u64,
node_groups_plugin: Option<Arc<NodeGroupsPlugin>>,
) -> Self {
Self {
store_context,
metrics_context,
server_mode,
sync_interval: Duration::from_secs(sync_interval_seconds),
node_groups_plugin,
}
}
/// Format task metadata into a structured string for Prometheus labels
/// Example: "model:qwen3-4b|dataset:intellect-2-rl|version:v1"
fn format_task_metadata(task: &Task) -> String {
if let Some(metadata) = &task.metadata {
if let Some(labels) = &metadata.labels {
if !labels.is_empty() {
return labels
.iter()
.map(|(k, v)| format!("{k}:{v}"))
.collect::<Vec<_>>()
.join("|");
}
}
}
"".to_string()
}
/// Efficiently get all node-to-group mappings including both group_id and group_config_name
/// Returns a HashMap where key is node_address and value is (group_id, group_config_name)
async fn get_all_node_group_info(&self) -> anyhow::Result<HashMap<String, (String, String)>> {
if let Some(node_groups_plugin) = &self.node_groups_plugin {
// First get all node to group_id mappings
let node_to_group_mappings =
match node_groups_plugin.get_all_node_group_mappings().await {
Ok(mappings) => mappings,
Err(e) => {
error!("Failed to get node group mappings: {e}");
return Ok(HashMap::new());
}
};
// Then get all groups to get their configuration names
let groups = match node_groups_plugin.get_all_groups().await {
Ok(groups) => groups,
Err(e) => {
error!("Failed to get all groups: {e}");
return Ok(HashMap::new());
}
};
// Create a mapping from group_id to configuration_name
let group_id_to_config: HashMap<String, String> = groups
.into_iter()
.map(|group| (group.id, group.configuration_name))
.collect();
// Combine the mappings to create node_address -> (group_id, group_config_name)
let mut result = HashMap::new();
for (node_address, group_id) in node_to_group_mappings {
if let Some(config_name) = group_id_to_config.get(&group_id) {
result.insert(node_address, (group_id, config_name.clone()));
} else {
// If we can't find the config name, still include the group_id
debug!("No configuration name found for group_id: {group_id}");
result.insert(node_address, (group_id, "unknown".to_string()));
}
}
Ok(result)
} else {
Ok(HashMap::new())
}
}
pub async fn run(&self) -> anyhow::Result<()> {
// Only run the sync service on ProcessorOnly or Full mode instances
if !matches!(
self.server_mode,
ServerMode::ProcessorOnly | ServerMode::Full
) {
debug!("Metrics sync service disabled for ApiOnly mode");
return Ok(());
}
info!(
"Starting metrics sync service (interval: {:?})",
self.sync_interval
);
let mut interval = interval(self.sync_interval);
loop {
interval.tick().await;
if let Err(e) = self.sync_metrics_from_redis().await {
error!("Error syncing metrics from Redis: {e}");
}
if let Err(e) = self.sync_orchestrator_statistics().await {
error!("Error syncing orchestrator statistics: {e}");
}
}
}
pub async fn sync_metrics_from_redis(&self) -> anyhow::Result<()> {
debug!("Syncing metrics from Redis to Prometheus");
// Get all metrics from Redis
let redis_metrics = match self.store_context.metrics_store.get_all_metrics().await {
Ok(metrics) => metrics,
Err(e) => {
error!("Failed to get metrics from Redis: {e}");
return Err(e);
}
};
// Get all tasks to map task_id to task_name
let tasks = self
.store_context
.task_store
.get_all_tasks()
.await
.unwrap_or_else(|e| {
error!("Failed to get tasks for task name mapping: {e}");
Vec::new()
});
let task_name_map: HashMap<String, String> = tasks
.into_iter()
.map(|task| (task.id.to_string(), task.name.clone()))
.collect();
let node_to_group_info = if self.node_groups_plugin.is_some() {
match self.get_all_node_group_info().await {
Ok(info) => info,
Err(e) => {
error!("Failed to get node group info: {e}");
HashMap::new()
}
}
} else {
HashMap::new()
};
// Clear existing Prometheus metrics
self.metrics_context.clear_compute_task_metrics();
// Rebuild metrics from Redis data
let mut total_metrics = 0;
for (task_id, task_metrics) in redis_metrics {
let task_name = task_name_map.get(&task_id).cloned().unwrap_or_else(|| {
debug!("No task name found for task_id: {task_id}");
"unknown".to_string()
});
for (label, node_metrics) in task_metrics {
for (node_address, value) in node_metrics {
let (group_id, group_config_name) = node_to_group_info
.get(&node_address)
.map(|(id, config)| (Some(id.as_str()), Some(config.as_str())))
.unwrap_or((None, None));
self.metrics_context.record_compute_task_gauge(
&node_address,
&task_id,
&task_name,
&label,
value,
group_id,
group_config_name,
);
total_metrics += 1;
}
}
}
debug!("Synced {total_metrics} metric entries from Redis to Prometheus");
Ok(())
}
pub async fn sync_orchestrator_statistics(&self) -> anyhow::Result<()> {
debug!("Syncing orchestrator statistics to Prometheus");
// Clear existing orchestrator statistics
self.metrics_context.clear_orchestrator_statistics();
// Get nodes once and reuse for multiple statistics
let nodes = match self.store_context.node_store.get_nodes().await {
Ok(nodes) => nodes,
Err(e) => {
error!("Failed to get nodes for statistics: {e}");
Vec::new()
}
};
// Sync nodes count by status
let mut status_counts: HashMap<String, i32> = HashMap::new();
for node in &nodes {
let status = format!("{:?}", node.status).to_lowercase();
*status_counts.entry(status).or_insert(0) += 1;
}
for (status, count) in status_counts {
self.metrics_context.set_nodes_count(&status, count as f64);
}
debug!("Synced node statistics");
// Sync total tasks count (simple count, not by state)
if let Ok(tasks) = self.store_context.task_store.get_all_tasks().await {
let total_tasks = tasks.len() as f64;
self.metrics_context.set_tasks_count(total_tasks);
debug!("Synced task statistics: {total_tasks} total tasks");
// Sync task info metrics with metadata
for task in &tasks {
let task_id = task.id.to_string();
let metadata = Self::format_task_metadata(task);
self.metrics_context
.set_task_info(&task_id, &task.name, &metadata);
}
debug!("Synced task info metrics with metadata");
// Sync nodes per task based on node assignments
// Create task name mapping
let task_name_map: HashMap<String, String> = tasks
.into_iter()
.map(|task| (task.id.to_string(), task.name.clone()))
.collect();
// Count nodes per task
let mut task_node_counts: HashMap<String, i32> = HashMap::new();
for node in &nodes {
if let Some(task_id) = &node.task_id {
*task_node_counts.entry(task_id.clone()).or_insert(0) += 1;
}
}
// Set metrics for each task with active nodes
for (task_id, count) in task_node_counts {
let task_name = task_name_map.get(&task_id).cloned().unwrap_or_else(|| {
debug!("No task name found for task_id: {task_id}");
"unknown".to_string()
});
self.metrics_context
.set_nodes_per_task(&task_id, &task_name, count as f64);
}
debug!("Synced nodes per task statistics");
} else {
error!("Failed to get tasks for statistics");
}
// Sync groups count by configuration name
if let Some(node_groups_plugin) = &self.node_groups_plugin {
if let Ok(groups) = node_groups_plugin.get_all_groups().await {
let mut config_counts: HashMap<String, i32> = HashMap::new();
for group in groups {
*config_counts.entry(group.configuration_name).or_insert(0) += 1;
}
for (config_name, count) in config_counts {
self.metrics_context
.set_groups_count(&config_name, count as f64);
}
debug!("Synced group statistics");
} else {
error!("Failed to get groups for statistics");
}
} else {
debug!("Node groups plugin not available, skipping groups metrics");
}
debug!("Completed syncing orchestrator statistics");
Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use shared::models::task::{Task, TaskMetadata, TaskState};
    use std::collections::HashMap;
    use uuid::Uuid;
    // With populated labels: expect "k:v" pairs joined by '|'; HashMap
    // iteration order is unspecified, so only membership and separator
    // count are asserted.
    #[test]
    fn test_format_task_metadata_with_labels() {
        let mut labels = HashMap::new();
        labels.insert("model".to_string(), "qwen3-4b".to_string());
        labels.insert("dataset".to_string(), "intellect-2-rl-dataset".to_string());
        labels.insert("version".to_string(), "v1".to_string());
        let task = Task {
            id: Uuid::new_v4(),
            image: "test".to_string(),
            name: "test".to_string(),
            state: TaskState::PENDING,
            metadata: Some(TaskMetadata {
                labels: Some(labels),
            }),
            ..Default::default()
        };
        let formatted = MetricsSyncService::format_task_metadata(&task);
        // The format should be key:value pairs separated by |
        // Order might vary due to HashMap iteration
        assert!(formatted.contains("model:qwen3-4b"));
        assert!(formatted.contains("dataset:intellect-2-rl-dataset"));
        assert!(formatted.contains("version:v1"));
        assert_eq!(formatted.matches('|').count(), 2); // Should have 2 separators for 3 labels
    }
    // No metadata at all -> empty string.
    #[test]
    fn test_format_task_metadata_empty() {
        let task = Task {
            id: Uuid::new_v4(),
            image: "test".to_string(),
            name: "test".to_string(),
            state: TaskState::PENDING,
            metadata: None,
            ..Default::default()
        };
        let formatted = MetricsSyncService::format_task_metadata(&task);
        assert_eq!(formatted, "");
    }
    // Metadata present but labels map empty -> empty string.
    #[test]
    fn test_format_task_metadata_empty_labels() {
        let task = Task {
            id: Uuid::new_v4(),
            image: "test".to_string(),
            name: "test".to_string(),
            state: TaskState::PENDING,
            metadata: Some(TaskMetadata {
                labels: Some(HashMap::new()),
            }),
            ..Default::default()
        };
        let formatted = MetricsSyncService::format_task_metadata(&task);
        assert_eq!(formatted, "");
    }
    // Metadata present but labels is None -> empty string.
    #[test]
    fn test_format_task_metadata_no_labels() {
        let task = Task {
            id: Uuid::new_v4(),
            image: "test".to_string(),
            name: "test".to_string(),
            state: TaskState::PENDING,
            metadata: Some(TaskMetadata { labels: None }),
            ..Default::default()
        };
        let formatted = MetricsSyncService::format_task_metadata(&task);
        assert_eq!(formatted, "");
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/metrics/webhook_sender.rs | crates/orchestrator/src/metrics/webhook_sender.rs | use crate::plugins::webhook::WebhookPlugin;
use crate::store::core::StoreContext;
use anyhow::Result;
use log::{error, info};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::interval;
/// Periodically pushes aggregate task metrics to configured webhooks,
/// sending only when the metric set has changed since the last send.
pub struct MetricsWebhookSender {
    store_context: Arc<StoreContext>,
    webhook_plugins: Vec<WebhookPlugin>,
    // Snapshot of the metrics sent last time, used for change detection.
    last_sent_metrics: HashMap<String, f64>,
    pool_id: u32,
}
impl MetricsWebhookSender {
    /// Build a sender for the given pool; starts with an empty "last sent"
    /// snapshot so the first successful fetch always triggers a send.
    pub fn new(
        store_context: Arc<StoreContext>,
        webhook_plugins: Vec<WebhookPlugin>,
        pool_id: u32,
    ) -> Self {
        Self {
            store_context,
            webhook_plugins,
            last_sent_metrics: HashMap::new(),
            pool_id,
        }
    }
    /// Returns true when `metrics` differs from `last_sent_metrics`: a
    /// different key count, a key missing from the last snapshot, or a value
    /// differing by more than a small epsilon.
    pub fn metrics_changed(
        metrics: &HashMap<String, f64>,
        last_sent_metrics: &HashMap<String, f64>,
    ) -> bool {
        if metrics.len() != last_sent_metrics.len() {
            return true;
        }
        // FP imprecision fix
        const EPSILON: f64 = 1e-10;
        for (key, value) in metrics {
            match last_sent_metrics.get(key) {
                None => return true,
                Some(last_value) if (last_value - value).abs() > EPSILON => return true,
                _ => continue,
            }
        }
        false
    }
    /// Main loop: every 15 seconds, fetch the aggregate metrics and, if they
    /// changed, fan them out to all webhook plugins. Fetch errors are logged
    /// and the tick is skipped; the loop never returns.
    /// NOTE(review): per-plugin send results are intentionally discarded
    /// (`let _`), so a failing webhook is not retried for this snapshot.
    pub async fn run(&mut self) -> Result<()> {
        let mut interval = interval(Duration::from_secs(15));
        loop {
            interval.tick().await;
            let metrics = match self
                .store_context
                .metrics_store
                .get_aggregate_metrics_for_all_tasks()
                .await
            {
                Ok(metrics) => metrics,
                Err(e) => {
                    error!("Error getting aggregate metrics for all tasks: {e}");
                    continue;
                }
            };
            if Self::metrics_changed(&metrics, &self.last_sent_metrics) {
                info!("Sending {} metrics via webhook", metrics.len());
                for plugin in &self.webhook_plugins {
                    let _ = plugin.send_metrics_updated(self.pool_id, metrics.clone());
                }
                // Update last sent metrics
                self.last_sent_metrics = metrics.clone();
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Two maps with identical key/value pairs (insertion order differs) must
    // compare as unchanged.
    #[tokio::test]
    async fn test_metrics_changed() {
        let mut metrics = HashMap::new();
        metrics.insert("test_metric".to_string(), 1.0);
        metrics.insert("metric_2".to_string(), 2.0);
        let mut last_sent_metrics = HashMap::new();
        last_sent_metrics.insert("metric_2".to_string(), 2.0);
        last_sent_metrics.insert("test_metric".to_string(), 1.0);
        let metrics_changed = MetricsWebhookSender::metrics_changed(&metrics, &last_sent_metrics);
        assert!(!metrics_changed);
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/metrics/mod.rs | crates/orchestrator/src/metrics/mod.rs | use prometheus::{CounterVec, GaugeVec, HistogramOpts, HistogramVec, Opts, Registry, TextEncoder};
pub(crate) mod sync_service;
pub(crate) mod webhook_sender;
/// Holds every Prometheus metric family used by the orchestrator plus the
/// registry they are registered in. All metrics carry the pool id as a label.
pub struct MetricsContext {
    /// Per-(node, task, label) compute metric values, rebuilt from Redis.
    pub compute_task_gauges: GaugeVec,
    /// Constant-1 gauge carrying task metadata as labels.
    pub task_info: GaugeVec,
    /// Pool identifier stamped onto every metric as the `pool_id` label.
    pub pool_id: String,
    /// Registry all of the families below are registered with.
    pub registry: Registry,
    pub file_upload_requests_total: CounterVec,
    /// Node counts broken down by status.
    pub nodes_total: GaugeVec,
    pub tasks_total: GaugeVec,
    /// Group counts broken down by configuration name.
    pub groups_total: GaugeVec,
    pub heartbeat_requests_total: CounterVec,
    pub nodes_per_task: GaugeVec,
    /// 1/0 gauge for the task state each node reports.
    pub task_state: GaugeVec,
    /// Histogram of status-update execution durations per node.
    pub status_update_execution_time: HistogramVec,
}
impl MetricsContext {
pub fn new(pool_id: String) -> Self {
// For current state/rate metrics
let compute_task_gauges = GaugeVec::new(
Opts::new("compute_gauges", "Compute task gauge metrics"),
&[
"node_address",
"task_id",
"task_name",
"label",
"pool_id",
"group_id",
"group_config_name",
],
)
.unwrap();
let task_info = GaugeVec::new(
Opts::new("task_info", "Task information with metadata"),
&["task_id", "task_name", "pool_id", "metadata"],
)
.unwrap();
// New metrics for orchestrator statistics
let file_upload_requests_total = CounterVec::new(
Opts::new(
"orchestrator_file_upload_requests_total",
"Total number of file upload requests",
),
&["task_id", "task_name", "node_address", "pool_id"],
)
.unwrap();
let nodes_total = GaugeVec::new(
Opts::new(
"orchestrator_nodes_total",
"Total number of nodes by status",
),
&["status", "pool_id"],
)
.unwrap();
let tasks_total = GaugeVec::new(
Opts::new("orchestrator_tasks_total", "Total number of tasks"),
&["pool_id"],
)
.unwrap();
let groups_total = GaugeVec::new(
Opts::new(
"orchestrator_groups_total",
"Total number of node groups by configuration",
),
&["configuration_name", "pool_id"],
)
.unwrap();
let heartbeat_requests_total = CounterVec::new(
Opts::new(
"orchestrator_heartbeat_requests_total",
"Total number of heartbeat requests per node",
),
&["node_address", "pool_id"],
)
.unwrap();
let nodes_per_task = GaugeVec::new(
Opts::new(
"orchestrator_nodes_per_task",
"Number of nodes actively working on each task",
),
&["task_id", "task_name", "pool_id"],
)
.unwrap();
let task_state = GaugeVec::new(
Opts::new(
"orchestrator_task_state",
"Task state reported from nodes (1 for active state, 0 for inactive)",
),
&["node_address", "task_id", "task_state", "pool_id"],
)
.unwrap();
let status_update_execution_time = HistogramVec::new(
HistogramOpts::new(
"orchestrator_status_update_execution_time_seconds",
"Duration of status update execution",
)
.buckets(vec![
0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 30.0, 45.0, 60.0, 90.0,
120.0,
]),
&["node_address", "pool_id"],
)
.unwrap();
let registry = Registry::new();
let _ = registry.register(Box::new(compute_task_gauges.clone()));
let _ = registry.register(Box::new(task_info.clone()));
let _ = registry.register(Box::new(file_upload_requests_total.clone()));
let _ = registry.register(Box::new(nodes_total.clone()));
let _ = registry.register(Box::new(tasks_total.clone()));
let _ = registry.register(Box::new(groups_total.clone()));
let _ = registry.register(Box::new(heartbeat_requests_total.clone()));
let _ = registry.register(Box::new(nodes_per_task.clone()));
let _ = registry.register(Box::new(task_state.clone()));
let _ = registry.register(Box::new(status_update_execution_time.clone()));
Self {
compute_task_gauges,
task_info,
pool_id,
registry,
file_upload_requests_total,
nodes_total,
tasks_total,
groups_total,
heartbeat_requests_total,
nodes_per_task,
task_state,
status_update_execution_time,
}
}
#[allow(clippy::too_many_arguments)]
pub fn record_compute_task_gauge(
&self,
node_address: &str,
task_id: &str,
task_name: &str,
label: &str,
value: f64,
group_id: Option<&str>,
group_config_name: Option<&str>,
) {
let group_id_str = group_id.unwrap_or("none");
let group_config_name_str = group_config_name.unwrap_or("none");
self.compute_task_gauges
.with_label_values(&[
node_address,
task_id,
task_name,
label,
&self.pool_id,
group_id_str,
group_config_name_str,
])
.set(value);
}
pub fn increment_file_upload_requests(
&self,
task_id: &str,
task_name: &str,
node_address: &str,
) {
self.file_upload_requests_total
.with_label_values(&[task_id, task_name, node_address, &self.pool_id])
.inc();
}
pub fn increment_heartbeat_requests(&self, node_address: &str) {
self.heartbeat_requests_total
.with_label_values(&[node_address, &self.pool_id])
.inc();
}
pub fn set_nodes_count(&self, status: &str, count: f64) {
self.nodes_total
.with_label_values(&[status, &self.pool_id])
.set(count);
}
pub fn set_tasks_count(&self, count: f64) {
self.tasks_total
.with_label_values(&[&self.pool_id])
.set(count);
}
pub fn set_groups_count(&self, configuration_name: &str, count: f64) {
self.groups_total
.with_label_values(&[configuration_name, &self.pool_id])
.set(count);
}
pub fn set_nodes_per_task(&self, task_id: &str, task_name: &str, count: f64) {
self.nodes_per_task
.with_label_values(&[task_id, task_name, &self.pool_id])
.set(count);
}
pub fn set_task_info(&self, task_id: &str, task_name: &str, metadata: &str) {
self.task_info
.with_label_values(&[task_id, task_name, &self.pool_id, metadata])
.set(1.0);
}
pub fn set_task_state(&self, node_address: &str, task_id: &str, task_state: &str) {
self.task_state
.with_label_values(&[node_address, task_id, task_state, &self.pool_id])
.set(1.0);
}
/// Render every metric family registered on this registry in the
/// Prometheus text exposition format.
pub fn export_metrics(&self) -> Result<String, prometheus::Error> {
    let families = self.registry.gather();
    TextEncoder::new().encode_to_string(&families)
}
/// Drop every time series in the `compute_task_gauges` family.
///
/// Removes all existing series so the gauges can be rebuilt from Redis
/// without stale entries (e.g. for nodes or tasks that no longer exist).
pub fn clear_compute_task_metrics(&self) {
    self.compute_task_gauges.reset();
}
/// Observe how long one status-update pass took for a node, in seconds.
pub fn record_status_update_execution_time(&self, node_address: &str, duration: f64) {
    let timer = self
        .status_update_execution_time
        .with_label_values(&[node_address, &self.pool_id]);
    timer.observe(duration);
}
/// Reset every orchestrator-statistics gauge family so the next
/// statistics pass can rebuild them from scratch.
///
/// Counters (heartbeats, file uploads) are intentionally not touched here.
pub fn clear_orchestrator_statistics(&self) {
    // Order is irrelevant: each reset is independent.
    self.task_state.reset();
    self.task_info.reset();
    self.nodes_per_task.reset();
    self.groups_total.reset();
    self.tasks_total.reset();
    self.nodes_total.reset();
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/models/node.rs | crates/orchestrator/src/models/node.rs | use alloy::primitives::Address;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use shared::models::heartbeat::TaskDetails;
use shared::models::node::{ComputeSpecs, DiscoveryNode, NodeLocation};
use shared::models::task::TaskState;
use std::fmt::{self, Display};
use utoipa::ToSchema;
/// Orchestrator-side record of a worker node, persisted in the node store
/// and exposed over the API (hence the serde / `ToSchema` derives).
#[derive(Debug, Clone, Serialize, Deserialize, Default, ToSchema)]
pub struct OrchestratorNode {
    /// On-chain address identifying the node; serialized as a hex string.
    #[serde(serialize_with = "serialize_address")]
    #[schema(value_type = String, example = "0x742d35Cc6634C0532925a3b8D6Ac6f29d1e6b8e0")]
    pub address: Address,
    // Network endpoint reported by the discovery service.
    pub ip_address: String,
    pub port: u16,
    // Lifecycle status driven by the heartbeat/status-update loops.
    pub status: NodeStatus,
    // Currently assigned task (if any), as reported via heartbeats.
    pub task_id: Option<String>,
    pub task_state: Option<TaskState>,
    #[serde(default)]
    pub task_details: Option<TaskDetails>,
    // Worker software version, taken from heartbeat payloads.
    pub version: Option<String>,
    pub p2p_id: Option<String>,
    // Set whenever `status` transitions; None until the first change.
    pub last_status_change: Option<DateTime<Utc>>,
    #[serde(default)]
    pub first_seen: Option<DateTime<Utc>>,
    // Hardware description copied from the discovery record.
    #[serde(default)]
    pub compute_specs: Option<ComputeSpecs>,
    #[serde(default)]
    pub worker_p2p_id: Option<String>,
    #[serde(default)]
    pub worker_p2p_addresses: Option<Vec<String>>,
    // Geographic location, if the discovery service provided one.
    #[serde(default)]
    pub location: Option<NodeLocation>,
}
/// Serde helper: serialize an `Address` via its `Display` form
/// (the same text `address.to_string()` would produce).
fn serialize_address<S>(address: &Address, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    serializer.collect_str(address)
}
impl From<DiscoveryNode> for OrchestratorNode {
    /// Build a freshly `Discovered` orchestrator node from its
    /// discovery-service record. Runtime state (task, version, p2p id,
    /// timestamps) starts empty and is filled in later by the heartbeat
    /// and status-update loops.
    ///
    /// NOTE(review): `id.parse().unwrap()` panics on a malformed address —
    /// presumably the discovery service validates ids upstream; confirm.
    /// The `.clone()` calls look redundant for an owned value, but may be
    /// required if these fields are reached through a `Deref` impl on
    /// `DiscoveryNode` — verify before removing.
    fn from(discovery_node: DiscoveryNode) -> Self {
        Self {
            address: discovery_node.id.parse().unwrap(),
            ip_address: discovery_node.ip_address.clone(),
            port: discovery_node.port,
            status: NodeStatus::Discovered,
            task_id: None,
            task_state: None,
            version: None,
            p2p_id: None,
            last_status_change: None,
            first_seen: None,
            task_details: None,
            compute_specs: discovery_node.compute_specs.clone(),
            worker_p2p_id: discovery_node.worker_p2p_id.clone(),
            worker_p2p_addresses: discovery_node.worker_p2p_addresses.clone(),
            location: discovery_node.location.clone(),
        }
    }
}
impl fmt::Display for OrchestratorNode {
    /// Formats the node as its JSON representation.
    ///
    /// Panics if serde serialization fails (not expected for this type).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let json = serde_json::to_string(self).unwrap();
        f.write_str(&json)
    }
}
/// Lifecycle states a node moves through in the orchestrator.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default, ToSchema)]
pub enum NodeStatus {
    /// Known from the discovery service but not yet invited/active.
    #[default]
    Discovered,
    /// Invited to the pool; waiting for its first heartbeat.
    WaitingForHeartbeat,
    /// Sending heartbeats and a member of the pool.
    Healthy,
    /// Missed recent heartbeats; may recover or degrade to `Dead`.
    Unhealthy,
    /// Missed too many heartbeats; eligible for on-chain ejection.
    Dead,
    /// Removed from the pool.
    Ejected,
    Banned,
    // NOTE(review): LowBalance semantics are not visible in this file —
    // presumably set when the node's wallet balance is too low; confirm.
    LowBalance,
}
impl Display for NodeStatus {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{self:?}")
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/models/mod.rs | crates/orchestrator/src/models/mod.rs | pub(crate) mod node;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/utils/loop_heartbeats.rs | crates/orchestrator/src/utils/loop_heartbeats.rs | use serde::Serialize;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use utoipa::ToSchema;
use crate::ServerMode;
/// Snapshot of background-loop liveness returned by the health endpoint.
#[derive(Serialize, ToSchema)]
pub struct HealthStatus {
    /// Overall verdict computed by `LoopHeartbeats::health_status`.
    pub healthy: bool,
    // For each loop: seconds since its last completed iteration,
    // or -1 if the loop has never run.
    pub inviter_last_run_seconds_ago: i64,
    pub monitor_last_run_seconds_ago: i64,
    pub status_updater_last_run_seconds_ago: i64,
    pub node_groups_last_run_seconds_ago: i64,
}
/// Tracks when each background loop last completed an iteration.
///
/// Timestamps are UNIX seconds; -1 means "never ran".
/// NOTE(review): the per-field `Arc` looks redundant since the whole
/// struct is already shared behind an `Arc` (see `NodeStatusUpdater`);
/// confirm before simplifying.
pub struct LoopHeartbeats {
    last_inviter_iteration: Arc<AtomicI64>,
    last_monitor_iteration: Arc<AtomicI64>,
    last_status_updater_iteration: Arc<AtomicI64>,
    last_node_groups_iteration: Arc<AtomicI64>,
    // Determines whether loop liveness affects the health verdict.
    server_mode: ServerMode,
}
impl LoopHeartbeats {
    /// Create a heartbeat tracker; all timestamps start at -1 ("never ran").
    pub fn new(server_mode: &ServerMode) -> Self {
        Self {
            last_inviter_iteration: Arc::new(AtomicI64::new(-1)),
            last_monitor_iteration: Arc::new(AtomicI64::new(-1)),
            last_status_updater_iteration: Arc::new(AtomicI64::new(-1)),
            last_node_groups_iteration: Arc::new(AtomicI64::new(-1)),
            server_mode: *server_mode,
        }
    }

    /// Current UNIX time in whole seconds.
    fn now_secs() -> i64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is before the UNIX epoch")
            .as_secs() as i64
    }

    /// Seconds elapsed between `now` and `last`, or -1 if the loop has
    /// never recorded an iteration (`last <= 0`).
    fn seconds_ago(now: i64, last: i64) -> i64 {
        if last > 0 {
            now - last
        } else {
            -1
        }
    }

    /// Record that the inviter loop completed an iteration.
    pub fn update_inviter(&self) {
        self.last_inviter_iteration
            .store(Self::now_secs(), Ordering::SeqCst);
    }

    /// Record that the monitor loop completed an iteration.
    pub fn update_monitor(&self) {
        self.last_monitor_iteration
            .store(Self::now_secs(), Ordering::SeqCst);
    }

    /// Record that the status-updater loop completed an iteration.
    pub fn update_status_updater(&self) {
        self.last_status_updater_iteration
            .store(Self::now_secs(), Ordering::SeqCst);
    }

    /// Record that the node-groups loop completed an iteration.
    pub fn update_node_groups(&self) {
        self.last_node_groups_iteration
            .store(Self::now_secs(), Ordering::SeqCst);
    }

    /// Snapshot loop liveness.
    ///
    /// A loop counts as fresh when it ran within the last two minutes;
    /// a loop that never ran reports -1 and is never fresh. When
    /// `with_node_groups` is set, the node-groups loop must also be fresh.
    /// API-only servers run none of the loops and always report healthy.
    pub fn health_status(&self, with_node_groups: bool) -> HealthStatus {
        // Maximum age, in seconds, before a loop is considered stale.
        const STALE_AFTER_SECS: i64 = 120;
        let now = Self::now_secs();

        let inviter_seconds_ago =
            Self::seconds_ago(now, self.last_inviter_iteration.load(Ordering::SeqCst));
        let monitor_seconds_ago =
            Self::seconds_ago(now, self.last_monitor_iteration.load(Ordering::SeqCst));
        let status_updater_seconds_ago = Self::seconds_ago(
            now,
            self.last_status_updater_iteration.load(Ordering::SeqCst),
        );
        let node_groups_seconds_ago =
            Self::seconds_ago(now, self.last_node_groups_iteration.load(Ordering::SeqCst));

        let is_fresh = |seconds_ago: i64| seconds_ago != -1 && seconds_ago < STALE_AFTER_SECS;

        let mut processes_healthy = is_fresh(inviter_seconds_ago)
            && is_fresh(monitor_seconds_ago)
            && is_fresh(status_updater_seconds_ago);
        if with_node_groups {
            processes_healthy = processes_healthy && is_fresh(node_groups_seconds_ago);
        }

        let healthy = match self.server_mode {
            // TODO: in the future we might want to check if the redis connection is healthy
            ServerMode::ApiOnly => true,
            _ => processes_healthy,
        };

        HealthStatus {
            healthy,
            inviter_last_run_seconds_ago: inviter_seconds_ago,
            monitor_last_run_seconds_ago: monitor_seconds_ago,
            status_updater_last_run_seconds_ago: status_updater_seconds_ago,
            node_groups_last_run_seconds_ago: node_groups_seconds_ago,
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/utils/mod.rs | crates/orchestrator/src/utils/mod.rs | pub(crate) mod loop_heartbeats;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/scheduler/mod.rs | crates/orchestrator/src/scheduler/mod.rs | use alloy::primitives::Address;
use shared::models::task::Task;
use std::sync::Arc;
use crate::plugins::{newest_task::NewestTaskPlugin, SchedulerPlugin};
use crate::store::core::StoreContext;
use anyhow::Result;
/// Assigns tasks to nodes: candidate tasks are pulled from the store,
/// narrowed by a chain of scheduler plugins, and the first survivor wins.
pub struct Scheduler {
    store_context: Arc<StoreContext>,
    // Applied in order; each plugin filters the remaining candidate tasks.
    plugins: Vec<SchedulerPlugin>,
}
impl Scheduler {
    /// Create a scheduler. An empty plugin list falls back to the
    /// `NewestTaskPlugin` so there is always at least one filter.
    pub fn new(store_context: Arc<StoreContext>, plugins: Vec<SchedulerPlugin>) -> Self {
        let mut plugins = plugins;
        if plugins.is_empty() {
            plugins.push(NewestTaskPlugin.into());
        }
        Self {
            store_context,
            plugins,
        }
    }

    /// Replace `${TASK_ID}` / `${NODE_ADDRESS}` placeholders in `input`.
    fn substitute(input: &str, task_id: &str, node_address: &str) -> String {
        input
            .replace("${TASK_ID}", task_id)
            .replace("${NODE_ADDRESS}", node_address)
    }

    /// Pick the task to run on `node_address`, if any.
    ///
    /// All tasks are fetched, each plugin filters the candidate list in
    /// turn, and the first remaining task is returned with its
    /// `${TASK_ID}` / `${NODE_ADDRESS}` placeholders expanded in env vars,
    /// command arguments, and volume mounts.
    ///
    /// # Errors
    /// Propagates task-store and plugin failures.
    pub async fn get_task_for_node(&self, node_address: Address) -> Result<Option<Task>> {
        let mut all_tasks = self.store_context.task_store.get_all_tasks().await?;
        for plugin in self.plugins.iter() {
            all_tasks = plugin.filter_tasks(&all_tasks, &node_address).await?;
        }

        // Take ownership of the first surviving task (avoids a clone).
        let mut task = match all_tasks.into_iter().next() {
            Some(task) => task,
            None => return Ok(None),
        };

        // Hoist the replacement strings out of the loops below.
        let task_id = task.id.to_string();
        let node_addr = node_address.to_string();

        if let Some(env_vars) = &mut task.env_vars {
            for value in env_vars.values_mut() {
                *value = Self::substitute(value, &task_id, &node_addr);
            }
        }
        if let Some(cmd) = &mut task.cmd {
            for arg in cmd.iter_mut() {
                *arg = Self::substitute(arg, &task_id, &node_addr);
            }
        }
        if let Some(volume_mounts) = &mut task.volume_mounts {
            for volume_mount in volume_mounts.iter_mut() {
                // Volume mounts have their own label-replacement helper.
                *volume_mount =
                    volume_mount.replace_labels(&task_id, Some(&node_addr));
            }
        }
        Ok(Some(task))
    }
}
#[cfg(test)]
mod tests {
    use shared::models::task::TaskState;
    use std::collections::HashMap;
    use uuid::Uuid;

    use crate::api::tests::helper::create_test_app_state;

    use super::*;

    /// A pending task in the store is returned as-is for any node.
    #[tokio::test]
    async fn test_get_task_for_node() {
        let state = create_test_app_state().await;
        let scheduler = Scheduler::new(state.store_context.clone(), vec![]);
        let task = Task {
            id: Uuid::new_v4(),
            image: "image".to_string(),
            name: "name".to_string(),
            state: TaskState::PENDING,
            created_at: 1,
            ..Default::default()
        };
        let _ = state.store_context.task_store.add_task(task.clone()).await;
        let task_for_node = scheduler.get_task_for_node(Address::ZERO).await.unwrap();
        assert_eq!(task_for_node, Some(task));
    }

    /// ${TASK_ID} / ${NODE_ADDRESS} placeholders are expanded in both
    /// env vars and command arguments of the returned task.
    #[tokio::test]
    async fn test_variable_replacement() {
        let state = create_test_app_state().await;
        let scheduler = Scheduler::new(state.store_context.clone(), vec![]);
        let node_address = Address::from([1u8; 20]);

        let mut env_vars = HashMap::new();
        env_vars.insert("TASK_ID_VAR".to_string(), "task-${TASK_ID}".to_string());
        env_vars.insert("NODE_VAR".to_string(), "node-${NODE_ADDRESS}".to_string());

        let task = Task {
            id: Uuid::new_v4(),
            image: "image".to_string(),
            name: "name".to_string(),
            state: TaskState::PENDING,
            created_at: 1,
            env_vars: Some(env_vars),
            cmd: Some(vec![
                "--task=${TASK_ID}".to_string(),
                "--node=${NODE_ADDRESS}".to_string(),
            ]),
            entrypoint: None,
            ..Default::default()
        };
        let _ = state.store_context.task_store.add_task(task.clone()).await;

        let result = scheduler.get_task_for_node(node_address).await.unwrap();
        assert!(result.is_some());
        let returned_task = result.unwrap();

        // Check env vars replacement
        let env_vars = returned_task.env_vars.unwrap();
        assert_eq!(
            env_vars.get("TASK_ID_VAR").unwrap(),
            &format!("task-{}", task.id)
        );
        assert_eq!(
            env_vars.get("NODE_VAR").unwrap(),
            &format!("node-{}", node_address)
        );

        // Check cmd replacement
        let cmd = returned_task.cmd.unwrap();
        assert_eq!(cmd[0], format!("--task={}", task.id));
        assert_eq!(cmd[1], format!("--node={}", node_address));
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/status_update/mod.rs | crates/orchestrator/src/status_update/mod.rs | use crate::metrics::MetricsContext;
use crate::models::node::{NodeStatus, OrchestratorNode};
use crate::plugins::StatusUpdatePlugin;
use crate::store::core::StoreContext;
use crate::utils::loop_heartbeats::LoopHeartbeats;
use futures::stream::FuturesUnordered;
use log::{debug, error, info};
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::wallet::WalletProvider;
use std::result::Result;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::time::interval;
/// Background service that advances node statuses based on heartbeats
/// and reconciles dead nodes with the on-chain compute pool.
pub struct NodeStatusUpdater {
    store_context: Arc<StoreContext>,
    // Seconds between update passes.
    update_interval: u64,
    // Consecutive missed heartbeats before a node degrades (default 3).
    missing_heartbeat_threshold: u32,
    contracts: Contracts<WalletProvider>,
    pool_id: u32,
    // When set, dead nodes are never ejected on-chain.
    disable_ejection: bool,
    // Liveness tracker updated after each pass.
    heartbeats: Arc<LoopHeartbeats>,
    // Notified after each node status transition.
    plugins: Vec<StatusUpdatePlugin>,
    metrics: Arc<MetricsContext>,
}
impl NodeStatusUpdater {
    /// Build an updater. `missing_heartbeat_threshold` defaults to 3
    /// consecutive missed heartbeats when `None`.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        store_context: Arc<StoreContext>,
        update_interval: u64,
        missing_heartbeat_threshold: Option<u32>,
        contracts: Contracts<WalletProvider>,
        pool_id: u32,
        disable_ejection: bool,
        heartbeats: Arc<LoopHeartbeats>,
        plugins: Vec<StatusUpdatePlugin>,
        metrics: Arc<MetricsContext>,
    ) -> Self {
        Self {
            store_context,
            update_interval,
            missing_heartbeat_threshold: missing_heartbeat_threshold.unwrap_or(3),
            contracts,
            pool_id,
            disable_ejection,
            heartbeats,
            plugins,
            metrics,
        }
    }

    /// Main loop: every `update_interval` seconds, process heartbeats,
    /// reconcile dead nodes with the chain, then record liveness.
    /// Errors are logged per step; the loop itself never exits normally.
    pub async fn run(&self) -> Result<(), anyhow::Error> {
        let mut interval = interval(Duration::from_secs(self.update_interval));
        loop {
            interval.tick().await;
            debug!("Running NodeStatusUpdater to process nodes heartbeats");
            if let Err(e) = self.process_nodes().await {
                error!("Error processing nodes: {e}");
            }
            if let Err(e) = self.sync_chain_with_nodes().await {
                error!("Error syncing chain with nodes: {e}");
            }
            self.heartbeats.update_status_updater();
        }
    }

    // Test builds stub out chain access: every node counts as in-pool.
    // Note the test variant is sync while the real one is async, which is
    // why call sites use a cfg split rather than a plain `.await`.
    #[cfg(test)]
    fn is_node_in_pool(&self, _: &OrchestratorNode) -> bool {
        true
    }

    /// Query the compute-pool contract for membership.
    /// A failed call is conservatively treated as "not in pool".
    #[cfg(not(test))]
    async fn is_node_in_pool(&self, node: &OrchestratorNode) -> bool {
        let node_in_pool: bool = (self
            .contracts
            .compute_pool
            .is_node_in_pool(self.pool_id, node.address)
            .await)
            .unwrap_or(false);
        node_in_pool
    }

    /// Eject a dead node from the on-chain pool if it is still a member;
    /// nodes already out of the pool are skipped silently.
    async fn sync_dead_node_with_chain(
        &self,
        node: &OrchestratorNode,
    ) -> Result<(), anyhow::Error> {
        #[cfg(test)]
        let node_in_pool = self.is_node_in_pool(node);
        #[cfg(not(test))]
        let node_in_pool = self.is_node_in_pool(node).await;
        if node_in_pool {
            match self
                .contracts
                .compute_pool
                .eject_node(self.pool_id, node.address)
                .await
            {
                Result::Ok(_) => {
                    info!("Ejected node: {:?}", node.address);
                    return Ok(());
                }
                Result::Err(e) => {
                    error!("Error ejecting node: {e}");
                    return Err(anyhow::anyhow!("Error ejecting node: {}", e));
                }
            }
        } else {
            debug!(
                "Dead Node {:?} is not in pool, skipping ejection",
                node.address
            );
        }
        Ok(())
    }

    /// Walk all stored nodes and eject every `Dead` node that is still in
    /// the on-chain pool, unless ejection is disabled by configuration.
    pub async fn sync_chain_with_nodes(&self) -> Result<(), anyhow::Error> {
        let nodes = self.store_context.node_store.get_nodes().await?;
        for node in nodes {
            if node.status == NodeStatus::Dead {
                #[cfg(test)]
                let node_in_pool = self.is_node_in_pool(&node);
                #[cfg(not(test))]
                let node_in_pool = self.is_node_in_pool(&node).await;
                debug!("Node {:?} is in pool: {}", node.address, node_in_pool);
                if node_in_pool {
                    if !self.disable_ejection {
                        // Per-node failures are logged, not propagated, so
                        // one bad node cannot block the rest of the sweep.
                        if let Err(e) = self.sync_dead_node_with_chain(&node).await {
                            error!("Error syncing dead node with chain: {e}");
                        }
                    } else {
                        debug!(
                            "Ejection is disabled, skipping ejection of node: {:?}",
                            node.address
                        );
                    }
                }
            }
        }
        Ok(())
    }

    /// Run `process_node` for every stored node concurrently and log
    /// per-node outcomes; individual failures do not abort the batch.
    pub async fn process_nodes(&self) -> Result<(), anyhow::Error> {
        use futures::StreamExt as _;
        let nodes = self.store_context.node_store.get_nodes().await?;
        let futures = FuturesUnordered::new();
        for node in nodes {
            // Clone shared handles so each spawned future is independent.
            let store_context = self.store_context.clone();
            let contracts = self.contracts.clone();
            let pool_id = self.pool_id;
            let missing_heartbeat_threshold = self.missing_heartbeat_threshold;
            let plugins = self.plugins.clone();
            let metrics = self.metrics.clone();
            futures.push(async move {
                // Capture the address before `node` is moved into process_node
                // so the result can be attributed in the log below.
                let address = node.address;
                (
                    process_node(
                        node,
                        store_context,
                        contracts,
                        pool_id,
                        missing_heartbeat_threshold,
                        plugins,
                        metrics,
                    )
                    .await,
                    address,
                )
            });
        }
        let results: Vec<_> = futures.collect().await;
        for result in results {
            match result {
                (Ok(()), address) => {
                    debug!("Successfully processed node: {address:?}");
                }
                (Err(e), address) => {
                    error!("Error processing node {address:?}: {e}");
                }
            }
        }
        Ok(())
    }
}
/// Advance one node through the heartbeat state machine.
///
/// With a heartbeat present: the node's version is refreshed, any
/// recoverable status (Unhealthy / WaitingForHeartbeat / Discovered /
/// Dead) transitions to Healthy when the node is in the pool (or back to
/// Discovered for re-invite when it is not), and the unhealthy counter is
/// cleared. Without a heartbeat: the counter is incremented and the node
/// degrades Healthy -> Unhealthy -> Dead (after
/// `missing_heartbeat_threshold` misses), with special handling for
/// Discovered and WaitingForHeartbeat nodes.
///
/// On any transition, metrics for terminal states are cleaned up, the
/// store is updated, and status-change plugins are notified.
async fn process_node(
    node: OrchestratorNode,
    store_context: Arc<StoreContext>,
    contracts: Contracts<WalletProvider>,
    pool_id: u32,
    missing_heartbeat_threshold: u32,
    plugins: Vec<StatusUpdatePlugin>,
    metrics: Arc<MetricsContext>,
) -> Result<(), anyhow::Error> {
    let start_time = Instant::now();
    let old_status = node.status.clone();
    let heartbeat = store_context
        .heartbeat_store
        .get_heartbeat(&node.address)
        .await?;
    let unhealthy_counter: u32 = store_context
        .heartbeat_store
        .get_unhealthy_counter(&node.address)
        .await?;
    let is_node_in_pool = is_node_in_pool(contracts, pool_id, &node).await;
    let mut status_changed = false;
    let mut new_status = node.status.clone();
    match heartbeat {
        Some(beat) => {
            // Update version if necessary
            if let Some(version) = &beat.version {
                if node.version.as_ref() != Some(version) {
                    if let Err(e) = store_context
                        .node_store
                        .update_node_version(&node.address, version)
                        .await
                    {
                        error!("Error updating node version: {e}");
                    }
                }
            }
            // Check if the node is in the pool (needed for status transitions)
            // If node is Unhealthy or WaitingForHeartbeat:
            if node.status == NodeStatus::Unhealthy
                || node.status == NodeStatus::WaitingForHeartbeat
            {
                if is_node_in_pool {
                    new_status = NodeStatus::Healthy;
                } else {
                    // Reset to discovered to init re-invite to pool
                    new_status = NodeStatus::Discovered;
                }
                status_changed = true;
            }
            // If node is Discovered or Dead:
            else if node.status == NodeStatus::Discovered || node.status == NodeStatus::Dead {
                if is_node_in_pool {
                    new_status = NodeStatus::Healthy;
                } else {
                    new_status = NodeStatus::Discovered;
                }
                status_changed = true;
            }
            // Clear unhealthy counter on heartbeat receipt
            if let Err(e) = store_context
                .heartbeat_store
                .clear_unhealthy_counter(&node.address)
                .await
            {
                error!("Error clearing unhealthy counter: {e}");
            }
        }
        None => {
            // We don't have a heartbeat, increment unhealthy counter
            if let Err(e) = store_context
                .heartbeat_store
                .increment_unhealthy_counter(&node.address)
                .await
            {
                error!("Error incrementing unhealthy counter: {e}");
            }
            match node.status {
                NodeStatus::Healthy => {
                    new_status = NodeStatus::Unhealthy;
                    status_changed = true;
                }
                NodeStatus::Unhealthy => {
                    // +1 accounts for the increment issued just above,
                    // which `unhealthy_counter` (read earlier) predates.
                    if unhealthy_counter + 1 >= missing_heartbeat_threshold {
                        new_status = NodeStatus::Dead;
                        status_changed = true;
                    }
                }
                NodeStatus::Discovered => {
                    if is_node_in_pool {
                        // We have caught a very interesting edge case here.
                        // The node is in pool but does not send heartbeats - maybe due to a downtime of the orchestrator?
                        // Node invites fail now since the node cannot be in pool again.
                        // We have to eject and re-invite - we can simply do this by setting the status to unhealthy. The node will eventually be ejected.
                        new_status = NodeStatus::Unhealthy;
                        status_changed = true;
                    } else {
                        // if we've been trying to invite this node for a while, we eventually give up and mark it as dead
                        // The node will simply be in status discovered again when the discovery svc date > status change date.
                        if unhealthy_counter + 1 > 360 {
                            new_status = NodeStatus::Dead;
                            status_changed = true;
                        }
                    }
                }
                NodeStatus::WaitingForHeartbeat => {
                    if unhealthy_counter + 1 >= missing_heartbeat_threshold {
                        // Unhealthy counter is reset when node is invited
                        // usually it starts directly with heartbeat
                        new_status = NodeStatus::Unhealthy;
                        status_changed = true;
                    }
                }
                // Terminal / special states (Dead, Ejected, Banned, ...)
                // are left untouched here.
                _ => (),
            }
        }
    }
    if status_changed {
        // Clean up metrics when node becomes Dead, Ejected, or Banned
        if matches!(
            &new_status,
            NodeStatus::Dead | NodeStatus::Ejected | NodeStatus::Banned
        ) {
            let node_metrics = match store_context
                .metrics_store
                .get_metrics_for_node(node.address)
                .await
            {
                Ok(metrics) => metrics,
                Err(e) => {
                    error!("Error getting metrics for node: {e}");
                    Default::default()
                }
            };
            for (task_id, task_metrics) in node_metrics {
                for (label, _value) in task_metrics {
                    // Remove from Redis metrics store
                    if let Err(e) = store_context
                        .metrics_store
                        .delete_metric(&task_id, &label, &node.address.to_string())
                        .await
                    {
                        error!("Error deleting metric: {e}");
                    }
                }
            }
            // Record status update execution time
            // NOTE(review): this is only recorded on Dead/Ejected/Banned
            // transitions, not for every status change — confirm intended.
            let duration = start_time.elapsed();
            metrics.record_status_update_execution_time(
                &node.address.to_string(),
                duration.as_secs_f64(),
            );
        }
        if let Err(e) = store_context
            .node_store
            .update_node_status(&node.address, new_status)
            .await
        {
            error!("Error updating node status: {e}");
        }
        // Re-read the node so plugins see the freshly persisted state.
        if let Some(updated_node) = store_context.node_store.get_node(&node.address).await? {
            for plugin in plugins.iter() {
                if let Err(e) = plugin
                    .handle_status_change(&updated_node, &old_status)
                    .await
                {
                    error!("Error handling status change: {e}");
                }
            }
        }
    }
    Ok(())
}
// Test builds: chain access is stubbed out; every node counts as in-pool.
#[cfg(test)]
async fn is_node_in_pool(_: Contracts<WalletProvider>, _: u32, _: &OrchestratorNode) -> bool {
    true
}
/// Query the compute-pool contract for membership of `node`.
/// A failed contract call is conservatively treated as "not in pool".
#[cfg(not(test))]
async fn is_node_in_pool(
    contracts: Contracts<WalletProvider>,
    pool_id: u32,
    node: &OrchestratorNode,
) -> bool {
    contracts
        .compute_pool
        .is_node_in_pool(pool_id, node.address)
        .await
        .unwrap_or(false)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api::tests::helper::create_test_app_state_with_metrics;
use crate::api::tests::helper::setup_contract;
use crate::models::node::NodeStatus;
use crate::models::node::OrchestratorNode;
use crate::ServerMode;
use alloy::primitives::Address;
use shared::models::heartbeat::HeartbeatRequest;
use std::str::FromStr;
use std::time::Duration;
use tokio::time::sleep;
#[tokio::test]
async fn test_node_status_updater_runs() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
let node = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::WaitingForHeartbeat,
..Default::default()
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
let heartbeat = HeartbeatRequest {
address: node.address.to_string(),
task_id: None,
task_state: None,
metrics: None,
version: Some(env!("CARGO_PKG_VERSION").to_string()),
timestamp: None,
p2p_id: None,
task_details: None,
};
if let Err(e) = app_state
.store_context
.heartbeat_store
.beat(&heartbeat)
.await
{
error!("Heartbeat Error: {}", e);
}
let _ = updater.process_nodes().await;
let node_opt = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap();
assert!(node_opt.is_some());
if let Some(node) = node_opt {
assert_eq!(node.status, NodeStatus::Healthy);
assert_ne!(node.last_status_change, None);
}
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap();
assert!(node.is_some());
if let Some(node) = node {
assert_eq!(node.status, NodeStatus::Healthy);
assert_ne!(node.last_status_change, None);
}
}
#[tokio::test]
async fn test_node_status_updater_runs_with_unhealthy_node() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Healthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap()
.unwrap();
assert_eq!(node.status, NodeStatus::Unhealthy);
assert_ne!(node.last_status_change, None);
}
#[tokio::test]
async fn test_node_status_with_unhealthy_node_but_no_counter() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Unhealthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap()
.unwrap();
assert_eq!(node.status, NodeStatus::Unhealthy);
let counter = app_state
.store_context
.heartbeat_store
.get_unhealthy_counter(&node.address)
.await
.unwrap();
assert_eq!(counter, 1);
assert_eq!(node.last_status_change, None);
}
#[tokio::test]
async fn test_node_status_updater_runs_with_dead_node() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Unhealthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
if let Err(e) = app_state
.store_context
.heartbeat_store
.set_unhealthy_counter(&node.address, 2)
.await
{
error!("Error setting unhealthy counter: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap();
assert!(node.is_some());
if let Some(node) = node {
assert_eq!(node.status, NodeStatus::Dead);
assert_ne!(node.last_status_change, None);
}
}
#[tokio::test]
async fn transition_from_unhealthy_to_healthy() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Unhealthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.heartbeat_store
.set_unhealthy_counter(&node.address, 2)
.await
{
error!("Error setting unhealthy counter: {}", e);
};
let heartbeat = HeartbeatRequest {
address: node.address.to_string(),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
task_details: None,
..Default::default()
};
if let Err(e) = app_state
.store_context
.heartbeat_store
.beat(&heartbeat)
.await
{
error!("Heartbeat Error: {}", e);
}
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap();
assert!(node.is_some());
if let Some(node) = node {
assert_eq!(node.status, NodeStatus::Healthy);
assert_ne!(node.last_status_change, None);
let counter = app_state
.store_context
.heartbeat_store
.get_unhealthy_counter(&node.address)
.await
.unwrap();
assert_eq!(counter, 0);
}
}
#[tokio::test]
async fn test_multiple_nodes_with_diff_status() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node1 = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Unhealthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.heartbeat_store
.set_unhealthy_counter(&node1.address, 1)
.await
{
error!("Error setting unhealthy counter: {}", e);
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node1.clone())
.await
{
error!("Error adding node: {}", e);
}
let node2 = OrchestratorNode {
address: Address::from_str("0x0000000000000000000000000000000000000001").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::Healthy,
..Default::default()
};
if let Err(e) = app_state
.store_context
.node_store
.add_node(node2.clone())
.await
{
error!("Error adding node: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
.await
.expect("Failed to run NodeStatusUpdater");
});
sleep(Duration::from_secs(2)).await;
let node1 = app_state
.store_context
.node_store
.get_node(&node1.address)
.await
.unwrap()
.unwrap();
assert_eq!(node1.status, NodeStatus::Unhealthy);
let counter = app_state
.store_context
.heartbeat_store
.get_unhealthy_counter(&node1.address)
.await
.unwrap();
assert_eq!(counter, 2);
let node2 = app_state
.store_context
.node_store
.get_node(&node2.address)
.await
.unwrap()
.unwrap();
assert_eq!(node2.status, NodeStatus::Unhealthy);
let counter = app_state
.store_context
.heartbeat_store
.get_unhealthy_counter(&node2.address)
.await
.unwrap();
assert_eq!(counter, 1);
}
    #[tokio::test]
    async fn test_node_rediscovery_after_death() {
        // Scenario: an Unhealthy node whose unhealthy counter is already at 2
        // is marked Dead by the NodeStatusUpdater loop, then a fresh heartbeat
        // brings it back to Healthy.
        let app_state = create_test_app_state_with_metrics().await;
        let contracts = setup_contract();
        let node = OrchestratorNode {
            address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            status: NodeStatus::Unhealthy,
            ..Default::default()
        };
        if let Err(e) = app_state
            .store_context
            .node_store
            .add_node(node.clone())
            .await
        {
            error!("Error adding node: {}", e);
        }
        // Pre-seed the unhealthy counter so the updater escalates the node
        // to Dead on its next pass rather than counting up from zero.
        if let Err(e) = app_state
            .store_context
            .heartbeat_store
            .set_unhealthy_counter(&node.address, 2)
            .await
        {
            error!("Error setting unhealthy counter: {}", e);
        }
        let mode = ServerMode::Full;
        let updater = NodeStatusUpdater::new(
            app_state.store_context.clone(),
            5,
            None,
            contracts,
            0,
            false,
            Arc::new(LoopHeartbeats::new(&mode)),
            vec![],
            app_state.metrics.clone(),
        );
        // Run the updater loop in the background for the rest of the test.
        tokio::spawn(async move {
            updater
                .run()
                .await
                .expect("Failed to run NodeStatusUpdater");
        });
        // NOTE(review): timing-based synchronization — assumes the updater
        // completes at least one pass within 2s. Flaky under heavy load.
        sleep(Duration::from_secs(2)).await;
        let node = app_state
            .store_context
            .node_store
            .get_node(&node.address)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(node.status, NodeStatus::Dead);
        // A new heartbeat from the (now Dead) node should trigger rediscovery.
        let heartbeat = HeartbeatRequest {
            address: node.address.to_string(),
            version: Some(env!("CARGO_PKG_VERSION").to_string()),
            task_details: None,
            ..Default::default()
        };
        if let Err(e) = app_state
            .store_context
            .heartbeat_store
            .beat(&heartbeat)
            .await
        {
            error!("Heartbeat Error: {}", e);
        }
        // Allow the updater time to observe the heartbeat and flip the status.
        sleep(Duration::from_secs(5)).await;
        let node = app_state
            .store_context
            .node_store
            .get_node(&node.address)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(node.status, NodeStatus::Healthy);
    }
    #[tokio::test]
    async fn test_node_status_with_discovered_node() {
        // Scenario: a node in Discovered state that never sends a heartbeat is
        // moved to Unhealthy by the updater, with its unhealthy counter at 1.
        let app_state = create_test_app_state_with_metrics().await;
        let contracts = setup_contract();
        let node = OrchestratorNode {
            address: Address::from_str("0x98dBe56Cac21e07693c558E4f27E7de4073e2aF3").unwrap(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            status: NodeStatus::Discovered,
            ..Default::default()
        };
        if let Err(e) = app_state
            .store_context
            .node_store
            .add_node(node.clone())
            .await
        {
            error!("Error adding node: {}", e);
        }
        let mode = ServerMode::Full;
        let updater = NodeStatusUpdater::new(
            app_state.store_context.clone(),
            5,
            None,
            contracts,
            0,
            false,
            Arc::new(LoopHeartbeats::new(&mode)),
            vec![],
            app_state.metrics.clone(),
        );
        // Run the updater loop in the background for the rest of the test.
        tokio::spawn(async move {
            updater
                .run()
                .await
                .expect("Failed to run NodeStatusUpdater");
        });
        // NOTE(review): timing-based synchronization — assumes exactly one
        // updater pass within 2s (counter would exceed 1 after more passes).
        sleep(Duration::from_secs(2)).await;
        let node = app_state
            .store_context
            .node_store
            .get_node(&node.address)
            .await
            .unwrap()
            .unwrap();
        let counter = app_state
            .store_context
            .heartbeat_store
            .get_unhealthy_counter(&node.address)
            .await
            .unwrap();
        assert_eq!(counter, 1);
        // Node has unhealthy counter
        assert_eq!(node.status, NodeStatus::Unhealthy);
    }
#[tokio::test]
async fn test_node_status_with_waiting_for_heartbeat() {
let app_state = create_test_app_state_with_metrics().await;
let contracts = setup_contract();
let node = OrchestratorNode {
address: Address::from_str("0x98dBe56Cac21e07693c558E4f27E7de4073e2aF3").unwrap(),
ip_address: "127.0.0.1".to_string(),
port: 8080,
status: NodeStatus::WaitingForHeartbeat,
..Default::default()
};
let counter = app_state
.store_context
.heartbeat_store
.get_unhealthy_counter(&node.address)
.await
.unwrap();
assert_eq!(counter, 0);
if let Err(e) = app_state
.store_context
.node_store
.add_node(node.clone())
.await
{
error!("Error adding node: {}", e);
}
let mode = ServerMode::Full;
let updater = NodeStatusUpdater::new(
app_state.store_context.clone(),
5,
None,
contracts,
0,
false,
Arc::new(LoopHeartbeats::new(&mode)),
vec![],
app_state.metrics.clone(),
);
tokio::spawn(async move {
updater
.run()
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/mod.rs | crates/orchestrator/src/api/mod.rs | pub(crate) mod routes;
pub(crate) mod server;
pub(crate) mod tests;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/server.rs | crates/orchestrator/src/api/server.rs | use crate::api::routes::groups::groups_routes;
use crate::api::routes::nodes::nodes_routes;
use crate::api::routes::storage::storage_routes;
use crate::api::routes::task::tasks_routes;
use crate::api::routes::{heartbeat::heartbeat_routes, metrics::metrics_routes};
use crate::metrics::MetricsContext;
use crate::models::node::NodeStatus;
use crate::p2p::{GetTaskLogsRequest, RestartTaskRequest};
use crate::plugins::node_groups::NodeGroupsPlugin;
use crate::scheduler::Scheduler;
use crate::store::core::{RedisStore, StoreContext};
use crate::utils::loop_heartbeats::{HealthStatus, LoopHeartbeats};
use crate::ServerMode;
use actix_web::middleware::{Compress, NormalizePath, TrailingSlash};
use actix_web::{middleware, web::Data, App, HttpServer};
use actix_web::{web, HttpResponse};
use anyhow::Error;
use log::info;
use serde_json::json;
use shared::security::api_key_middleware::ApiKeyMiddleware;
use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState};
use shared::utils::StorageProvider;
use shared::web3::contracts::core::builder::Contracts;
use shared::web3::wallet::WalletProvider;
use std::sync::Arc;
use tokio::sync::mpsc::Sender;
use utoipa::{
openapi::security::{ApiKey, ApiKeyValue, SecurityScheme},
Modify, OpenApi,
};
use utoipa_swagger_ui::SwaggerUi;
/// OpenAPI modifier that registers the `ApiKeyAuth` security scheme —
/// an API key carried in the `Authorization` header — on the generated spec.
struct SecurityAddon;
impl Modify for SecurityAddon {
    fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
        // Nothing to register when the spec has no components section.
        let Some(components) = openapi.components.as_mut() else {
            return;
        };
        let header_key = ApiKey::Header(ApiKeyValue::new("Authorization"));
        components.add_security_scheme("ApiKeyAuth", SecurityScheme::ApiKey(header_key));
    }
}
// Aggregated OpenAPI document for the orchestrator HTTP API.
// `paths` enumerates every documented handler, `components` lists the schemas
// they reference, and `SecurityAddon` (see above) injects the ApiKeyAuth
// header security scheme into the generated spec.
#[derive(OpenApi)]
#[openapi(
    info(
        title = "Orchestrator API",
        description = "Prime Intellect Orchestrator API",
        version = "1.0.0"
    ),
    paths(
        crate::api::routes::task::get_all_tasks,
        crate::api::routes::task::create_task,
        crate::api::routes::task::delete_task,
        crate::api::routes::task::delete_all_tasks,
        crate::api::routes::nodes::get_nodes,
        crate::api::routes::nodes::restart_node_task,
        crate::api::routes::nodes::get_node_logs,
        crate::api::routes::nodes::get_node_metrics,
        crate::api::routes::nodes::ban_node,
        crate::api::routes::metrics::get_metrics,
        crate::api::routes::metrics::get_all_metrics,
        crate::api::routes::metrics::get_prometheus_metrics,
        crate::api::routes::metrics::create_metric,
        crate::api::routes::metrics::delete_metric,
        crate::api::routes::groups::get_groups,
        crate::api::routes::groups::get_configurations,
        crate::api::routes::groups::delete_group,
        crate::api::routes::groups::get_group_logs,
        crate::api::routes::groups::force_regroup,
    ),
    security(
        ("ApiKeyAuth" = [])
    ),
    modifiers(&SecurityAddon),
    components(
        schemas(
            shared::models::api::ApiResponse<serde_json::Value>,
            shared::models::task::Task,
            shared::models::task::TaskRequest,
            shared::models::node::Node,
            crate::models::node::NodeStatus,
            crate::models::node::OrchestratorNode,
            shared::models::metric::MetricEntry,
            shared::models::metric::MetricKey,
        )
    ),
    tags(
        (name = "tasks", description = "Task management endpoints"),
        (name = "nodes", description = "Node management endpoints"),
        (name = "metrics", description = "Metrics collection endpoints"),
        (name = "groups", description = "Node groups management endpoints"),
    )
)]
struct ApiDoc;
#[utoipa::path(
    get,
    path = "/health",
    responses(
        (status = 200, description = "Service is healthy", body = HealthStatus),
        (status = 500, description = "Service is unhealthy", body = HealthStatus)
    ),
    tag = "health"
)]
/// Health probe: reports the aggregated loop-heartbeat status as JSON.
/// Responds 200 when healthy, 500 otherwise; the body is the same either way.
async fn health_check(data: web::Data<AppState>) -> HttpResponse {
    // Group-loop heartbeats only count toward health when the plugin is enabled.
    let groups_enabled = data.node_groups_plugin.is_some();
    let status = data.heartbeats.health_status(groups_enabled);
    let mut builder = if status.healthy {
        HttpResponse::Ok()
    } else {
        HttpResponse::InternalServerError()
    };
    builder.json(status)
}
/// Shared application state handed to every actix-web handler via `web::Data`.
pub(crate) struct AppState {
    /// Redis-backed stores (nodes, tasks, heartbeats, ...).
    pub(crate) store_context: Arc<StoreContext>,
    /// Backend for signed upload URLs; `None` makes storage routes report
    /// "Storage provider not found".
    pub(crate) storage_provider: Option<Arc<dyn StorageProvider>>,
    /// Loop liveness tracker consumed by the `/health` endpoint.
    pub(crate) heartbeats: Arc<LoopHeartbeats>,
    /// Raw Redis handle, used e.g. for upload rate limiting and nonce state.
    pub(crate) redis_store: Arc<RedisStore>,
    /// Max signed-upload-URL requests per node address per hour.
    pub(crate) hourly_upload_limit: i64,
    /// On-chain contract bindings; optional (presumably absent in
    /// chain-less deployments/tests — confirm).
    pub(crate) contracts: Option<Contracts<WalletProvider>>,
    /// Compute pool this orchestrator serves.
    pub(crate) pool_id: u32,
    /// Task scheduler instance.
    pub(crate) scheduler: Scheduler,
    /// Optional node-groups plugin; also toggles group-aware behavior
    /// (health checks, file-name template expansion).
    pub(crate) node_groups_plugin: Option<Arc<NodeGroupsPlugin>>,
    /// Prometheus-style metrics registry.
    pub(crate) metrics: Arc<MetricsContext>,
    /// Channel forwarding task-log requests to the p2p layer.
    pub(crate) get_task_logs_tx: Sender<GetTaskLogsRequest>,
    /// Channel forwarding task-restart requests to the p2p layer.
    pub(crate) restart_task_tx: Sender<RestartTaskRequest>,
}
#[allow(clippy::too_many_arguments)]
/// Builds the shared [`AppState`], wires the authentication middleware, and
/// runs the actix-web HTTP server until it terminates.
///
/// Route layout:
/// - `/health`, `/api-docs/openapi.json` and metrics routes are always mounted
///   (metrics behind the admin API key).
/// - All remaining route groups (heartbeat, storage, nodes, tasks, groups,
///   Swagger UI) are skipped in `ServerMode::ProcessorOnly`.
///
/// Returns an error if binding the socket or initializing the signature
/// validator's Redis connection pool fails.
pub async fn start_server(
    host: &str,
    port: u16,
    store_context: Arc<StoreContext>,
    admin_api_key: String,
    storage_provider: Option<Arc<dyn StorageProvider>>,
    heartbeats: Arc<LoopHeartbeats>,
    redis_store: Arc<RedisStore>,
    hourly_upload_limit: i64,
    contracts: Option<Contracts<WalletProvider>>,
    pool_id: u32,
    server_mode: ServerMode,
    scheduler: Scheduler,
    node_groups_plugin: Option<Arc<NodeGroupsPlugin>>,
    metrics: Arc<MetricsContext>,
    get_task_logs_tx: Sender<GetTaskLogsRequest>,
    restart_task_tx: Sender<RestartTaskRequest>,
) -> Result<(), Error> {
    info!("Starting server at http://{host}:{port}");
    let app_state = Data::new(AppState {
        store_context,
        storage_provider,
        heartbeats,
        redis_store,
        hourly_upload_limit,
        contracts,
        pool_id,
        scheduler,
        node_groups_plugin,
        metrics,
        get_task_logs_tx,
        restart_task_tx,
    });
    let node_store = app_state.store_context.node_store.clone();
    let node_store_clone = node_store.clone();
    // Signature validator: a request is accepted only if its signer address
    // maps to a known, non-ejected node. Redis backs the validator's shared
    // state (presumably signature-replay/nonce tracking — confirm).
    let validator_state = Arc::new(
        ValidatorState::new(vec![])
            .with_redis(app_state.redis_store.client.clone())
            .await
            .map_err(|e| anyhow::anyhow!("Failed to initialize Redis connection pool: {}", e))?
            .with_async_validator(move |address| {
                let address = *address;
                let node_store = node_store_clone.clone();
                Box::pin(async move {
                    match node_store.get_node(&address).await {
                        Ok(Some(node)) => node.status != NodeStatus::Ejected,
                        // Unknown node or store error: reject the signature.
                        _ => false,
                    }
                })
            }),
    );
    let api_key_middleware = Arc::new(ApiKeyMiddleware::new(admin_api_key));
    // The factory closure runs once per worker thread; everything it captures
    // must therefore be cheaply cloneable (Arc/Data handles).
    HttpServer::new(move || {
        let mut app = App::new()
            .app_data(app_state.clone())
            .wrap(middleware::Logger::default())
            .wrap(Compress::default())
            .wrap(NormalizePath::new(TrailingSlash::Trim))
            // 2 MiB request payload cap.
            .app_data(web::PayloadConfig::default().limit(2_097_152))
            .service(web::resource("/health").route(web::get().to(health_check)))
            .service(
                web::resource("/api-docs/openapi.json")
                    .route(web::get().to(|| async { HttpResponse::Ok().json(ApiDoc::openapi()) })),
            )
            .service(metrics_routes().wrap(api_key_middleware.clone()));
        if !matches!(server_mode, ServerMode::ProcessorOnly) {
            app = app
                // Node-facing routes authenticate via request signatures...
                .service(heartbeat_routes().wrap(ValidateSignature::new(validator_state.clone())))
                .service(storage_routes().wrap(ValidateSignature::new(validator_state.clone())))
                // ...while operator-facing routes use the admin API key.
                .service(nodes_routes().wrap(api_key_middleware.clone()))
                .service(tasks_routes().wrap(api_key_middleware.clone()))
                .service(groups_routes().wrap(api_key_middleware.clone()))
                .service(
                    SwaggerUi::new("/swagger-ui/{_:.*}")
                        .url("/api-docs/openapi.json", ApiDoc::openapi()),
                )
                .service(web::redirect("/docs", "/swagger-ui/index.html"))
                .default_service(web::route().to(|| async {
                    HttpResponse::NotFound().json(json!({
                        "success": false,
                        "error": "Resource not found"
                    }))
                }));
        }
        app
    })
    .bind((host, port))?
    .run()
    .await?;
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/tests/helper.rs | crates/orchestrator/src/api/tests/helper.rs | #[cfg(test)]
use crate::api::server::AppState;
#[cfg(test)]
use crate::store::core::RedisStore;
#[cfg(test)]
use crate::store::core::StoreContext;
#[cfg(test)]
use actix_web::web::Data;
#[cfg(test)]
use shared::web3::contracts::core::builder::{ContractBuilder, Contracts};
#[cfg(test)]
use shared::web3::wallet::Wallet;
#[cfg(test)]
use shared::web3::wallet::WalletProvider;
#[cfg(test)]
use std::sync::Arc;
#[cfg(test)]
use url::Url;
#[cfg(test)]
/// Builds a baseline test `AppState`: mock storage, no contracts, no node
/// groups, backed by a freshly flushed test Redis instance.
pub(crate) async fn create_test_app_state() -> Data<AppState> {
    use shared::utils::MockStorageProvider;

    use crate::{
        metrics::MetricsContext, scheduler::Scheduler, utils::loop_heartbeats::LoopHeartbeats,
        ServerMode,
    };
    // Fail fast if the test Redis is unreachable, then wipe it so every test
    // starts from a clean slate.
    let store = Arc::new(RedisStore::new_test());
    let mut redis_conn = store
        .client
        .get_connection()
        .expect("Should connect to test Redis instance");
    redis::cmd("PING")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be responsive");
    redis::cmd("FLUSHALL")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be flushed");
    let store_context = Arc::new(StoreContext::new(store.clone()));
    // Receivers are dropped immediately — these tests never consume p2p requests.
    let (get_task_logs_tx, _) = tokio::sync::mpsc::channel(1);
    let (restart_task_tx, _) = tokio::sync::mpsc::channel(1);
    Data::new(AppState {
        store_context: store_context.clone(),
        contracts: None,
        pool_id: 1,
        storage_provider: Some(Arc::new(MockStorageProvider::new())),
        heartbeats: Arc::new(LoopHeartbeats::new(&ServerMode::Full)),
        hourly_upload_limit: 12,
        redis_store: store,
        scheduler: Scheduler::new(store_context, vec![]),
        node_groups_plugin: None,
        metrics: Arc::new(MetricsContext::new("1".to_string())),
        get_task_logs_tx,
        restart_task_tx,
    })
}
#[cfg(test)]
/// Builds a test `AppState` with the node-groups plugin enabled, configured
/// for groups of exactly one node; Redis is flushed first.
pub(crate) async fn create_test_app_state_with_nodegroups() -> Data<AppState> {
    use shared::utils::MockStorageProvider;

    use crate::{
        metrics::MetricsContext,
        plugins::node_groups::{NodeGroupConfiguration, NodeGroupsPlugin},
        scheduler::Scheduler,
        utils::loop_heartbeats::LoopHeartbeats,
        ServerMode,
    };
    // Fail fast if the test Redis is unreachable, then wipe it clean.
    let store = Arc::new(RedisStore::new_test());
    let mut redis_conn = store
        .client
        .get_connection()
        .expect("Should connect to test Redis instance");
    redis::cmd("PING")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be responsive");
    redis::cmd("FLUSHALL")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be flushed");
    let store_context = Arc::new(StoreContext::new(store.clone()));
    // min == max == 1: groups consist of exactly one node.
    let group_config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 1,
        max_group_size: 1,
        compute_requirements: None,
    };
    let plugin = NodeGroupsPlugin::new(
        vec![group_config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    );
    // Receivers are dropped immediately — these tests never consume p2p requests.
    let (get_task_logs_tx, _) = tokio::sync::mpsc::channel(1);
    let (restart_task_tx, _) = tokio::sync::mpsc::channel(1);
    Data::new(AppState {
        store_context: store_context.clone(),
        contracts: None,
        pool_id: 1,
        storage_provider: Some(Arc::new(MockStorageProvider::new())),
        heartbeats: Arc::new(LoopHeartbeats::new(&ServerMode::Full)),
        hourly_upload_limit: 12,
        redis_store: store,
        scheduler: Scheduler::new(store_context, vec![]),
        node_groups_plugin: Some(Arc::new(plugin)),
        metrics: Arc::new(MetricsContext::new("1".to_string())),
        get_task_logs_tx,
        restart_task_tx,
    })
}
#[cfg(test)]
/// Builds the full contract set against a local dev chain using a hardcoded
/// test-only coordinator key. Panics if the URL, wallet, or builder fails.
pub(crate) fn setup_contract() -> Contracts<WalletProvider> {
    // Test-only private key; never used outside local development.
    let wallet = Wallet::new(
        "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97",
        Url::parse("http://localhost:8545").unwrap(),
    )
    .unwrap();
    ContractBuilder::new(wallet.provider)
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap()
}
#[cfg(test)]
/// Builds a test `AppState` identical to `create_test_app_state` except the
/// metrics context is created with pool id `"0"`.
pub(crate) async fn create_test_app_state_with_metrics() -> Data<AppState> {
    use shared::utils::MockStorageProvider;

    use crate::{
        metrics::MetricsContext, scheduler::Scheduler, utils::loop_heartbeats::LoopHeartbeats,
        ServerMode,
    };
    // Fail fast if the test Redis is unreachable, then wipe it clean.
    let store = Arc::new(RedisStore::new_test());
    let mut redis_conn = store
        .client
        .get_connection()
        .expect("Should connect to test Redis instance");
    redis::cmd("PING")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be responsive");
    redis::cmd("FLUSHALL")
        .query::<String>(&mut redis_conn)
        .expect("Redis should be flushed");
    let store_context = Arc::new(StoreContext::new(store.clone()));
    // Receivers are dropped immediately — these tests never consume p2p requests.
    let (get_task_logs_tx, _) = tokio::sync::mpsc::channel(1);
    let (restart_task_tx, _) = tokio::sync::mpsc::channel(1);
    Data::new(AppState {
        store_context: store_context.clone(),
        contracts: None,
        pool_id: 1,
        storage_provider: Some(Arc::new(MockStorageProvider::new())),
        heartbeats: Arc::new(LoopHeartbeats::new(&ServerMode::Full)),
        hourly_upload_limit: 12,
        redis_store: store,
        scheduler: Scheduler::new(store_context, vec![]),
        node_groups_plugin: None,
        metrics: Arc::new(MetricsContext::new("0".to_string())),
        get_task_logs_tx,
        restart_task_tx,
    })
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/tests/mod.rs | crates/orchestrator/src/api/tests/mod.rs | pub(crate) mod helper;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/storage.rs | crates/orchestrator/src/api/routes/storage.rs | use crate::api::server::AppState;
use actix_web::{
web::{self, post, Data},
HttpRequest, HttpResponse, Scope,
};
use redis::AsyncCommands;
use shared::models::storage::RequestUploadRequest;
use std::time::Duration;
const MAX_FILE_SIZE: u64 = 100 * 1024 * 1024;
/// Validates an upload file name: rejects blank/whitespace-only names and
/// names containing NUL bytes; everything else (including path separators and
/// `..` sequences, which the object store treats as opaque key characters)
/// passes through unchanged as an owned `String`.
fn validate_file_name(file_name: &str) -> Result<String, String> {
    match file_name {
        n if n.trim().is_empty() => Err("File name cannot be empty".to_string()),
        n if n.contains('\0') => Err("File name cannot contain null bytes".to_string()),
        n => Ok(n.to_string()),
    }
}
/// Handles `POST /storage/request-upload` from nodes.
///
/// Flow: validate file size and name -> per-address hourly rate-limit check
/// (Redis) -> load the task -> expand the task's file-name template
/// (node-group variables and upload counters) -> persist the sha256 -> name
/// mapping -> return a signed upload URL.
///
/// Responds with JSON `{ "success": bool, ... }`; on success the body carries
/// `signed_url` (and, in test builds only, the expanded `file_name`).
async fn request_upload(
    req: HttpRequest,
    request_upload: web::Json<RequestUploadRequest>,
    app_state: Data<AppState>,
) -> HttpResponse {
    let Some(storage_provider) = &app_state.storage_provider else {
        return HttpResponse::InternalServerError().json(serde_json::json!({
            "success": false,
            "error": "Storage provider not found"
        }));
    };
    // Check file size limit first
    if request_upload.file_size > MAX_FILE_SIZE {
        return HttpResponse::PayloadTooLarge().json(serde_json::json!({
            "success": false,
            "error": "File size exceeds maximum allowed size of 100MB"
        }));
    }
    let validated_file_name = match validate_file_name(&request_upload.file_name) {
        Ok(name) => name,
        Err(error) => {
            return HttpResponse::BadRequest().json(serde_json::json!({
                "success": false,
                "error": format!("Invalid file name: {}", error)
            }));
        }
    };
    let mut redis_con = match app_state
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await
    {
        Ok(con) => con,
        Err(e) => {
            return HttpResponse::InternalServerError().json(serde_json::json!({
                "success": false,
                "error": format!("Failed to connect to Redis: {}", e)
            }));
        }
    };
    // Fix: the previous implementation built the 400 response inside
    // `ok_or_else` and then called `.unwrap()` on the Result, so a missing
    // `x-address` header panicked the handler instead of returning 400.
    let address = match req
        .headers()
        .get("x-address")
        .and_then(|h| h.to_str().ok())
    {
        Some(address) => address,
        None => {
            return HttpResponse::BadRequest().json(serde_json::json!({
                "success": false,
                "error": "Missing required x-address header"
            }));
        }
    };
    let rate_limit_key = format!("rate_limit:storage:upload:{address}");
    let hourly_limit = app_state.hourly_upload_limit;
    // Check current request count
    let current_count: Result<Option<i64>, redis::RedisError> =
        redis_con.get(&rate_limit_key).await;
    match current_count {
        Ok(Some(count)) if count >= hourly_limit => {
            return HttpResponse::TooManyRequests().json(serde_json::json!({
                "success": false,
                "error": format!("Rate limit exceeded. Maximum {} uploads per hour.", hourly_limit)
            }));
        }
        Ok(Some(count)) => {
            log::info!("Current rate limit count for {address}: {count}/{hourly_limit} per hour");
        }
        Ok(None) => {
            log::info!("No rate limit record for {address} yet");
        }
        Err(e) => {
            log::error!("Redis rate limiting error: {e}");
            // Continue processing if rate limiting fails
        }
    }
    let task = match app_state
        .store_context
        .task_store
        .get_task(&request_upload.task_id)
        .await
    {
        Ok(Some(task)) => task,
        Ok(None) => {
            return HttpResponse::NotFound().json(serde_json::json!({
                "success": false,
                "error": "Task not found"
            }));
        }
        Err(e) => {
            return HttpResponse::InternalServerError().json(serde_json::json!({
                "success": false,
                "error": format!("Failed to retrieve task: {}", e)
            }));
        }
    };
    let storage_config = task.storage_config;
    let mut file_name = validated_file_name.clone();
    let mut group_id = None;
    if let Some(storage_config) = storage_config {
        if let Some(file_name_template) = storage_config.file_name_template {
            file_name = generate_file_name(&file_name_template, &validated_file_name);
            // TODO: Temporary direct integration of node-groups functionality.
            // Planned to move to expander traits that handle variable expansion
            // in a modular, extensible way: multiple registered expanders,
            // generic expansion, easier testing via mocks, and richer patterns.
            if let Some(node_group_plugin) = &app_state.node_groups_plugin {
                let plugin = node_group_plugin.clone();
                match plugin.get_node_group(address).await {
                    Ok(Some(group)) => {
                        group_id = Some(group.id.clone());
                        file_name = file_name.replace("${NODE_GROUP_ID}", &group.id);
                        file_name =
                            file_name.replace("${NODE_GROUP_SIZE}", &group.nodes.len().to_string());
                        // The group was fetched for this address, so the index
                        // lookup is expected to succeed.
                        let idx = plugin.get_idx_in_group(&group, address).unwrap();
                        file_name = file_name.replace("${NODE_GROUP_INDEX}", &idx.to_string());
                    }
                    Ok(None) => {
                        log::warn!(
                            "Node group not found for address for upload request: {address}"
                        );
                    }
                    Err(e) => {
                        log::error!("Error getting node group: {e}");
                    }
                }
            }
        }
    }
    // Create a unique key for this file upload based on address, group_id, and file name
    let upload_key = match &group_id {
        Some(gid) => format!("upload:{}:{}:{}", address, gid, &validated_file_name),
        None => format!("upload:{}:{}:{}", address, "no-group", &validated_file_name),
    };
    // Only the first request for a given (address, group, name) creates a
    // record, so re-requesting the same file does not inflate the counters.
    let upload_exists: Result<Option<String>, redis::RedisError> = redis_con.get(&upload_key).await;
    if let Ok(None) = upload_exists {
        if let Err(e) = redis_con.set::<_, _, ()>(&upload_key, "pending").await {
            log::error!("Failed to set upload status in Redis: {e}");
        }
    }
    let pattern = match &group_id {
        Some(gid) => format!("upload:{address}:{gid}:*"),
        None => format!("upload:{address}:no-group:*"),
    };
    // Count this requester's recorded uploads so the counter placeholders in
    // the template can be expanded below.
    let total_uploads: Result<Vec<String>, redis::RedisError> = {
        let mut keys = Vec::new();
        match redis_con.scan_match(&pattern).await {
            Ok(mut iter) => {
                while let Some(key) = iter.next_item().await {
                    keys.push(key);
                }
                Ok(keys)
            }
            Err(e) => Err(e),
        }
    };
    let upload_count = match total_uploads {
        Ok(keys) => keys.len(),
        Err(e) => {
            log::error!("Failed to count uploads: {e}");
            0
        }
    };
    // Zero-based index of the current file; saturating so a failed count
    // (0 uploads) does not underflow.
    let file_number = upload_count.saturating_sub(1);
    if file_name.contains("${TOTAL_UPLOAD_COUNT_AFTER}") {
        file_name = file_name.replace("${TOTAL_UPLOAD_COUNT_AFTER}", &upload_count.to_string());
    }
    if file_name.contains("${CURRENT_FILE_INDEX}") {
        file_name = file_name.replace("${CURRENT_FILE_INDEX}", &file_number.to_string());
    }
    let file_size = &request_upload.file_size;
    let file_type = &request_upload.file_type;
    let sha256 = &request_upload.sha256;
    log::info!(
        "Received upload request for file: {file_name}, size: {file_size}, type: {file_type}, sha256: {sha256}"
    );
    log::info!("Generating mapping file for sha256: {sha256} to file: {file_name}");
    if let Err(e) = storage_provider
        .generate_mapping_file(sha256, &file_name)
        .await
    {
        log::error!("Failed to generate mapping file: {e}");
        return HttpResponse::InternalServerError().json(serde_json::json!({
            "success": false,
            "error": format!("Failed to generate mapping file: {}", e)
        }));
    }
    log::info!(
        "Successfully generated mapping file. Generating signed upload URL for file: {file_name}"
    );
    // Generate signed upload URL
    match storage_provider
        .generate_upload_signed_url(
            &file_name,
            Some(file_type.to_string()),
            Duration::from_secs(3600), // 1 hour expiry
            Some(*file_size),
        )
        .await
    {
        Ok(signed_url) => {
            // Increment rate limit counter after successful URL generation
            let rate_limit_key = format!("rate_limit:storage:upload:{address}");
            let expiry_seconds = 3600; // 1 hour
            // Increment the counter or create it if it doesn't exist
            let new_count: Result<i64, redis::RedisError> =
                redis_con.incr(&rate_limit_key, 1).await;
            match new_count {
                Ok(count) => {
                    // Set expiry if this is the first request (count == 1)
                    if count == 1 {
                        let _: Result<(), redis::RedisError> =
                            redis_con.expire(&rate_limit_key, expiry_seconds).await;
                    }
                    log::info!(
                        "Rate limit count for {}: {}/{} per hour",
                        address,
                        count,
                        app_state.hourly_upload_limit
                    );
                }
                Err(e) => {
                    log::error!("Failed to update rate limit counter: {e}");
                }
            }
            log::info!("Successfully generated signed upload URL for file: {file_name}");
            app_state.metrics.increment_file_upload_requests(
                &request_upload.task_id,
                &task.name,
                address,
            );
            // Test builds also echo the expanded file name so tests can assert
            // on template expansion.
            #[cfg(test)]
            return HttpResponse::Ok().json(serde_json::json!({
                "success": true,
                "signed_url": signed_url,
                "file_name": file_name
            }));
            #[cfg(not(test))]
            HttpResponse::Ok().json(serde_json::json!({
                "success": true,
                "signed_url": signed_url
            }))
        }
        Err(e) => {
            log::error!("Failed to generate upload URL: {e}");
            HttpResponse::InternalServerError().json(serde_json::json!({
                "success": false,
                "error": format!("Failed to generate upload URL: {}", e)
            }))
        }
    }
}
/// Mounts the storage route group: `POST /storage/request-upload`.
pub(crate) fn storage_routes() -> Scope {
    let scope = web::scope("/storage");
    scope.route("/request-upload", post().to(request_upload))
}
/// Expands a file-name template by substituting every occurrence of
/// `${ORIGINAL_NAME}` with the caller-supplied original name.
fn generate_file_name(template: &str, original_name: &str) -> String {
    template.replace("${ORIGINAL_NAME}", original_name)
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use std::sync::Arc;
use super::*;
use crate::{
api::tests::helper::{create_test_app_state, create_test_app_state_with_nodegroups},
models::node::{NodeStatus, OrchestratorNode},
plugins::node_groups::{NodeGroupConfiguration, NodeGroupsPlugin},
};
use actix_web::{test, web::post, App};
use alloy::primitives::Address;
use shared::models::task::{SchedulingConfig, StorageConfig, Task};
use uuid::Uuid;
#[tokio::test]
async fn test_validate_file_name() {
assert_eq!(validate_file_name("test.txt").unwrap(), "test.txt");
assert_eq!(validate_file_name("file.parquet").unwrap(), "file.parquet");
assert_eq!(
validate_file_name("some/path/file.txt").unwrap(),
"some/path/file.txt"
);
// These are now allowed since GCS handles them safely
assert_eq!(
validate_file_name("../../../etc/passwd").unwrap(),
"../../../etc/passwd"
);
assert_eq!(validate_file_name("/etc/passwd").unwrap(), "/etc/passwd");
// Null bytes still blocked
assert!(validate_file_name("test.txt\0.exe").is_err());
// Empty names still blocked
assert!(validate_file_name("").is_err());
assert!(validate_file_name(" ").is_err());
}
#[tokio::test]
async fn test_generate_file_name() {
let template = "test/${ORIGINAL_NAME}";
let original_name = "test";
let file_name = generate_file_name(template, original_name);
assert_eq!(file_name, "test/test");
}
    #[actix_web::test]
    async fn test_request_upload_success() {
        // Happy path: a valid request against a task with a file-name template
        // succeeds, expands ${ORIGINAL_NAME}, and bumps the upload metric.
        let app_state = create_test_app_state().await;
        let task = Task {
            id: Uuid::new_v4(),
            image: "test-image".to_string(),
            name: "test-task".to_string(),
            storage_config: Some(StorageConfig {
                file_name_template: Some("model_123/user_uploads/${ORIGINAL_NAME}".to_string()),
            }),
            ..Default::default()
        };
        let task_store = app_state.store_context.task_store.clone();
        let _ = task_store.add_task(task.clone()).await;
        // Sanity check: the task must be retrievable before hitting the route.
        assert!(task_store
            .get_task(&task.id.to_string())
            .await
            .unwrap()
            .is_some());
        let app =
            test::init_service(App::new().app_data(app_state.clone()).service(
                web::scope("/storage").route("/request-upload", post().to(request_upload)),
            ))
            .await;
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", "test_address"))
            .set_json(&RequestUploadRequest {
                file_name: "test.parquet".to_string(),
                file_size: 1024,
                file_type: "application/octet-stream".to_string(),
                sha256: "test_sha256".to_string(),
                task_id: task.id.to_string(),
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        // `file_name` is only present in test builds (cfg(test) branch of the
        // handler) and must reflect the expanded template.
        assert_eq!(
            json["file_name"],
            serde_json::Value::String("model_123/user_uploads/test.parquet".to_string())
        );
        // The exported metrics must contain the labeled upload counter at 1.
        let metrics = app_state.metrics.export_metrics().unwrap();
        assert!(metrics.contains("orchestrator_file_upload_requests_total"));
        assert!(metrics.contains(&format!("orchestrator_file_upload_requests_total{{node_address=\"test_address\",pool_id=\"{}\",task_id=\"{}\",task_name=\"test-task\"}} 1", app_state.metrics.pool_id, task.id)));
    }
    #[actix_web::test]
    async fn test_request_upload_input_validation() {
        // File-name validation through the HTTP route: null bytes are rejected
        // with a 4xx JSON error; path-like names (including traversal) are
        // accepted and expanded verbatim into the template.
        let app_state = create_test_app_state().await;
        let task = Task {
            id: Uuid::new_v4(),
            image: "test-image".to_string(),
            name: "test-task".to_string(),
            storage_config: Some(StorageConfig {
                file_name_template: Some("model_123/user_uploads/${ORIGINAL_NAME}".to_string()),
            }),
            ..Default::default()
        };
        let task_store = app_state.store_context.task_store.clone();
        let _ = task_store.add_task(task.clone()).await;
        let app =
            test::init_service(App::new().app_data(app_state.clone()).service(
                web::scope("/storage").route("/request-upload", post().to(request_upload)),
            ))
            .await;
        // Test null byte injection (still blocked)
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", "test_address"))
            .set_json(&RequestUploadRequest {
                file_name: "test.txt\0.exe".to_string(),
                file_size: 1024,
                file_type: "text/plain".to_string(),
                sha256: "test_sha256".to_string(),
                task_id: task.id.to_string(),
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(false));
        assert!(json["error"].as_str().unwrap().contains("null bytes"));
        // Test various file names (now allowed)
        // Pairs of (submitted name, expected template-expanded name).
        let test_cases = vec![
            (
                "../../../etc/passwd",
                "model_123/user_uploads/../../../etc/passwd",
            ),
            ("/etc/passwd", "model_123/user_uploads//etc/passwd"),
            (
                "some/path/test.txt",
                "model_123/user_uploads/some/path/test.txt",
            ),
        ];
        for (input, expected) in test_cases {
            let req = test::TestRequest::post()
                .uri("/storage/request-upload")
                .insert_header(("x-address", "test_address"))
                .set_json(&RequestUploadRequest {
                    file_name: input.to_string(),
                    file_size: 1024,
                    file_type: "text/plain".to_string(),
                    sha256: "test_sha256".to_string(),
                    task_id: task.id.to_string(),
                })
                .to_request();
            let resp = test::call_service(&app, req).await;
            let body = test::read_body(resp).await;
            let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
            assert_eq!(json["success"], serde_json::Value::Bool(true));
            assert_eq!(
                json["file_name"],
                serde_json::Value::String(expected.to_string())
            );
        }
    }
    #[actix_web::test]
    async fn test_request_upload_invalid_task() {
        // Requesting an upload for a task id that was never stored must fail
        // (handler returns 404 "Task not found" with success=false).
        let app_state = create_test_app_state().await;
        let app =
            test::init_service(App::new().app_data(app_state.clone()).service(
                web::scope("/storage").route("/request-upload", post().to(request_upload)),
            ))
            .await;
        let non_existing_task_id = Uuid::new_v4().to_string();
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", "test_address"))
            .set_json(&RequestUploadRequest {
                file_name: "test.txt".to_string(),
                file_size: 1024,
                file_type: "text/plain".to_string(),
                sha256: "test_sha256".to_string(),
                task_id: non_existing_task_id,
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(false));
    }
    /// End-to-end check of the upload filename template when the node belongs
    /// to a node group: every `${NODE_GROUP_*}` placeholder plus the upload
    /// counters must be substituted, and the counters must only advance for a
    /// new (file name, sha) combination.
    #[actix_web::test]
    async fn test_with_node_and_group() {
        let app_state = create_test_app_state_with_nodegroups().await;
        let node = OrchestratorNode {
            address: Address::ZERO,
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            p2p_id: Some("test_p2p_id".to_string()),
            status: NodeStatus::Healthy,
            ..Default::default()
        };
        let _ = app_state
            .store_context
            .node_store
            .add_node(node.clone())
            .await;
        // Sanity-check that the node round-trips through the store.
        let node_from_store = app_state
            .store_context
            .node_store
            .get_node(&node.address)
            .await
            .unwrap();
        assert!(node_from_store.is_some());
        if let Some(node_from_store) = node_from_store {
            assert_eq!(node.address, node_from_store.address);
        }
        // A 1-node topology so the single healthy node can form a group alone.
        let config = NodeGroupConfiguration {
            name: "test-config".to_string(),
            min_group_size: 1,
            max_group_size: 1,
            compute_requirements: None,
        };
        let plugin = Arc::new(NodeGroupsPlugin::new(
            vec![config],
            app_state.redis_store.clone(),
            app_state.store_context.clone(),
            None,
            None,
        ));
        let _ = app_state
            .store_context
            .task_store
            .add_observer(plugin.clone())
            .await;
        // Notify the plugin of the healthy status so the node becomes
        // eligible for grouping.
        let _ = plugin.handle_status_change(&node).await;
        let task = Task {
            id: Uuid::new_v4(),
            image: "test-image".to_string(),
            name: "test-task".to_string(),
            storage_config: Some(StorageConfig {
                file_name_template: Some(
                    "model_xyz/dataset_1/${NODE_GROUP_ID}-${NODE_GROUP_SIZE}-${NODE_GROUP_INDEX}-${TOTAL_UPLOAD_COUNT_AFTER}-${CURRENT_FILE_INDEX}.parquet".to_string(),
                ),
            }),
            scheduling_config: Some(SchedulingConfig {
                plugins: Some(HashMap::from([(
                    "node_groups".to_string(),
                    HashMap::from([(
                        "allowed_topologies".to_string(),
                        vec!["test-config".to_string()],
                    )]),
                )])),
            }),
            ..Default::default()
        };
        let task_store = app_state.store_context.task_store.clone();
        let _ = task_store.add_task(task.clone()).await;
        // Trigger group formation and confirm the node landed in a group.
        let _ = plugin.test_try_form_new_groups().await;
        let group = plugin
            .get_node_group(&node.address.to_string())
            .await
            .unwrap();
        assert!(group.is_some());
        let group = group.unwrap();
        assert!(task_store
            .get_task(&task.id.to_string())
            .await
            .unwrap()
            .is_some());
        let app =
            test::init_service(App::new().app_data(app_state.clone()).service(
                web::scope("/storage").route("/request-upload", post().to(request_upload)),
            ))
            .await;
        // First request with test.parquet
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", node.address.to_string()))
            .set_json(&RequestUploadRequest {
                file_name: "test.parquet".to_string(),
                file_size: 1024,
                file_type: "application/octet-stream".to_string(),
                sha256: "test_sha256".to_string(),
                task_id: task.id.to_string(),
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        // group index 0, upload count 1 (after this upload), file index 0.
        assert_eq!(
            json["file_name"],
            serde_json::Value::String(format!(
                "model_xyz/dataset_1/{}-{}-{}-{}-{}.parquet",
                group.id,
                group.nodes.len(),
                0,
                1,
                0
            ))
        );
        // Second request with same file name - should not increment count
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", node.address.to_string()))
            .set_json(&RequestUploadRequest {
                file_name: "test.parquet".to_string(),
                file_size: 1024,
                file_type: "application/octet-stream".to_string(),
                sha256: "test_sha256".to_string(),
                task_id: task.id.to_string(),
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        // Identical file name + sha: counters unchanged from the first upload.
        assert_eq!(
            json["file_name"],
            serde_json::Value::String(format!(
                "model_xyz/dataset_1/{}-{}-{}-{}-{}.parquet",
                group.id,
                group.nodes.len(),
                0,
                1,
                0
            ))
        );
        // Third request with different file name - should increment count
        let req = test::TestRequest::post()
            .uri("/storage/request-upload")
            .insert_header(("x-address", node.address.to_string()))
            .set_json(&RequestUploadRequest {
                file_name: "test2.parquet".to_string(),
                file_size: 1024,
                file_type: "application/octet-stream".to_string(),
                sha256: "test_sha256_2".to_string(),
                task_id: task.id.to_string(),
            })
            .to_request();
        let resp = test::call_service(&app, req).await;
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        // New file: total upload count becomes 2 and the file index becomes 1.
        assert_eq!(
            json["file_name"],
            serde_json::Value::String(format!(
                "model_xyz/dataset_1/{}-{}-{}-{}-{}.parquet",
                group.id,
                group.nodes.len(),
                0,
                2,
                1
            ))
        );
    }
#[actix_web::test]
async fn test_upload_counter_without_node_group() {
let app_state = create_test_app_state_with_nodegroups().await;
let node = OrchestratorNode {
address: Address::ZERO,
ip_address: "127.0.0.1".to_string(),
port: 8080,
p2p_id: Some("test_p2p_id".to_string()),
status: NodeStatus::Healthy,
..Default::default()
};
let _ = app_state
.store_context
.node_store
.add_node(node.clone())
.await;
let node_from_store = app_state
.store_context
.node_store
.get_node(&node.address)
.await
.unwrap();
assert!(node_from_store.is_some());
if let Some(node) = node_from_store {
assert_eq!(node.address, node.address);
}
let task = Task {
id: Uuid::new_v4(),
image: "test-image".to_string(),
name: "test-task".to_string(),
storage_config: Some(StorageConfig {
file_name_template: Some(
"model_xyz/dataset_1/${TOTAL_UPLOAD_COUNT_AFTER}-${CURRENT_FILE_INDEX}.parquet"
.to_string(),
),
}),
..Default::default()
};
let task_store = app_state.store_context.task_store.clone();
let _ = task_store.add_task(task.clone()).await;
assert!(task_store
.get_task(&task.id.to_string())
.await
.unwrap()
.is_some());
let app =
test::init_service(App::new().app_data(app_state.clone()).service(
web::scope("/storage").route("/request-upload", post().to(request_upload)),
))
.await;
// First request with test.parquet
let req = test::TestRequest::post()
.uri("/storage/request-upload")
.insert_header(("x-address", node.address.to_string()))
.set_json(&RequestUploadRequest {
file_name: "test.parquet".to_string(),
file_size: 1024,
file_type: "application/octet-stream".to_string(),
sha256: "test_sha256".to_string(),
task_id: task.id.to_string(),
})
.to_request();
let resp = test::call_service(&app, req).await;
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
// As defined in the storage template: model_xyz/dataset_1/${upload_count}-${file_number}.parquet
// Upload count var is 1 while file number is 0
assert_eq!(
json["file_name"],
serde_json::Value::String("model_xyz/dataset_1/1-0.parquet".to_string())
);
// Second request with same file name - should not increment count
let req = test::TestRequest::post()
.uri("/storage/request-upload")
.insert_header(("x-address", node.address.to_string()))
.set_json(&RequestUploadRequest {
file_name: "test.parquet".to_string(),
file_size: 1024,
file_type: "application/octet-stream".to_string(),
sha256: "test_sha256".to_string(),
task_id: task.id.to_string(),
})
.to_request();
let resp = test::call_service(&app, req).await;
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
// Upload count var is 1 while file number is 0
assert_eq!(
json["file_name"],
serde_json::Value::String("model_xyz/dataset_1/1-0.parquet".to_string())
);
// Third request with different file name - should increment count
let req = test::TestRequest::post()
.uri("/storage/request-upload")
.insert_header(("x-address", node.address.to_string()))
.set_json(&RequestUploadRequest {
file_name: "test2.parquet".to_string(),
file_size: 1024,
file_type: "application/octet-stream".to_string(),
sha256: "test_sha256_2".to_string(),
task_id: task.id.to_string(),
})
.to_request();
let resp = test::call_service(&app, req).await;
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
assert_eq!(
json["file_name"],
serde_json::Value::String("model_xyz/dataset_1/2-1.parquet".to_string())
);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/mod.rs | crates/orchestrator/src/api/routes/mod.rs | pub(crate) mod groups;
pub(crate) mod heartbeat;
pub(crate) mod metrics;
pub(crate) mod nodes;
pub(crate) mod storage;
pub(crate) mod task;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/nodes.rs | crates/orchestrator/src/api/routes/nodes.rs | use crate::api::server::AppState;
use actix_web::{
web::{self, get, post, Data, Query},
HttpResponse, Scope,
};
use alloy::primitives::Address;
use log::{error, info};
use serde::Deserialize;
use serde_json::json;
use std::str::FromStr;
/// Query-string parameters accepted by the node-listing endpoint.
#[derive(Deserialize)]
struct NodeQuery {
    // When `true`, nodes with `NodeStatus::Dead` are kept in the response;
    // defaults to excluding them.
    include_dead: Option<bool>,
}
#[utoipa::path(
get,
path = "/nodes",
params(
("include_dead" = Option<bool>, Query, description = "Include dead nodes in the response")
),
responses(
(status = 200, description = "List of nodes retrieved successfully"),
(status = 500, description = "Internal server error")
),
tag = "nodes"
)]
async fn get_nodes(query: Query<NodeQuery>, app_state: Data<AppState>) -> HttpResponse {
let nodes = match app_state.store_context.node_store.get_nodes().await {
Ok(mut nodes) => {
// Filter out dead nodes unless include_dead is true
if !query.include_dead.unwrap_or(false) {
nodes.retain(|node| node.status != crate::models::node::NodeStatus::Dead);
}
nodes
}
Err(e) => {
error!("Error getting nodes: {e}");
return HttpResponse::InternalServerError().json(json!({
"success": false,
"error": "Failed to get nodes"
}));
}
};
let mut status_counts = json!({});
for node in &nodes {
let status_str = format!("{:?}", node.status);
if let Some(count) = status_counts.get(&status_str) {
if let Some(count_value) = count.as_u64() {
status_counts[status_str] = json!(count_value + 1);
} else {
status_counts[status_str] = json!(1);
}
} else {
status_counts[status_str] = json!(1);
}
}
let mut response = json!({
"success": true,
"nodes": nodes,
"counts": status_counts
});
// If node groups plugin exists, add group information to each node
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
let mut nodes_with_groups = Vec::new();
let node_addresses: Vec<String> =
nodes.iter().map(|node| node.address.to_string()).collect();
match node_groups_plugin
.get_node_groups_batch(&node_addresses)
.await
{
Ok(node_groups) => {
for node in &nodes {
let mut node_json = json!(node);
if let Some(Some(group)) = node_groups.get(&node.address.to_string()) {
node_json["group"] = json!({
"id": group.id,
"size": group.nodes.len(),
"created_at": group.created_at,
"topology_config": group.configuration_name
});
}
nodes_with_groups.push(node_json);
}
}
Err(e) => {
error!("Error getting node groups batch: {e}");
// Fall back to nodes without group information
nodes_with_groups = nodes.iter().map(|node| json!(node)).collect();
}
}
response["nodes"] = json!(nodes_with_groups);
}
HttpResponse::Ok().json(response)
}
/// Asks the worker behind `node_id` to restart its task via the p2p channel.
///
/// Fix: the original checked `is_none()` on both p2p fields and then
/// re-extracted them with `.expect(..)`; the check and extraction are now a
/// single `let-else`, matching the style of `get_node_logs` and removing the
/// panic paths. The error message for missing p2p data is unchanged.
#[utoipa::path(
    post,
    path = "/nodes/{node_id}/restart",
    params(
        ("node_id" = String, Path, description = "Node address to restart task for")
    ),
    responses(
        (status = 200, description = "Task restarted successfully"),
        (status = 400, description = "Bad request - invalid node address or node missing p2p information"),
        (status = 404, description = "Node not found"),
        (status = 500, description = "Internal server error")
    ),
    tag = "nodes"
)]
async fn restart_node_task(node_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
    let Ok(node_address) = Address::from_str(&node_id) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid node address: {}", node_id)
        }));
    };
    let node = match app_state
        .store_context
        .node_store
        .get_node(&node_address)
        .await
    {
        Ok(Some(node)) => node,
        Ok(None) => {
            return HttpResponse::NotFound().json(json!({
                "success": false,
                "error": format!("Node not found: {}", node_id)
            }));
        }
        Err(e) => {
            error!("Error getting node: {e}");
            return HttpResponse::InternalServerError().json(json!({
                "success": false,
                "error": "Failed to get node"
            }));
        }
    };
    // Both p2p fields are required to address the worker.
    let (Some(p2p_id), Some(p2p_addresses)) = (
        node.worker_p2p_id.as_ref(),
        node.worker_p2p_addresses.as_ref(),
    ) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": "Node does not have p2p information"
        }));
    };
    // Hand the request to the p2p task loop and wait for its acknowledgement.
    let (response_tx, response_rx) = tokio::sync::oneshot::channel();
    let restart_task_request = crate::p2p::RestartTaskRequest {
        worker_wallet_address: node.address,
        worker_p2p_id: p2p_id.clone(),
        worker_addresses: p2p_addresses.clone(),
        response_tx,
    };
    if let Err(e) = app_state.restart_task_tx.send(restart_task_request).await {
        error!("Failed to send restart task request: {e}");
        return HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": "Failed to send restart task request"
        }));
    }
    match response_rx.await {
        Ok(_) => HttpResponse::Ok().json(json!({
            "success": true,
            "message": "Task restarted successfully"
        })),
        Err(e) => HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": format!("Failed to restart task: {}", e)
        })),
    }
}
/// Fetches the task logs from the worker behind `node_id` via the p2p channel.
///
/// Fix: the original checked p2p presence twice — a combined `is_none()`
/// guard followed by two individual `let-else` extractions whose error arms
/// could never fire. The checks are collapsed into one `let-else`, keeping
/// the error message of the reachable path.
#[utoipa::path(
    get,
    path = "/nodes/{node_id}/logs",
    params(
        ("node_id" = String, Path, description = "Node address to get logs for")
    ),
    responses(
        (status = 200, description = "Node logs retrieved successfully"),
        (status = 400, description = "Bad request - invalid node address or node missing p2p information"),
        (status = 500, description = "Internal server error")
    ),
    tag = "nodes"
)]
async fn get_node_logs(node_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
    let Ok(node_address) = Address::from_str(&node_id) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid node address: {}", node_id)
        }));
    };
    let node = match app_state
        .store_context
        .node_store
        .get_node(&node_address)
        .await
    {
        Ok(Some(node)) => node,
        Ok(None) => {
            // NOTE: unlike the restart endpoint this returns 200 with a
            // "Node not found" payload — preserved for caller compatibility.
            return HttpResponse::Ok().json(json!({"success": false, "logs": "Node not found"}));
        }
        Err(e) => {
            error!("Error getting node: {e}");
            return HttpResponse::InternalServerError().json(json!({
                "success": false,
                "error": "Failed to get node"
            }));
        }
    };
    // Both p2p fields are required to address the worker.
    let (Some(p2p_id), Some(p2p_addresses)) = (
        node.worker_p2p_id.as_ref(),
        node.worker_p2p_addresses.as_ref(),
    ) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": "Node does not have p2p information"
        }));
    };
    // Hand the request to the p2p task loop and wait for the log payload.
    let (response_tx, response_rx) = tokio::sync::oneshot::channel();
    let get_task_logs_request = crate::p2p::GetTaskLogsRequest {
        worker_wallet_address: node.address,
        worker_p2p_id: p2p_id.clone(),
        worker_addresses: p2p_addresses.clone(),
        response_tx,
    };
    if let Err(e) = app_state.get_task_logs_tx.send(get_task_logs_request).await {
        error!("Failed to send get task logs request: {e}");
        return HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": "Failed to send get task logs request"
        }));
    }
    match response_rx.await {
        Ok(logs) => HttpResponse::Ok().json(json!({
            "success": true,
            "logs": logs
        })),
        Err(e) => HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": format!("Failed to get logs: {}", e)
        })),
    }
}
/// Returns the stored metrics for a node; store failures degrade to an
/// empty/default metrics payload instead of an error response.
#[utoipa::path(
    get,
    path = "/nodes/{node_id}/metrics",
    params(
        ("node_id" = String, Path, description = "Node address to get metrics for")
    ),
    responses(
        (status = 200, description = "Node metrics retrieved successfully"),
        (status = 400, description = "Bad request - invalid node address"),
        (status = 500, description = "Internal server error")
    ),
    tag = "nodes"
)]
async fn get_node_metrics(node_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
    let Ok(node_address) = Address::from_str(&node_id) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid node address: {}", node_id)
        }));
    };
    // Best-effort lookup: log the failure and fall back to the default value.
    let metrics = app_state
        .store_context
        .metrics_store
        .get_metrics_for_node(node_address)
        .await
        .unwrap_or_else(|e| {
            error!("Error getting metrics for node: {e}");
            Default::default()
        });
    HttpResponse::Ok().json(json!({"success": true, "metrics": metrics}))
}
/// Marks a node as banned in the store and ejects it from the compute pool.
///
/// Fix: the original updated the node's status to `Banned` first and only
/// then discovered that contracts were unavailable, leaving the store
/// mutated while returning a 500. Contracts are now checked up front so the
/// store is only touched when the ejection can actually be attempted. All
/// response bodies are unchanged.
#[utoipa::path(
    post,
    path = "/nodes/{node_id}/ban",
    params(
        ("node_id" = String, Path, description = "Node address to ban")
    ),
    responses(
        (status = 200, description = "Node banned and ejected successfully"),
        (status = 400, description = "Bad request - invalid node address"),
        (status = 404, description = "Node not found"),
        (status = 500, description = "Internal server error")
    ),
    tag = "nodes"
)]
async fn ban_node(node_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
    info!("banning node: {node_id}");
    let Ok(node_address) = Address::from_str(&node_id) else {
        return HttpResponse::BadRequest().json(json!({
            "success": false,
            "error": format!("Invalid node address: {}", node_id)
        }));
    };
    let node = match app_state
        .store_context
        .node_store
        .get_node(&node_address)
        .await
    {
        Ok(Some(node)) => node,
        Ok(None) => {
            return HttpResponse::NotFound().json(json!({
                "success": false,
                "error": format!("Node not found: {}", node_id)
            }));
        }
        Err(e) => {
            error!("Error getting node: {e}");
            return HttpResponse::InternalServerError().json(json!({
                "success": false,
                "error": "Failed to get node"
            }));
        }
    };
    // Fail fast if contracts are unavailable so we don't mark the node banned
    // while being unable to eject it from the pool.
    let Some(contracts) = &app_state.contracts else {
        return HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": "Contracts not found"
        }));
    };
    if let Err(e) = app_state
        .store_context
        .node_store
        .update_node_status(&node.address, crate::models::node::NodeStatus::Banned)
        .await
    {
        error!("Error updating node status: {e}");
        return HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": "Failed to update node status"
        }));
    }
    // Attempt to eject from pool.
    match contracts
        .compute_pool
        .eject_node(app_state.pool_id, node.address)
        .await
    {
        Ok(_) => HttpResponse::Ok().json(json!({
            "success": true,
            "message": format!("Node {} successfully ejected", node_id)
        })),
        Err(e) => HttpResponse::InternalServerError().json(json!({
            "success": false,
            "error": format!("Failed to eject node from pool: {}", e)
        })),
    }
}
/// Builds the `/nodes` scope: listing plus per-node restart, logs, metrics
/// and ban endpoints.
pub(crate) fn nodes_routes() -> Scope {
    web::scope("/nodes")
        .route("", get().to(get_nodes))
        .route("/{node_id}/restart", post().to(restart_node_task))
        .route("/{node_id}/logs", get().to(get_node_logs))
        .route("/{node_id}/metrics", get().to(get_node_metrics))
        .route("/{node_id}/ban", post().to(ban_node))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api::tests::helper::create_test_app_state;
use crate::models::node::NodeStatus;
use crate::models::node::OrchestratorNode;
use actix_web::http::StatusCode;
use actix_web::test;
use actix_web::App;
use alloy::primitives::Address;
use std::str::FromStr;
    /// A node added to the store must come back from GET /nodes with its
    /// address intact; `Discovered` nodes are not filtered out.
    #[actix_web::test]
    async fn test_get_nodes() {
        let app_state = create_test_app_state().await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/nodes", get().to(get_nodes)),
        )
        .await;
        let node = OrchestratorNode {
            address: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            status: NodeStatus::Discovered,
            ..Default::default()
        };
        app_state
            .store_context
            .node_store
            .add_node(node.clone())
            .await
            .unwrap();
        let req = test::TestRequest::get().uri("/nodes").to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(
            resp.status(),
            StatusCode::OK,
            "Expected status OK but got {:?}",
            resp.status()
        );
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(
            json["success"], true,
            "Expected success to be true but got {:?}",
            json["success"]
        );
        let nodes_array = json["nodes"].as_array().unwrap();
        assert_eq!(
            nodes_array.len(),
            1,
            "Expected 1 node but got {}",
            nodes_array.len()
        );
        assert_eq!(
            nodes_array[0]["address"],
            node.address.to_string(),
            "Expected address to be {} but got {}",
            node.address,
            nodes_array[0]["address"]
        );
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/task.rs | crates/orchestrator/src/api/routes/task.rs | use crate::api::server::AppState;
use crate::plugins::node_groups::get_task_topologies;
use actix_web::{
web::{self, delete, get, post, Data},
HttpResponse, Scope,
};
use serde_json::json;
use shared::models::task::Task;
use shared::models::task::TaskRequest;
/// Returns every task currently in the task store.
#[utoipa::path(
    get,
    path = "/tasks",
    responses(
        (status = 200, description = "List of all tasks retrieved successfully", body = Vec<Task>),
        (status = 500, description = "Internal server error")
    ),
    tag = "tasks"
)]
async fn get_all_tasks(app_state: Data<AppState>) -> HttpResponse {
    match app_state.store_context.task_store.get_all_tasks().await {
        Ok(tasks) => HttpResponse::Ok().json(json!({"success": true, "tasks": tasks})),
        Err(e) => HttpResponse::InternalServerError()
            .json(json!({"success": false, "error": e.to_string()})),
    }
}
/// Validates and stores a new task.
///
/// Rejects duplicate task names, invalid `TaskRequest` payloads, and — when
/// the node-groups plugin is active — tasks without any allowed topology.
#[utoipa::path(
    post,
    path = "/tasks",
    request_body = TaskRequest,
    responses(
        (status = 200, description = "Task created successfully", body = Task),
        (status = 400, description = "Bad request - task name already exists or validation failed"),
        (status = 500, description = "Internal server error")
    ),
    tag = "tasks"
)]
async fn create_task(task: web::Json<TaskRequest>, app_state: Data<AppState>) -> HttpResponse {
    let task_request = task.into_inner();
    let task_store = app_state.store_context.task_store.clone();
    // Check if a task with the same name already exists.
    // NOTE(review): check-then-insert is not atomic — two concurrent creates
    // with the same name could both pass; confirm whether the store enforces
    // uniqueness.
    match task_store.task_name_exists(&task_request.name).await {
        Ok(exists) => {
            if exists {
                return HttpResponse::BadRequest()
                    .json(json!({"success": false, "error": format!("Task with name '{}' already exists", task_request.name)}));
            }
        }
        Err(e) => {
            return HttpResponse::InternalServerError().json(
                json!({"success": false, "error": format!("Failed to check task name: {}", e)}),
            );
        }
    }
    // Validation happens in the TryFrom<TaskRequest> conversion.
    let task = match Task::try_from(task_request) {
        Ok(task) => task,
        Err(e) => {
            return HttpResponse::BadRequest()
                .json(json!({"success": false, "error": e.to_string()}));
        }
    };
    // With the grouping plugin enabled, a task must name at least one topology
    // or it could never be scheduled.
    if app_state.node_groups_plugin.is_some() {
        match get_task_topologies(&task) {
            Ok(topology) => {
                if topology.is_empty() {
                    return HttpResponse::BadRequest().json(json!({"success": false, "error": "No topology found for task but grouping plugin is active."}));
                }
            }
            Err(e) => {
                return HttpResponse::BadRequest()
                    .json(json!({"success": false, "error": e.to_string()}));
            }
        }
    }
    if let Err(e) = task_store.add_task(task.clone()).await {
        return HttpResponse::InternalServerError()
            .json(json!({"success": false, "error": e.to_string()}));
    }
    HttpResponse::Ok().json(json!({"success": true, "task": task}))
}
#[utoipa::path(
delete,
path = "/tasks/{id}",
params(
("id" = String, Path, description = "Task ID to delete")
),
responses(
(status = 200, description = "Task deleted successfully"),
(status = 500, description = "Internal server error")
),
tag = "tasks"
)]
async fn delete_task(id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
let task_store = app_state.store_context.task_store.clone();
if let Err(e) = task_store.delete_task(id.into_inner()).await {
return HttpResponse::InternalServerError()
.json(json!({"success": false, "error": e.to_string()}));
}
HttpResponse::Ok().json(json!({"success": true}))
}
#[utoipa::path(
delete,
path = "/tasks/all",
responses(
(status = 200, description = "All tasks deleted successfully"),
(status = 500, description = "Internal server error")
),
tag = "tasks"
)]
async fn delete_all_tasks(app_state: Data<AppState>) -> HttpResponse {
let task_store = app_state.store_context.task_store.clone();
if let Err(e) = task_store.delete_all_tasks().await {
return HttpResponse::InternalServerError()
.json(json!({"success": false, "error": e.to_string()}));
}
HttpResponse::Ok().json(json!({"success": true}))
}
/// Builds the `/tasks` scope.
///
/// `/all` is registered before `/{id}` so DELETE /tasks/all matches the
/// literal route rather than being captured by the `{id}` parameter.
pub(crate) fn tasks_routes() -> Scope {
    web::scope("/tasks")
        .route("", get().to(get_all_tasks))
        .route("", post().to(create_task))
        .route("/all", delete().to(delete_all_tasks))
        .route("/{id}", delete().to(delete_task))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::api::tests::helper::create_test_app_state;
use actix_web::http::StatusCode;
use actix_web::test;
use actix_web::App;
use shared::models::task::Task;
use std::thread;
use std::time::Duration;
#[actix_web::test]
async fn test_create_task() {
let app_state = create_test_app_state().await;
let app = test::init_service(
App::new()
.app_data(app_state.clone())
.route("/tasks", post().to(create_task)),
)
.await;
let payload = TaskRequest {
image: "test".to_string(),
name: "test".to_string(),
..Default::default()
};
let req = test::TestRequest::post()
.uri("/tasks")
.set_json(payload)
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
assert!(json["task"]["id"].is_string());
assert_eq!(json["task"]["image"], "test");
}
#[actix_web::test]
async fn get_task_when_no_task_exists() {
let app_state = create_test_app_state().await;
let app = test::init_service(
App::new()
.app_data(app_state.clone())
.route("/tasks", get().to(get_all_tasks)),
)
.await;
let req = test::TestRequest::get().uri("/tasks").to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
assert_eq!(json["tasks"], serde_json::Value::Array(vec![]));
}
    /// A task added directly to the store is visible through GET /tasks.
    #[actix_web::test]
    async fn get_task_after_setting() {
        let app_state = create_test_app_state().await;
        let task: Task = TaskRequest {
            image: "test".to_string(),
            name: "test".to_string(),
            ..Default::default()
        }
        .try_into()
        .unwrap();
        let task_store = app_state.store_context.task_store.clone();
        let _ = task_store.add_task(task).await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/tasks", get().to(get_all_tasks)),
        )
        .await;
        let req = test::TestRequest::get().uri("/tasks").to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(resp.status(), StatusCode::OK);
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        assert_eq!(
            json["tasks"][0]["image"],
            serde_json::Value::String("test".to_string())
        );
    }
    /// DELETE /tasks/{id} removes the task from the store.
    #[actix_web::test]
    async fn test_delete_task() {
        let app_state = create_test_app_state().await;
        let task: Task = TaskRequest {
            image: "test".to_string(),
            name: "test".to_string(),
            ..Default::default()
        }
        .try_into()
        .unwrap();
        let task_store = app_state.store_context.task_store.clone();
        let _ = task_store.add_task(task.clone()).await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/tasks/{id}", delete().to(delete_task)),
        )
        .await;
        let req = test::TestRequest::delete()
            .uri(&format!("/tasks/{}", task.id))
            .to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(resp.status(), StatusCode::OK);
        // The store must be empty after the delete round-trips.
        let tasks = task_store.get_all_tasks().await.unwrap();
        assert_eq!(tasks.len(), 0);
    }
    /// GET /tasks returns tasks sorted by creation time, newest first.
    #[actix_web::test]
    async fn test_multiple_tasks_sorting() {
        let app_state = create_test_app_state().await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/tasks", get().to(get_all_tasks)),
        )
        .await;
        let task_store = app_state.store_context.task_store.clone();
        // Create first task
        let task1: Task = TaskRequest {
            image: "test1".to_string(),
            name: "test1".to_string(),
            ..Default::default()
        }
        .try_into()
        .unwrap();
        let _ = task_store.add_task(task1.clone()).await;
        // Wait briefly to ensure different timestamps
        // NOTE(review): std::thread::sleep blocks the async runtime's worker
        // thread; tokio::time::sleep(...).await would be the non-blocking way
        // to stagger timestamps in an async test.
        thread::sleep(Duration::from_millis(10));
        // Create second task
        let task2: Task = TaskRequest {
            image: "test2".to_string(),
            name: "test2".to_string(),
            ..Default::default()
        }
        .try_into()
        .unwrap();
        let _ = task_store.add_task(task2.clone()).await;
        // Test get all tasks - should be sorted by created_at descending
        let req = test::TestRequest::get().uri("/tasks").to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(resp.status(), StatusCode::OK);
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        let tasks = json["tasks"].as_array().unwrap();
        assert_eq!(tasks.len(), 2);
        assert_eq!(tasks[0]["image"], "test2");
        assert_eq!(tasks[1]["image"], "test1");
    }
    /// Store-level check that `get_all_tasks` orders by created_at descending
    /// when several tasks are added in sequence.
    #[actix_web::test]
    async fn test_task_ordering_with_multiple_additions() {
        let app_state = create_test_app_state().await;
        let task_store = app_state.store_context.task_store.clone();
        // Add tasks in sequence with delays
        for i in 1..=3 {
            let task: Task = TaskRequest {
                image: format!("test{}", i),
                name: format!("test{}", i),
                ..Default::default()
            }
            .try_into()
            .unwrap();
            let _ = task_store.add_task(task).await;
            // NOTE(review): blocking sleep inside an async test; see
            // test_multiple_tasks_sorting for the same pattern.
            thread::sleep(Duration::from_millis(10));
        }
        let tasks = task_store.get_all_tasks().await.unwrap();
        // Verify tasks are ordered by created_at descending
        assert_eq!(tasks.len(), 3);
        assert_eq!(tasks[0].image, "test3");
        assert_eq!(tasks[1].image, "test2");
        assert_eq!(tasks[2].image, "test1");
    }
    /// Metadata labels supplied on creation are stored and echoed back
    /// unchanged in the create-task response.
    #[actix_web::test]
    async fn test_create_task_with_metadata() {
        let app_state = create_test_app_state().await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/tasks", post().to(create_task)),
        )
        .await;
        let mut labels = std::collections::HashMap::new();
        labels.insert("model".to_string(), "qwen3-4b".to_string());
        labels.insert("dataset".to_string(), "intellect-2-rl-dataset".to_string());
        labels.insert("version".to_string(), "v1".to_string());
        let payload = TaskRequest {
            image: "primeintellect/prime-rl:main".to_string(),
            name: "Qwen3-4B:INTELLECT-2-RL-Dataset".to_string(),
            metadata: Some(shared::models::task::TaskMetadata {
                labels: Some(labels),
            }),
            ..Default::default()
        };
        let req = test::TestRequest::post()
            .uri("/tasks")
            .set_json(payload)
            .to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(resp.status(), StatusCode::OK);
        let body = test::read_body(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(true));
        assert!(json["task"]["id"].is_string());
        assert_eq!(json["task"]["image"], "primeintellect/prime-rl:main");
        assert_eq!(json["task"]["name"], "Qwen3-4B:INTELLECT-2-RL-Dataset");
        // Verify metadata is preserved
        assert!(json["task"]["metadata"].is_object());
        assert!(json["task"]["metadata"]["labels"].is_object());
        assert_eq!(json["task"]["metadata"]["labels"]["model"], "qwen3-4b");
        assert_eq!(
            json["task"]["metadata"]["labels"]["dataset"],
            "intellect-2-rl-dataset"
        );
        assert_eq!(json["task"]["metadata"]["labels"]["version"], "v1");
    }
    /// Creating a second task with an already-used name is rejected with 400
    /// and a descriptive error message.
    #[actix_web::test]
    async fn test_create_task_with_duplicate_name() {
        let app_state = create_test_app_state().await;
        let app = test::init_service(
            App::new()
                .app_data(app_state.clone())
                .route("/tasks", post().to(create_task)),
        )
        .await;
        // Create first task
        let payload = TaskRequest {
            image: "test".to_string(),
            name: "duplicate-test".to_string(),
            ..Default::default()
        };
        let req = test::TestRequest::post()
            .uri("/tasks")
            .set_json(&payload)
            .to_request();
        let resp = test::call_service(&app, req).await;
        assert_eq!(resp.status(), StatusCode::OK);
        // Try to create second task with same name
        let req2 = test::TestRequest::post()
            .uri("/tasks")
            .set_json(&payload)
            .to_request();
        let resp2 = test::call_service(&app, req2).await;
        assert_eq!(resp2.status(), StatusCode::BAD_REQUEST);
        let body = test::read_body(resp2).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["success"], serde_json::Value::Bool(false));
        assert!(json["error"]
            .as_str()
            .unwrap()
            .contains("Task with name 'duplicate-test' already exists"));
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/groups.rs | crates/orchestrator/src/api/routes/groups.rs | use crate::api::server::AppState;
use actix_web::{
web::{self, delete, get, post, Data},
HttpResponse, Scope,
};
use alloy::primitives::Address;
use futures::future::join_all;
use log::error;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::str::FromStr;
use std::time::Duration;
use utoipa::ToSchema;
const NODE_REQUEST_TIMEOUT: u64 = 30;
#[derive(Debug, Deserialize, Serialize, ToSchema)]
struct ForceRegroupRequest {
configuration_name: String,
}
#[utoipa::path(
get,
path = "/groups",
responses(
(status = 200, description = "List of all groups retrieved successfully"),
(status = 503, description = "Node groups plugin is not enabled"),
(status = 500, description = "Internal server error")
),
tag = "groups"
)]
async fn get_groups(app_state: Data<AppState>) -> HttpResponse {
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
match node_groups_plugin.get_all_groups().await {
Ok(groups) => {
let groups_with_details: Vec<_> = groups
.into_iter()
.map(|group| {
json!({
"id": group.id,
"nodes": group.nodes,
"node_count": group.nodes.len(),
"created_at": group.created_at,
"configuration_name": group.configuration_name
})
})
.collect();
HttpResponse::Ok().json(json!({
"success": true,
"groups": groups_with_details,
"total_count": groups_with_details.len()
}))
}
Err(e) => HttpResponse::InternalServerError().json(json!({
"success": false,
"error": format!("Failed to get groups: {}", e)
})),
}
} else {
HttpResponse::ServiceUnavailable().json(json!({
"success": false,
"error": "Node groups plugin is not enabled"
}))
}
}
#[utoipa::path(
get,
path = "/groups/configs",
responses(
(status = 200, description = "List of all configurations retrieved successfully"),
(status = 503, description = "Node groups plugin is not enabled")
),
tag = "groups"
)]
async fn get_configurations(app_state: Data<AppState>) -> HttpResponse {
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
let all_configs = node_groups_plugin.get_all_configuration_templates();
let available_configs = node_groups_plugin.get_available_configurations().await;
let available_names: std::collections::HashSet<String> =
available_configs.iter().map(|c| c.name.clone()).collect();
let configs_with_status: Vec<_> = all_configs
.into_iter()
.map(|config| {
json!({
"name": config.name,
"min_group_size": config.min_group_size,
"max_group_size": config.max_group_size,
"compute_requirements": config.compute_requirements,
"enabled": available_names.contains(&config.name)
})
})
.collect();
HttpResponse::Ok().json(json!({
"success": true,
"configurations": configs_with_status,
"total_count": configs_with_status.len()
}))
} else {
HttpResponse::ServiceUnavailable().json(json!({
"success": false,
"error": "Node groups plugin is not enabled"
}))
}
}
#[utoipa::path(
delete,
path = "/groups/{group_id}",
params(
("group_id" = String, Path, description = "Group ID to delete")
),
responses(
(status = 200, description = "Group deleted successfully"),
(status = 503, description = "Node groups plugin is not enabled"),
(status = 500, description = "Internal server error")
),
tag = "groups"
)]
async fn delete_group(group_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
match node_groups_plugin.dissolve_group(&group_id).await {
Ok(()) => HttpResponse::Ok().json(json!({
"success": true,
"message": format!("Group {} successfully deleted", group_id.as_str())
})),
Err(e) => HttpResponse::InternalServerError().json(json!({
"success": false,
"error": format!("Failed to delete group: {}", e)
})),
}
} else {
HttpResponse::ServiceUnavailable().json(json!({
"success": false,
"error": "Node groups plugin is not enabled"
}))
}
}
#[utoipa::path(
get,
path = "/groups/{group_id}/logs",
params(
("group_id" = String, Path, description = "Group ID to get logs for")
),
responses(
(status = 200, description = "Group logs retrieved successfully"),
(status = 404, description = "Group not found"),
(status = 503, description = "Node groups plugin is not enabled"),
(status = 500, description = "Internal server error")
),
tag = "groups"
)]
async fn get_group_logs(group_id: web::Path<String>, app_state: Data<AppState>) -> HttpResponse {
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
match node_groups_plugin.get_group_by_id(&group_id).await {
Ok(Some(group)) => {
// Collect all node addresses
let node_addresses: Vec<Address> = group
.nodes
.iter()
.filter_map(|node_str| Address::from_str(node_str).ok())
.collect();
// Create futures for all node log requests
let log_futures: Vec<_> = node_addresses
.iter()
.map(|node_address| {
let app_state = app_state.clone();
let node_address = *node_address;
async move { fetch_node_logs_p2p(node_address, app_state).await }
})
.collect();
let log_results = join_all(log_futures).await;
let mut nodes = serde_json::Map::new();
for (i, node_address) in node_addresses.iter().enumerate() {
let node_str = node_address.to_string();
nodes.insert(node_str, log_results[i].clone());
}
let mut all_logs = json!({
"success": true,
"group_id": group.id,
"configuration": group.configuration_name,
"created_at": group.created_at,
});
if let Some(obj) = all_logs.as_object_mut() {
obj.insert("nodes".to_string(), serde_json::Value::Object(nodes));
}
HttpResponse::Ok().json(all_logs)
}
Ok(None) => HttpResponse::NotFound().json(json!({
"success": false,
"error": format!("Group not found: {}", group_id.as_str())
})),
Err(e) => HttpResponse::InternalServerError().json(json!({
"success": false,
"error": format!("Failed to get group: {}", e)
})),
}
} else {
HttpResponse::ServiceUnavailable().json(json!({
"success": false,
"error": "Node groups plugin is not enabled"
}))
}
}
async fn fetch_node_logs_p2p(
node_address: Address,
app_state: Data<AppState>,
) -> serde_json::Value {
let node = match app_state
.store_context
.node_store
.get_node(&node_address)
.await
{
Ok(node) => node,
Err(e) => {
error!("Failed to get node {node_address}: {e}");
return json!({
"success": false,
"error": format!("Failed to get node: {}", e)
});
}
};
match node {
Some(node) => {
// Check if node has P2P information
let (worker_p2p_id, worker_p2p_addresses) =
match (&node.worker_p2p_id, &node.worker_p2p_addresses) {
(Some(p2p_id), Some(p2p_addrs)) if !p2p_addrs.is_empty() => (p2p_id, p2p_addrs),
_ => {
error!("Node {node_address} does not have P2P information");
return json!({
"success": false,
"error": "Node does not have P2P information",
"status": node.status.to_string()
});
}
};
// Send P2P request for task logs
let (response_tx, response_rx) = tokio::sync::oneshot::channel();
let get_task_logs_request = crate::p2p::GetTaskLogsRequest {
worker_wallet_address: node_address,
worker_p2p_id: worker_p2p_id.clone(),
worker_addresses: worker_p2p_addresses.clone(),
response_tx,
};
if let Err(e) = app_state.get_task_logs_tx.send(get_task_logs_request).await {
error!("Failed to send GetTaskLogsRequest for node {node_address}: {e}");
return json!({
"success": false,
"error": format!("Failed to send request: {}", e),
"status": node.status.to_string()
});
};
match tokio::time::timeout(Duration::from_secs(NODE_REQUEST_TIMEOUT), response_rx).await
{
Ok(Ok(log_lines)) => {
json!({
"success": true,
"logs": log_lines,
"status": node.status.to_string()
})
}
Ok(Err(e)) => {
error!("P2P request failed for node {node_address}: {e}");
json!({
"success": false,
"error": format!("P2P request failed: {}", e),
"status": node.status.to_string()
})
}
Err(_) => {
error!("P2P request timed out for node {node_address}");
json!({
"success": false,
"error": "P2P request timed out",
"status": node.status.to_string()
})
}
}
}
None => {
error!("Node {node_address} not found in orchestrator");
json!({
"success": false,
"error": "Node not found in orchestrator",
"address": node_address.to_string()
})
}
}
}
#[utoipa::path(
post,
path = "/groups/force-regroup",
request_body = ForceRegroupRequest,
responses(
(status = 200, description = "Force regroup initiated successfully"),
(status = 404, description = "Configuration not found"),
(status = 503, description = "Node groups plugin is not enabled"),
(status = 500, description = "Internal server error")
),
tag = "groups"
)]
async fn force_regroup(
request: web::Json<ForceRegroupRequest>,
app_state: Data<AppState>,
) -> HttpResponse {
if let Some(node_groups_plugin) = &app_state.node_groups_plugin {
// Check if the configuration exists
let all_configs = node_groups_plugin.get_all_configuration_templates();
let config_exists = all_configs
.iter()
.any(|c| c.name == request.configuration_name);
if !config_exists {
return HttpResponse::NotFound().json(json!({
"success": false,
"error": format!("Configuration '{}' not found", request.configuration_name)
}));
}
match node_groups_plugin.get_all_groups().await {
Ok(groups) => {
let groups_to_dissolve: Vec<_> = groups
.into_iter()
.filter(|g| g.configuration_name == request.configuration_name)
.collect();
let group_count = groups_to_dissolve.len();
let node_count: usize = groups_to_dissolve.iter().map(|g| g.nodes.len()).sum();
// Dissolve all groups of this configuration
for group in groups_to_dissolve {
if let Err(e) = node_groups_plugin.dissolve_group(&group.id).await {
error!(
"Failed to dissolve group {} during force regroup: {}",
group.id, e
);
}
}
HttpResponse::Ok().json(json!({
"success": true,
"message": format!(
"Force regroup initiated for configuration '{}'. Dissolved {} groups containing {} nodes. New groups will form automatically.",
request.configuration_name,
group_count,
node_count
),
"dissolved_groups": group_count,
"affected_nodes": node_count
}))
}
Err(e) => HttpResponse::InternalServerError().json(json!({
"success": false,
"error": format!("Failed to get groups: {}", e)
})),
}
} else {
HttpResponse::ServiceUnavailable().json(json!({
"success": false,
"error": "Node groups plugin is not enabled"
}))
}
}
pub(crate) fn groups_routes() -> Scope {
web::scope("/groups")
.route("", get().to(get_groups))
.route("/configs", get().to(get_configurations))
.route("/force-regroup", post().to(force_regroup))
.route("/{group_id}", delete().to(delete_group))
.route("/{group_id}/logs", get().to(get_group_logs))
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/metrics.rs | crates/orchestrator/src/api/routes/metrics.rs | use crate::api::server::AppState;
use actix_web::{
web::{self, delete, get, post, Data, Path},
HttpResponse, Scope,
};
use log::error;
use serde::Deserialize;
use serde_json::json;
use utoipa::ToSchema;
#[derive(Deserialize, ToSchema)]
struct ManualMetricEntry {
label: String,
value: f64,
}
#[derive(Deserialize, ToSchema)]
struct DeleteMetricRequest {
label: String,
address: String,
}
#[utoipa::path(
get,
path = "/metrics",
responses(
(status = 200, description = "Aggregate metrics for all tasks retrieved successfully"),
(status = 500, description = "Internal server error")
),
tag = "metrics"
)]
async fn get_metrics(app_state: Data<AppState>) -> HttpResponse {
let metrics = match app_state
.store_context
.metrics_store
.get_aggregate_metrics_for_all_tasks()
.await
{
Ok(metrics) => metrics,
Err(e) => {
error!("Error getting aggregate metrics for all tasks: {e}");
Default::default()
}
};
HttpResponse::Ok().json(json!({"success": true, "metrics": metrics}))
}
#[utoipa::path(
get,
path = "/metrics/all",
responses(
(status = 200, description = "All metrics retrieved successfully"),
(status = 500, description = "Internal server error")
),
tag = "metrics"
)]
async fn get_all_metrics(app_state: Data<AppState>) -> HttpResponse {
let metrics = match app_state
.store_context
.metrics_store
.get_all_metrics()
.await
{
Ok(metrics) => metrics,
Err(e) => {
error!("Error getting all metrics: {e}");
Default::default()
}
};
HttpResponse::Ok().json(json!({"success": true, "metrics": metrics}))
}
#[utoipa::path(
get,
path = "/metrics/prometheus",
responses(
(status = 200, description = "Prometheus metrics exported successfully", content_type = "text/plain"),
(status = 500, description = "Internal server error")
),
tag = "metrics"
)]
async fn get_prometheus_metrics(app_state: Data<AppState>) -> HttpResponse {
match app_state.metrics.export_metrics() {
Ok(metrics) => HttpResponse::Ok()
.content_type("text/plain; version=0.0.4")
.body(metrics),
Err(e) => HttpResponse::InternalServerError().json(json!({
"success": false,
"error": format!("Failed to export metrics: {}", e)
})),
}
}
#[utoipa::path(
post,
path = "/metrics",
request_body = ManualMetricEntry,
responses(
(status = 200, description = "Manual metric created successfully"),
(status = 500, description = "Internal server error")
),
tag = "metrics"
)]
// for potential backup restore purposes
async fn create_metric(
app_state: Data<AppState>,
metric: web::Json<ManualMetricEntry>,
) -> HttpResponse {
if let Err(e) = app_state
.store_context
.metrics_store
.store_manual_metrics(metric.label.clone(), metric.value)
.await
{
error!("Error storing manual metric: {e}");
}
HttpResponse::Ok().json(json!({"success": true}))
}
#[utoipa::path(
delete,
path = "/metrics/{task_id}",
params(
("task_id" = String, Path, description = "Task ID to delete metrics for")
),
request_body = DeleteMetricRequest,
responses(
(status = 200, description = "Metric deleted successfully"),
(status = 500, description = "Internal server error")
),
tag = "metrics"
)]
async fn delete_metric(
app_state: Data<AppState>,
task_id: Path<String>,
body: web::Json<DeleteMetricRequest>,
) -> HttpResponse {
let success = match app_state
.store_context
.metrics_store
.delete_metric(&task_id, &body.label, &body.address)
.await
{
Ok(success) => success,
Err(e) => {
error!("Error deleting metric: {e}");
false
}
};
HttpResponse::Ok().json(json!({
"success": success
}))
}
pub(crate) fn metrics_routes() -> Scope {
web::scope("/metrics")
.route("", get().to(get_metrics))
.route("/all", get().to(get_all_metrics))
.route("/prometheus", get().to(get_prometheus_metrics))
.route("", post().to(create_metric))
.route("/{task_id}", delete().to(delete_metric))
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/api/routes/heartbeat.rs | crates/orchestrator/src/api/routes/heartbeat.rs | use crate::{api::server::AppState, models::node::NodeStatus};
use actix_web::{
web::{self, post, Data},
HttpResponse, Scope,
};
use alloy::primitives::Address;
use log::error;
use serde_json::json;
use shared::models::{
api::ApiResponse,
heartbeat::{HeartbeatRequest, HeartbeatResponse},
};
use std::collections::HashSet;
use std::str::FromStr;
async fn heartbeat(
heartbeat: web::Json<HeartbeatRequest>,
app_state: Data<AppState>,
) -> HttpResponse {
let task_info = heartbeat.clone();
let Ok(node_address) = Address::from_str(&heartbeat.address) else {
return HttpResponse::BadRequest().json(json!({
"success": false,
"error": "Invalid node address format"
}));
};
// Track heartbeat request in metrics
app_state
.metrics
.increment_heartbeat_requests(&heartbeat.address);
let node_opt = app_state
.store_context
.node_store
.get_node(&node_address)
.await;
match node_opt {
Ok(Some(node)) => {
if node.status == NodeStatus::Banned {
return HttpResponse::BadRequest().json(json!({
"success": false,
"error": "Node is banned"
}));
}
}
_ => {
return HttpResponse::BadRequest().json(json!({
"success": false,
"error": "Node not found"
}));
}
}
if let Err(e) = app_state
.store_context
.node_store
.update_node_task(
node_address,
task_info.task_id.clone(),
task_info.task_state.clone(),
task_info.task_details,
)
.await
{
error!("Error updating node task: {e}");
}
// Record task state metric if task information is available
if let (Some(task_id), Some(task_state)) = (&task_info.task_id, &task_info.task_state) {
app_state
.metrics
.set_task_state(&heartbeat.address, task_id, task_state);
}
if let Some(p2p_id) = &heartbeat.p2p_id {
if let Err(e) = app_state
.store_context
.node_store
.update_node_p2p_id(&node_address, p2p_id)
.await
{
error!("Error updating node p2p id: {e}");
}
}
if let Err(e) = app_state
.store_context
.heartbeat_store
.beat(&heartbeat)
.await
{
error!("Heartbeat Error: {e}");
}
if let Some(metrics) = heartbeat.metrics.clone() {
// Get current metric keys for this node efficiently using HKEYS
let previous_metric_keys = match app_state
.store_context
.metrics_store
.get_metric_keys_for_node(node_address)
.await
{
Ok(keys) => keys,
Err(e) => {
error!("Error getting metric keys for node: {e}");
Vec::new()
}
};
// Create a HashSet of new metric keys for efficient lookup
let new_metrics_set: HashSet<String> = metrics
.iter()
.map(|metric| {
let task_id = if metric.key.task_id.is_empty() {
"manual".to_string()
} else {
metric.key.task_id.clone()
};
format!("{}:{}", task_id, metric.key.label.replace(':', ""))
})
.collect();
// Find stale metrics to delete
let stale_metrics: Vec<String> = previous_metric_keys
.into_iter()
.filter(|key| !new_metrics_set.contains(key))
.collect();
// Delete stale metrics efficiently
for metric_key in stale_metrics {
if let Some((task_id, label)) = metric_key.split_once(':') {
if let Err(e) = app_state
.store_context
.metrics_store
.delete_metric(task_id, label, &node_address.to_string())
.await
{
error!("Error deleting metric: {e}");
}
}
}
// Store new metrics in Redis
if let Err(e) = app_state
.store_context
.metrics_store
.store_metrics(Some(metrics.clone()), node_address)
.await
{
error!("Error storing metrics: {e}");
}
}
let current_task = app_state.scheduler.get_task_for_node(node_address).await;
match current_task {
Ok(Some(task)) => {
let resp: HttpResponse = ApiResponse::new(
true,
HeartbeatResponse {
current_task: Some(task),
},
)
.into();
resp
}
_ => HttpResponse::Ok().json(ApiResponse::new(
true,
HeartbeatResponse { current_task: None },
)),
}
}
pub(crate) fn heartbeat_routes() -> Scope {
web::scope("/heartbeat").route("", post().to(heartbeat))
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use super::*;
use crate::api::tests::helper::create_test_app_state;
use crate::metrics::sync_service::MetricsSyncService;
use crate::models::node::OrchestratorNode;
use crate::ServerMode;
use actix_web::http::StatusCode;
use actix_web::test;
use actix_web::App;
use serde_json::json;
use shared::models::metric::MetricEntry;
use shared::models::metric::MetricKey;
use shared::models::task::TaskRequest;
#[actix_web::test]
async fn test_heartbeat() {
let app_state = create_test_app_state().await;
let app = test::init_service(
App::new()
.app_data(app_state.clone())
.route("/heartbeat", web::post().to(heartbeat)),
)
.await;
let address = "0x0000000000000000000000000000000000000000".to_string();
let node_address = Address::from_str(&address).unwrap();
let node = OrchestratorNode {
address: node_address,
status: NodeStatus::Healthy,
..Default::default()
};
let _ = app_state.store_context.node_store.add_node(node).await;
let req_payload = json!({"address": address, "metrics": [
{"key": {"task_id": "long-task-1234", "label": "performance/batch_avg_seq_length"}, "value": 1.0},
{"key": {"task_id": "long-task-1234", "label": "performance/batch_min_seq_length"}, "value": 5.0}
]});
let req = test::TestRequest::post()
.uri("/heartbeat")
.set_json(&req_payload)
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
assert_eq!(json["current_task"], serde_json::Value::Null);
let node_address = Address::from_str(&address).unwrap();
let value = app_state
.store_context
.heartbeat_store
.get_heartbeat(&node_address)
.await
.unwrap();
assert_eq!(
value,
Some(HeartbeatRequest {
address: "0x0000000000000000000000000000000000000000".to_string(),
task_id: None,
task_state: None,
metrics: Some(vec![
MetricEntry {
key: MetricKey {
task_id: "long-task-1234".to_string(),
label: "performance/batch_avg_seq_length".to_string(),
},
value: 1.0,
},
MetricEntry {
key: MetricKey {
task_id: "long-task-1234".to_string(),
label: "performance/batch_min_seq_length".to_string(),
},
value: 5.0,
}
]),
version: None,
timestamp: None,
p2p_id: None,
task_details: None,
})
);
// Verify metrics are stored in Redis (heartbeat API responsibility)
let redis_metrics = app_state
.store_context
.metrics_store
.get_metrics_for_node(node_address)
.await
.unwrap();
assert!(redis_metrics.contains_key("long-task-1234"));
assert!(redis_metrics["long-task-1234"].contains_key("performance/batch_avg_seq_length"));
assert!(redis_metrics["long-task-1234"].contains_key("performance/batch_min_seq_length"));
// Test metrics sync service: Redis -> Prometheus
// Verify Prometheus registry is initially empty (no sync service has run)
let prometheus_metrics_before = app_state.metrics.export_metrics().unwrap();
assert!(!prometheus_metrics_before.contains("performance/batch_avg_seq_length"));
assert!(prometheus_metrics_before.contains(&format!("orchestrator_heartbeat_requests_total{{node_address=\"0x0000000000000000000000000000000000000000\",pool_id=\"{}\"}} 1", app_state.metrics.pool_id)));
// Create and run sync service manually to test the sync functionality
let sync_service = MetricsSyncService::new(
app_state.store_context.clone(),
app_state.metrics.clone(),
ServerMode::Full, // Test app uses Full mode
10,
None, // No node groups plugin in test
);
// Manually trigger a sync operation
sync_service.sync_metrics_from_redis().await.unwrap();
// Verify Prometheus registry now contains the metrics from Redis
let prometheus_metrics_after = app_state.metrics.export_metrics().unwrap();
assert!(prometheus_metrics_after.contains("performance/batch_avg_seq_length"));
assert!(prometheus_metrics_after.contains("performance/batch_min_seq_length"));
assert!(prometheus_metrics_after.contains("long-task-1234"));
let heartbeat_two = json!({"address": address, "metrics": [
{"key": {"task_id": "long-task-1235", "label": "performance/batch_len"}, "value": 10.0},
{"key": {"task_id": "long-task-1235", "label": "performance/batch_min_len"}, "value": 50.0}
]});
let req = test::TestRequest::post()
.uri("/heartbeat")
.set_json(&heartbeat_two)
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
// Verify new metrics in Redis and old metrics cleaned up
let redis_metrics = app_state
.store_context
.metrics_store
.get_metrics_for_node(node_address)
.await
.unwrap();
assert!(redis_metrics.contains_key("long-task-1235"));
assert!(redis_metrics["long-task-1235"].contains_key("performance/batch_len"));
assert!(redis_metrics["long-task-1235"].contains_key("performance/batch_min_len"));
assert!(!redis_metrics.contains_key("long-task-1234")); // Stale metrics cleaned up
let aggregated_metrics = app_state
.store_context
.metrics_store
.get_aggregate_metrics_for_all_tasks()
.await
.unwrap();
assert_eq!(aggregated_metrics.len(), 2);
assert_eq!(aggregated_metrics.get("performance/batch_len"), Some(&10.0));
assert_eq!(
aggregated_metrics.get("performance/batch_min_len"),
Some(&50.0)
);
assert_eq!(
aggregated_metrics.get("performance/batch_avg_seq_length"),
None
);
sync_service.sync_metrics_from_redis().await.unwrap();
let prometheus_metrics_after_two = app_state.metrics.export_metrics().unwrap();
assert!(prometheus_metrics_after_two.contains("performance/batch_len"));
assert!(prometheus_metrics_after_two.contains("performance/batch_min_len"));
assert!(prometheus_metrics_after_two.contains("long-task-1235"));
assert!(!prometheus_metrics_after_two.contains("long-task-1234"));
let heartbeat_three = json!({"address": address, "metrics": [
]});
let req = test::TestRequest::post()
.uri("/heartbeat")
.set_json(&heartbeat_three)
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
// Verify all metrics cleaned up from Redis
let redis_metrics = app_state
.store_context
.metrics_store
.get_metrics_for_node(node_address)
.await
.unwrap();
assert!(redis_metrics.is_empty()); // All metrics for this node should be gone
let aggregated_metrics = app_state
.store_context
.metrics_store
.get_aggregate_metrics_for_all_tasks()
.await
.unwrap();
assert_eq!(aggregated_metrics, HashMap::new());
}
#[actix_web::test]
async fn test_heartbeat_with_task() {
let app_state = create_test_app_state().await;
let app = test::init_service(
App::new()
.app_data(app_state.clone())
.route("/heartbeat", web::post().to(heartbeat)),
)
.await;
let address = "0x0000000000000000000000000000000000000000".to_string();
let task = TaskRequest {
image: "test".to_string(),
name: "test".to_string(),
..Default::default()
};
let node = OrchestratorNode {
address: Address::from_str(&address).unwrap(),
status: NodeStatus::Healthy,
..Default::default()
};
let _ = app_state.store_context.node_store.add_node(node).await;
let task = match task.try_into() {
Ok(task) => task,
Err(e) => panic!("Failed to convert TaskRequest to Task: {}", e),
};
let _ = app_state.store_context.task_store.add_task(task).await;
let req = test::TestRequest::post()
.uri("/heartbeat")
.set_json(json!({"address": "0x0000000000000000000000000000000000000000"}))
.to_request();
let resp = test::call_service(&app, req).await;
let body = test::read_body(resp).await;
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
assert_eq!(json["success"], serde_json::Value::Bool(true));
assert_eq!(
json["data"]["current_task"]["image"],
serde_json::Value::String("test".to_string())
);
let node_address = Address::from_str(&address).unwrap();
let value = app_state
.store_context
.heartbeat_store
.get_heartbeat(&node_address)
.await
.unwrap();
// Task has not started yet
let heartbeat = HeartbeatRequest {
address: "0x0000000000000000000000000000000000000000".to_string(),
task_details: None,
..Default::default()
};
assert_eq!(value, Some(heartbeat));
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/mod.rs | crates/orchestrator/src/plugins/mod.rs | use crate::plugins::newest_task::NewestTaskPlugin;
use alloy::primitives::Address;
use anyhow::Result;
use shared::models::task::Task;
use std::sync::Arc;
use crate::{
models::node::{NodeStatus, OrchestratorNode},
plugins::node_groups::NodeGroupsPlugin,
plugins::webhook::WebhookPlugin,
};
pub(crate) mod newest_task;
pub(crate) mod node_groups;
pub(crate) mod webhook;
#[derive(Clone)]
pub enum StatusUpdatePlugin {
NodeGroupsPlugin(Arc<NodeGroupsPlugin>),
WebhookPlugin(WebhookPlugin),
}
impl StatusUpdatePlugin {
pub(crate) async fn handle_status_change(
&self,
node: &OrchestratorNode,
status: &NodeStatus,
) -> Result<()> {
match self {
StatusUpdatePlugin::NodeGroupsPlugin(plugin) => plugin.handle_status_change(node).await,
StatusUpdatePlugin::WebhookPlugin(plugin) => plugin.handle_status_change(node, status),
}
}
}
impl From<Arc<NodeGroupsPlugin>> for StatusUpdatePlugin {
fn from(plugin: Arc<NodeGroupsPlugin>) -> Self {
StatusUpdatePlugin::NodeGroupsPlugin(plugin)
}
}
impl From<&Arc<NodeGroupsPlugin>> for StatusUpdatePlugin {
fn from(plugin: &Arc<NodeGroupsPlugin>) -> Self {
StatusUpdatePlugin::NodeGroupsPlugin(plugin.clone())
}
}
impl From<WebhookPlugin> for StatusUpdatePlugin {
fn from(plugin: WebhookPlugin) -> Self {
StatusUpdatePlugin::WebhookPlugin(plugin)
}
}
impl From<&WebhookPlugin> for StatusUpdatePlugin {
fn from(plugin: &WebhookPlugin) -> Self {
StatusUpdatePlugin::WebhookPlugin(plugin.clone())
}
}
#[derive(Clone)]
pub enum SchedulerPlugin {
NodeGroupsPlugin(Arc<NodeGroupsPlugin>),
NewestTaskPlugin(NewestTaskPlugin),
}
impl SchedulerPlugin {
pub(crate) async fn filter_tasks(
&self,
tasks: &[Task],
node_address: &Address,
) -> Result<Vec<Task>> {
match self {
SchedulerPlugin::NodeGroupsPlugin(plugin) => {
plugin.filter_tasks(tasks, node_address).await
}
SchedulerPlugin::NewestTaskPlugin(plugin) => plugin.filter_tasks(tasks),
}
}
}
impl From<Arc<NodeGroupsPlugin>> for SchedulerPlugin {
fn from(plugin: Arc<NodeGroupsPlugin>) -> Self {
SchedulerPlugin::NodeGroupsPlugin(plugin)
}
}
impl From<&Arc<NodeGroupsPlugin>> for SchedulerPlugin {
fn from(plugin: &Arc<NodeGroupsPlugin>) -> Self {
SchedulerPlugin::NodeGroupsPlugin(plugin.clone())
}
}
impl From<NewestTaskPlugin> for SchedulerPlugin {
fn from(plugin: NewestTaskPlugin) -> Self {
SchedulerPlugin::NewestTaskPlugin(plugin)
}
}
impl From<&NewestTaskPlugin> for SchedulerPlugin {
fn from(plugin: &NewestTaskPlugin) -> Self {
SchedulerPlugin::NewestTaskPlugin(plugin.clone())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/newest_task/mod.rs | crates/orchestrator/src/plugins/newest_task/mod.rs | use anyhow::Result;
use shared::models::task::Task;
#[derive(Clone)]
pub struct NewestTaskPlugin;
impl NewestTaskPlugin {
pub(crate) fn filter_tasks(&self, tasks: &[Task]) -> Result<Vec<Task>> {
if tasks.is_empty() {
return Ok(vec![]);
}
// Find newest task based on created_at timestamp
Ok(tasks
.iter()
.max_by_key(|task| task.created_at)
.map(|task| vec![task.clone()])
.unwrap_or_default())
}
}
#[cfg(test)]
mod tests {
use shared::models::task::TaskState;
use uuid::Uuid;
use super::*;
#[test]
fn test_filter_tasks() {
let plugin = NewestTaskPlugin;
let tasks = vec![
Task {
id: Uuid::new_v4(),
image: "image".to_string(),
name: "name".to_string(),
state: TaskState::PENDING,
created_at: 1,
..Default::default()
},
Task {
id: Uuid::new_v4(),
image: "image".to_string(),
name: "name".to_string(),
state: TaskState::PENDING,
created_at: 2,
..Default::default()
},
];
let filtered_tasks = plugin.filter_tasks(&tasks).unwrap();
assert_eq!(filtered_tasks.len(), 1);
assert_eq!(filtered_tasks[0].id, tasks[1].id);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/node_groups/status_update_impl.rs | crates/orchestrator/src/plugins/node_groups/status_update_impl.rs | use crate::models::node::{NodeStatus, OrchestratorNode};
use crate::plugins::node_groups::NodeGroupsPlugin;
use anyhow::Error;
use anyhow::Result;
use log::info;
impl NodeGroupsPlugin {
pub(crate) async fn handle_status_change(&self, node: &OrchestratorNode) -> Result<(), Error> {
let node_addr = node.address.to_string();
info!(
"Handling node status change in group plugin: node {} status is now {:?}",
node_addr, node.status
);
match node.status {
NodeStatus::Dead | NodeStatus::LowBalance => {
// Dissolve entire group if node becomes unhealthy
if let Some(group) = self.get_node_group(&node_addr).await? {
info!(
"Node {} became {}, dissolving entire group {} with {} nodes",
node_addr,
node.status,
group.id,
group.nodes.len()
);
self.dissolve_group(&group.id).await?;
}
}
_ => {
info!(
"No group action needed for node {} with status {:?}",
node_addr, node.status
);
}
}
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/node_groups/tests.rs | crates/orchestrator/src/plugins/node_groups/tests.rs | use crate::{
models::node::{NodeStatus, OrchestratorNode},
plugins::node_groups::{
NodeGroup, NodeGroupConfiguration, NodeGroupsPlugin, ProximityOptimizationPolicy,
TaskSwitchingPolicy, GROUP_KEY_PREFIX, NODE_GROUP_MAP_KEY,
},
store::core::{RedisStore, StoreContext},
};
use alloy::primitives::Address;
use redis::Commands;
use shared::models::node::NodeLocation;
use shared::models::{
node::{ComputeRequirements, ComputeSpecs, GpuSpecs},
task::{SchedulingConfig, Task, TaskState},
};
use std::collections::BTreeSet;
use std::{collections::HashMap, str::FromStr, sync::Arc};
use crate::plugins::node_groups::enable_configuration;
use crate::plugins::node_groups::get_task_topologies;
use uuid::Uuid;
/// Builds a healthy-ish test node whose IP is derived deterministically
/// from the first four bytes of its Ethereum address, so distinct
/// addresses always yield distinct endpoints.
fn create_test_node(
    addr: &str,
    status: NodeStatus,
    compute_specs: Option<ComputeSpecs>,
) -> OrchestratorNode {
    let parsed = Address::from_str(addr).unwrap();
    let octets = parsed.to_vec();
    let ip_address = format!(
        "{}.{}.{}.{}",
        octets[0], octets[1], octets[2], octets[3]
    );
    OrchestratorNode {
        address: parsed,
        ip_address,
        port: 8080,
        status,
        p2p_id: Some("test_p2p_id".to_string()),
        compute_specs,
        ..Default::default()
    }
}
fn create_test_node_with_location(
addr: &str,
status: NodeStatus,
compute_specs: Option<ComputeSpecs>,
location: NodeLocation,
) -> OrchestratorNode {
let mut node = create_test_node(addr, status, compute_specs);
node.location = Some(location);
node
}
#[tokio::test]
async fn test_parsing_groups_from_string() {
    // Two topology configs in JSON form; compute requirements use the
    // compact "section:key=value;..." string syntax handled by the custom
    // deserializer.
    let group_config = r#"
        [
            {
                "name": "a100-group",
                "min_group_size": 2,
                "max_group_size": 2,
                "compute_requirements": "gpu:model=A100;gpu:count=8"
            },
            {
                "name": "h100-group",
                "min_group_size": 1,
                "max_group_size": 1,
                "compute_requirements": "gpu:count=8;gpu:model=H100"
            }
        ]
        "#;
    let groups = serde_json::from_str::<Vec<NodeGroupConfiguration>>(group_config).unwrap();
    assert_eq!(groups.len(), 2);
    // Each entry must round-trip its name, size bounds, and a single GPU
    // spec with count 8 — order of keys in the requirement string must not
    // matter.
    let expected = [
        ("a100-group", 2usize, 2usize, "A100"),
        ("h100-group", 1usize, 1usize, "H100"),
    ];
    for (config, (name, min_size, max_size, model)) in groups.iter().zip(expected) {
        assert_eq!(config.name, name);
        assert_eq!(config.min_group_size, min_size);
        assert_eq!(config.max_group_size, max_size);
        let requirements = config.compute_requirements.as_ref().unwrap();
        assert_eq!(requirements.gpu.len(), 1);
        let gpu_spec = &requirements.gpu[0];
        assert_eq!(gpu_spec.model, Some(model.to_string()));
        assert_eq!(gpu_spec.count, Some(8));
    }
}
#[tokio::test]
async fn test_group_formation_and_dissolution() {
    // End-to-end lifecycle: two healthy nodes form a group once a task
    // referencing the topology exists; one node turning Dead dissolves the
    // whole group.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 2,
        max_group_size: 5,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    // A task whose scheduling config allows "test-config" — groups only
    // form for topologies some pending task actually references.
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    let _ = plugin.try_form_new_groups().await;
    // Add first healthy node — alone it is below min_group_size (2).
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Add second healthy node to form group
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Verify group was created (second call must be idempotent).
    let _ = plugin.try_form_new_groups().await;
    let mut conn = plugin.store.client.get_connection().unwrap();
    let group_id: Option<String> = conn
        .hget(NODE_GROUP_MAP_KEY, node1.address.to_string())
        .unwrap();
    assert!(group_id.is_some());
    // Make node unhealthy
    let node1_dead = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Dead,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .update_node_status(&node1_dead.address, NodeStatus::Dead)
        .await;
    let _ = plugin.handle_status_change(&node1_dead).await;
    let _ = plugin.try_form_new_groups().await;
    // Verify group was dissolved: the node->group mapping must be gone.
    let group_id: Option<String> = conn
        .hget(NODE_GROUP_MAP_KEY, node1.address.to_string())
        .unwrap();
    assert!(group_id.is_none());
}
#[tokio::test]
async fn test_group_formation_with_multiple_configs() {
    // With a 2-node config and a 1-node config both allowed, three nodes
    // should end up fully grouped: one pair plus one solo group.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let config_s = NodeGroupConfiguration {
        name: "test-config-s".to_string(),
        min_group_size: 2,
        max_group_size: 2,
        compute_requirements: None,
    };
    let config_xs = NodeGroupConfiguration {
        name: "test-config-xs".to_string(),
        min_group_size: 1,
        max_group_size: 1,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config_s, config_xs],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    // Task allowing both topologies so either group shape may form.
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config-s".to_string(), "test-config-xs".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    // Add first healthy node
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    // Add second healthy node to form group
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Third node can only fit the 1-node "xs" config.
    let node3 = create_test_node(
        "0x3234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node3.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Exactly two group keys should exist in Redis.
    let mut conn = plugin.store.client.get_connection().unwrap();
    let groups: Vec<String> = conn
        .keys(format!("{}*", GROUP_KEY_PREFIX).as_str())
        .unwrap();
    assert_eq!(groups.len(), 2);
    // Verify every node is mapped to some group.
    let mut conn = plugin.store.client.get_connection().unwrap();
    let group_id: Option<String> = conn
        .hget(NODE_GROUP_MAP_KEY, node1.address.to_string())
        .unwrap();
    assert!(group_id.is_some());
    let group_id: Option<String> = conn
        .hget(NODE_GROUP_MAP_KEY, node2.address.to_string())
        .unwrap();
    assert!(group_id.is_some());
    let group_id: Option<String> = conn
        .hget(NODE_GROUP_MAP_KEY, node3.address.to_string())
        .unwrap();
    assert!(group_id.is_some());
}
#[tokio::test]
async fn test_group_formation_with_requirements_and_single_node() {
    // Compute-requirement gating for a solo (size-1) topology: a node with
    // no specs must stay ungrouped; a node with matching 8x RTX4090 specs
    // forms a group immediately.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let requirement_str = "gpu:count=8;gpu:model=RTX4090;";
    let requirements = ComputeRequirements::from_str(requirement_str).unwrap();
    let config = NodeGroupConfiguration {
        name: "test-config-with-requirements".to_string(),
        min_group_size: 1,
        max_group_size: 1,
        compute_requirements: Some(requirements),
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config-with-requirements".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    // Node with no compute specs — cannot satisfy the requirements.
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Ensure node is not in a group since it does not meet requirements
    let group_id_node_1 = plugin
        .get_node_group(&node1.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_1.is_none());
    // Node whose GPU specs exactly match the requirement string.
    let node_2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        Some(ComputeSpecs {
            gpu: Some(GpuSpecs {
                count: Some(8),
                model: Some("RTX4090".to_string()),
                memory_mb: Some(24),
                indices: Some(vec![0]),
            }),
            ..Default::default()
        }),
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node_2.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    let group_id_node_2 = plugin
        .get_node_group(&node_2.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_2.is_some());
}
#[tokio::test]
async fn test_group_formation_with_requirements_and_multiple_nodes() {
    // Requirement gating for a 2-node topology: only the two spec-matching
    // nodes pair up; the spec-less node is never grouped.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let requirement_str = "gpu:count=8;gpu:model=RTX4090;";
    let requirements = ComputeRequirements::from_str(requirement_str).unwrap();
    let config = NodeGroupConfiguration {
        name: "test-config-with-requirements".to_string(),
        min_group_size: 2,
        max_group_size: 2,
        compute_requirements: Some(requirements),
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config-with-requirements".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    // node1 has no compute specs and can never satisfy the requirements.
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // node2 matches the requirements, but alone is below min_group_size.
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        Some(ComputeSpecs {
            gpu: Some(GpuSpecs {
                count: Some(8),
                model: Some("RTX4090".to_string()),
                memory_mb: Some(24),
                indices: Some(vec![0]),
            }),
            ..Default::default()
        }),
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Neither node should be grouped yet: one ineligible, one waiting.
    let group_id_node_1 = plugin
        .get_node_group(&node1.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_1.is_none());
    let group_id_node_2 = plugin
        .get_node_group(&node2.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_2.is_none());
    // node3 is the second eligible node — completes the pair with node2.
    let node3 = create_test_node(
        "0x3234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        Some(ComputeSpecs {
            gpu: Some(GpuSpecs {
                count: Some(8),
                model: Some("RTX4090".to_string()),
                memory_mb: Some(24),
                indices: Some(vec![0]),
            }),
            ..Default::default()
        }),
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node3.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    let group_id_node_3 = plugin
        .get_node_group(&node3.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_3.is_some());
    let group_id_node_2 = plugin
        .get_node_group(&node2.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_2.is_some());
    // Node 1 does not fullfill the requirements - hence it will not get added to the group
    let group_id_node_1 = plugin
        .get_node_group(&node1.address.to_string())
        .await
        .unwrap();
    assert!(group_id_node_1.is_none());
}
#[tokio::test]
async fn test_group_scheduling() {
    // Two grouped nodes request work concurrently: both must receive the
    // SAME task, with distinct ${GROUP_INDEX} ranks and the template
    // placeholders (${GROUP_SIZE}, ${GROUP_ID}, upload counters) expanded
    // in both env vars and cmd.
    let store: Arc<RedisStore> = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 2,
        max_group_size: 5,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    // Env vars full of ${...} placeholders the scheduler must substitute.
    let mut env_vars = HashMap::new();
    env_vars.insert("LOCAL_RANK".to_string(), "0".to_string());
    env_vars.insert("RANK".to_string(), "${GROUP_INDEX}".to_string());
    env_vars.insert("WORLD_SIZE".to_string(), "${GROUP_SIZE}".to_string());
    env_vars.insert("GROUP_ID".to_string(), "${GROUP_ID}".to_string());
    env_vars.insert(
        "TOTAL_UPLOAD_COUNT".to_string(),
        "${TOTAL_UPLOAD_COUNT}".to_string(),
    );
    env_vars.insert("LAST_FILE_IDX".to_string(), "${LAST_FILE_IDX}".to_string());
    let task1 = Task {
        id: Uuid::new_v4(),
        image: "prime-vllm".to_string(),
        name: "test-task".to_string(),
        env_vars: Some(env_vars),
        // Placeholders also appear inside cmd arguments and must be
        // expanded there as well.
        cmd: Some(vec![
            "uv".to_string(),
            "run".to_string(),
            "generate.py".to_string(),
            "--model".to_string(),
            "model/Qwen3-14B-${GROUP_INDEX}.${GROUP_SIZE}".to_string(),
            "--top-p".to_string(),
            "0.95".to_string(),
            "--group-id".to_string(),
            "${GROUP_ID}".to_string(),
            "--upload-count".to_string(),
            "${TOTAL_UPLOAD_COUNT}".to_string(),
            "--file-number".to_string(),
            "${LAST_FILE_IDX}".to_string(),
        ]),
        entrypoint: None,
        state: TaskState::PENDING,
        created_at: 0,
        ..Default::default()
    };
    let _ = plugin
        .store_context
        .task_store
        .add_task(task1.clone())
        .await;
    let mut task2 = task1.clone();
    task2.id = Uuid::new_v4();
    let _ = plugin
        .store_context
        .task_store
        .add_task(task2.clone())
        .await;
    let mut task3 = task1.clone();
    task3.id = Uuid::new_v4();
    let _ = plugin
        .store_context
        .task_store
        .add_task(task3.clone())
        .await;
    let tasks = vec![task1, task2, task3];
    // Before any group exists a node must receive no work.
    let filtered_tasks = plugin.filter_tasks(&tasks, &node1.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 0);
    let _ = plugin.try_form_new_groups().await;
    // Hand node2 the tasks in reverse order to prove both nodes still
    // converge on the same task regardless of input ordering.
    let mut tasks_clone = tasks.clone();
    tasks_clone.reverse();
    assert_ne!(tasks_clone[0].id, tasks[0].id);
    let group = plugin
        .get_node_group(&node1.address.to_string())
        .await
        .unwrap();
    assert!(group.is_some());
    let group = group.unwrap();
    // Simulate one pending upload for node1 so ${TOTAL_UPLOAD_COUNT}
    // expands to 1 for node1 and 0 for node2.
    let mut redis_con = plugin.store.client.get_connection().unwrap();
    let upload_key = format!("upload:{}:{}:test.txt", node1.address, group.id);
    let _: () = redis_con.set(&upload_key, "pending").unwrap();
    let (filtered_tasks_1, filtered_tasks_2) = tokio::join!(
        async { plugin.filter_tasks(&tasks, &node1.address).await },
        async { plugin.filter_tasks(&tasks_clone, &node2.address).await }
    );
    // Check both nodes get assigned valid and different indexes
    // Also ensure both nodes get the same task
    let filtered_tasks_1 = filtered_tasks_1.unwrap();
    let filtered_tasks_2 = filtered_tasks_2.unwrap();
    assert_eq!(filtered_tasks_1.len(), 1);
    let task_node_1 = &filtered_tasks_1[0];
    let env_vars_1 = task_node_1.env_vars.as_ref().unwrap();
    assert_eq!(env_vars_1.get("GROUP_INDEX").unwrap(), "0");
    assert_eq!(env_vars_1.get("RANK").unwrap(), "0");
    assert_eq!(env_vars_1.get("WORLD_SIZE").unwrap(), "2");
    assert_eq!(task_node_1.cmd.as_ref().unwrap()[4], "model/Qwen3-14B-0.2");
    assert_ne!(env_vars_1.get("GROUP_ID").unwrap(), "${GROUP_ID}");
    assert_eq!(env_vars_1.get("TOTAL_UPLOAD_COUNT").unwrap(), "1");
    assert_eq!(env_vars_1.get("LAST_FILE_IDX").unwrap(), "0");
    assert_eq!(task_node_1.cmd.as_ref().unwrap()[10], "1"); // Check upload count in cmd
    assert_eq!(filtered_tasks_2.len(), 1);
    let task_node_2 = &filtered_tasks_2[0];
    let env_vars_2 = task_node_2.env_vars.as_ref().unwrap();
    assert_eq!(env_vars_2.get("GROUP_INDEX").unwrap(), "1");
    assert_eq!(env_vars_2.get("RANK").unwrap(), "1");
    assert_eq!(env_vars_2.get("WORLD_SIZE").unwrap(), "2");
    assert_eq!(task_node_2.cmd.as_ref().unwrap()[4], "model/Qwen3-14B-1.2");
    assert_ne!(env_vars_2.get("GROUP_ID").unwrap(), "${GROUP_ID}");
    assert_eq!(env_vars_2.get("TOTAL_UPLOAD_COUNT").unwrap(), "0");
    assert_eq!(env_vars_2.get("LAST_FILE_IDX").unwrap(), "0");
    assert_eq!(task_node_2.cmd.as_ref().unwrap()[10], "0"); // Check upload count in cmd
    assert_eq!(task_node_1.id, task_node_2.id);
}
#[tokio::test]
async fn test_group_scheduling_without_tasks() {
    // With an empty task list, filter_tasks must return nothing for any
    // node, both before and after group formation is attempted.
    let store: Arc<RedisStore> = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 2,
        max_group_size: 5,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    let tasks = vec![];
    let filtered_tasks = plugin.filter_tasks(&tasks, &node1.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 0);
    // No tasks reference any topology, so no group should form either.
    let _ = plugin.try_form_new_groups().await;
    let filtered_tasks = plugin.filter_tasks(&tasks, &node1.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 0);
    let filtered_tasks = plugin.filter_tasks(&tasks, &node2.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 0);
}
#[tokio::test]
async fn test_group_formation_with_max_size() {
    // With max_group_size = 2 and three eligible nodes, exactly one pair
    // may group; the leftover node stays ungrouped and receives no tasks.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    // Set max group size to 2
    let config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 2,
        max_group_size: 2,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let task = Task {
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    // Create three healthy nodes
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let node2 = create_test_node(
        "0x2234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node2.clone())
        .await;
    let node3 = create_test_node(
        "0x3234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node3.clone())
        .await;
    // Handle status changes to trigger group formation
    let _ = plugin.try_form_new_groups().await;
    // Create a test task with ${...} placeholders for the scheduler.
    let mut env_vars = HashMap::new();
    env_vars.insert("RANK".to_string(), "${GROUP_INDEX}".to_string());
    env_vars.insert("WORLD_SIZE".to_string(), "${GROUP_SIZE}".to_string());
    let task = Task {
        id: Uuid::new_v4(),
        image: "test-image".to_string(),
        name: "test-task".to_string(),
        env_vars: Some(env_vars),
        cmd: Some(vec![
            "run".to_string(),
            "--index".to_string(),
            "${GROUP_INDEX}".to_string(),
        ]),
        entrypoint: None,
        state: TaskState::PENDING,
        created_at: 0,
        ..Default::default()
    };
    let _ = plugin.store_context.task_store.add_task(task.clone()).await;
    let tasks = vec![task];
    // Check if node1 and node2 are in a group
    let group1 = plugin
        .get_node_group(&node1.address.to_string())
        .await
        .unwrap();
    let group2 = plugin
        .get_node_group(&node2.address.to_string())
        .await
        .unwrap();
    // Check if node3 is not in a group
    let group3 = plugin
        .get_node_group(&node3.address.to_string())
        .await
        .unwrap();
    // Either node1 and node2 are in a group, or node2 and node3 are in a group
    // But all three cannot be in the same group due to max_group_size=2
    assert!(
        (group1.is_some()
            && group2.is_some()
            && group1.as_ref() == group2.as_ref()
            && group3.is_none())
            || (group2.is_some()
                && group3.is_some()
                && group2.as_ref() == group3.as_ref()
                && group1.is_none())
            || (group1.is_some()
                && group3.is_some()
                && group1.as_ref() == group3.as_ref()
                && group2.is_none())
    );
    // Verify that tasks are only assigned to nodes in a group
    for node in [&node1, &node2, &node3] {
        let filtered_tasks = plugin.filter_tasks(&tasks, &node.address).await.unwrap();
        let group = plugin
            .get_node_group(&node.address.to_string())
            .await
            .unwrap();
        if group.is_some() {
            assert_eq!(
                filtered_tasks.len(),
                1,
                "Node in group should receive tasks"
            );
        } else {
            assert_eq!(
                filtered_tasks.len(),
                0,
                "Node not in group should not receive tasks"
            );
        }
    }
}
#[tokio::test]
async fn test_node_groups_with_allowed_topologies() {
    // Topology filtering: a grouped node only receives tasks whose
    // allowed_topologies list includes the configuration its group was
    // formed from.
    let store = Arc::new(RedisStore::new_test());
    let context_store = store.clone();
    let store_context = Arc::new(StoreContext::new(context_store));
    let config = NodeGroupConfiguration {
        name: "test-config".to_string(),
        min_group_size: 1,
        max_group_size: 1,
        compute_requirements: None,
    };
    let plugin = Arc::new(NodeGroupsPlugin::new(
        vec![config],
        store.clone(),
        store_context.clone(),
        None,
        None,
    ));
    let _ = store_context.task_store.add_observer(plugin.clone()).await;
    let node1 = create_test_node(
        "0x1234567890123456789012345678901234567890",
        NodeStatus::Healthy,
        None,
    );
    let _ = plugin
        .store_context
        .node_store
        .add_node(node1.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    // Task whose topology list does NOT include "test-config" — must be
    // filtered out for this node.
    let task_no_match = Task {
        id: Uuid::new_v4(),
        image: "test-image".to_string(),
        name: "test-task".to_string(),
        env_vars: None,
        cmd: Some(vec![
            "run".to_string(),
            "--index".to_string(),
            "${GROUP_INDEX}".to_string(),
        ]),
        entrypoint: None,
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["no-match-config".to_string()],
                )]),
            )])),
        }),
        state: TaskState::PENDING,
        created_at: 0,
        updated_at: None,
        ..Default::default()
    };
    let _ = plugin
        .store_context
        .task_store
        .add_task(task_no_match.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    let mut tasks = vec![task_no_match];
    let filtered_tasks = plugin.filter_tasks(&tasks, &node1.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 0);
    // Task whose topology list matches the node's group configuration.
    let task_match = Task {
        id: Uuid::new_v4(),
        image: "test-image".to_string(),
        name: "test-task".to_string(),
        env_vars: None,
        cmd: Some(vec![
            "run".to_string(),
            "--index".to_string(),
            "${GROUP_INDEX}".to_string(),
        ]),
        entrypoint: None,
        scheduling_config: Some(SchedulingConfig {
            plugins: Some(HashMap::from([(
                "node_groups".to_string(),
                HashMap::from([(
                    "allowed_topologies".to_string(),
                    vec!["test-config".to_string()],
                )]),
            )])),
        }),
        ..Default::default()
    };
    let _ = plugin
        .store_context
        .task_store
        .add_task(task_match.clone())
        .await;
    let _ = plugin.try_form_new_groups().await;
    tasks.push(task_match);
    // Only the matching task survives the filter.
    let filtered_tasks = plugin.filter_tasks(&tasks, &node1.address).await.unwrap();
    assert_eq!(filtered_tasks.len(), 1);
}
#[tokio::test]
async fn test_node_cannot_be_in_multiple_groups() {
let store = Arc::new(RedisStore::new_test());
let context_store = store.clone();
let store_context = Arc::new(StoreContext::new(context_store));
let config = NodeGroupConfiguration {
name: "test-config".to_string(),
min_group_size: 2,
max_group_size: 2,
compute_requirements: None,
};
// Set max_group_size to 2, so groups can only have 2 nodes
let plugin = Arc::new(NodeGroupsPlugin::new(
vec![config],
store.clone(),
store_context.clone(),
None,
None,
));
let _ = store_context.task_store.add_observer(plugin.clone()).await;
let all_nodes = plugin.store_context.node_store.get_nodes().await.unwrap();
assert_eq!(all_nodes.len(), 0, "No nodes should be in the store");
// Create three nodes
let node1 = create_test_node(
"0x1234567890123456789012345678901234567890",
NodeStatus::Healthy,
None,
);
let node2 = create_test_node(
"0x2234567890123456789012345678901234567890",
NodeStatus::Healthy,
None,
);
let node3 = create_test_node(
"0x3234567890123456789012345678901234567890",
NodeStatus::Healthy,
None,
);
// Add nodes to the store
let _ = plugin
.store_context
.node_store
.add_node(node1.clone())
.await;
let _ = plugin
.store_context
.node_store
.add_node(node2.clone())
.await;
let _ = plugin
.store_context
.node_store
.add_node(node3.clone())
.await;
let task = Task {
scheduling_config: Some(SchedulingConfig {
plugins: Some(HashMap::from([(
"node_groups".to_string(),
HashMap::from([(
"allowed_topologies".to_string(),
vec!["test-config".to_string()],
)]),
)])),
}),
..Default::default()
};
let _ = plugin.store_context.task_store.add_task(task.clone()).await;
// Add nodes to groups through the normal flow
let _ = plugin.try_form_new_groups().await;
// Get connection to check Redis state
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/node_groups/mod.rs | crates/orchestrator/src/plugins/node_groups/mod.rs | use super::webhook::WebhookPlugin;
use crate::models::node::{NodeStatus, OrchestratorNode};
use crate::store::core::{RedisStore, StoreContext};
use crate::utils::loop_heartbeats::LoopHeartbeats;
use anyhow::Error;
use anyhow::Result;
use log::{debug, error, info, warn};
use rand::seq::IteratorRandom;
use redis::{AsyncCommands, Script};
use serde::{Deserialize, Serialize};
use shared::models::node::{ComputeRequirements, NodeLocation};
use shared::models::task::Task;
use std::time::Duration;
use std::{collections::BTreeSet, sync::Arc};
use std::{
collections::{HashMap, HashSet},
str::FromStr,
};
pub(crate) mod scheduler_impl;
pub(crate) mod status_update_impl;
#[cfg(test)]
mod tests;
const GROUP_KEY_PREFIX: &str = "node_group:";
const NODE_GROUP_MAP_KEY: &str = "node_to_group";
const GROUP_TASK_KEY_PREFIX: &str = "group_task:";
const GROUPS_INDEX_KEY: &str = "orchestrator:groups_index";
/// Topology template for node groups: a unique name, the allowed member
/// count range, and optional compute requirements a node must meet to be
/// placed in a group of this kind.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct NodeGroupConfiguration {
    pub name: String,
    pub min_group_size: usize,
    pub max_group_size: usize,
    // Parsed from the compact string form, e.g. "gpu:count=8;gpu:model=A100";
    // `None` means any node qualifies.
    #[serde(deserialize_with = "deserialize_compute_requirements")]
    pub compute_requirements: Option<ComputeRequirements>,
}
fn deserialize_compute_requirements<'de, D>(
deserializer: D,
) -> Result<Option<ComputeRequirements>, D::Error>
where
D: serde::Deserializer<'de>,
{
let s: Option<String> = Option::deserialize(deserializer)?;
match s {
Some(s) => ComputeRequirements::from_str(&s)
.map(Some)
.map_err(serde::de::Error::custom),
None => Ok(None),
}
}
impl NodeGroupConfiguration {
    /// A configuration is well-formed when its size bounds are ordered
    /// (`min_group_size <= max_group_size`).
    pub fn is_valid(&self) -> bool {
        self.min_group_size <= self.max_group_size
    }
}
/// A formed group instance: its unique id, member node addresses (a
/// `BTreeSet` keeps membership ordered and deterministic), creation time,
/// and the name of the configuration template it was formed from.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
pub struct NodeGroup {
    pub id: String,
    pub nodes: BTreeSet<String>,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub configuration_name: String,
}
/// Policy controlling whether solo groups may abandon their current task
/// in order to merge into a larger group.
#[derive(Debug, Clone, PartialEq)]
pub struct TaskSwitchingPolicy {
    /// Whether to enable task switching at all
    pub enabled: bool,
    /// Prefer forming larger groups even if it means switching tasks
    pub prefer_larger_groups: bool,
}
/// Policy toggle for proximity-aware group formation (grouping nodes that
/// are geographically close to each other).
#[derive(Debug, Clone, PartialEq)]
pub struct ProximityOptimizationPolicy {
    /// Optimize node groups based on proximity?
    pub enabled: bool,
}
impl Default for ProximityOptimizationPolicy {
fn default() -> Self {
Self { enabled: true }
}
}
impl Default for TaskSwitchingPolicy {
fn default() -> Self {
Self {
enabled: true,
prefer_larger_groups: true,
}
}
}
/// Scheduler plugin that clusters healthy nodes into named groups based on
/// configurable topologies and filters/assigns group-aware tasks to them.
pub struct NodeGroupsPlugin {
    // Topology templates; sorted at construction (largest min size first,
    // requirement-bearing configs before unconstrained ones).
    configuration_templates: Vec<NodeGroupConfiguration>,
    // Redis handle used for group bookkeeping (group keys, node->group map).
    store: Arc<RedisStore>,
    // Access to the shared node and task stores.
    store_context: Arc<StoreContext>,
    // Heartbeat tracker pinged by the group-management loop, if provided.
    node_groups_heartbeats: Option<Arc<LoopHeartbeats>>,
    // Optional webhook plugins — presumably notified of group lifecycle
    // events; usage not visible here, confirm against the rest of the impl.
    webhook_plugins: Option<Vec<WebhookPlugin>>,
    // Behavior knobs; see TaskSwitchingPolicy / ProximityOptimizationPolicy.
    task_switching_policy: TaskSwitchingPolicy,
    proximity_optimization_policy: ProximityOptimizationPolicy,
}
impl NodeGroupsPlugin {
    /// Convenience constructor using the default task-switching and
    /// proximity policies; delegates to [`Self::new_with_policy`].
    ///
    /// Panics (via `new_with_policy`) if configuration names are not
    /// unique or any configuration has inverted size bounds.
    pub fn new(
        configuration_templates: Vec<NodeGroupConfiguration>,
        store: Arc<RedisStore>,
        store_context: Arc<StoreContext>,
        node_groups_heartbeats: Option<Arc<LoopHeartbeats>>,
        webhook_plugins: Option<Vec<WebhookPlugin>>,
    ) -> Self {
        Self::new_with_policy(
            configuration_templates,
            store,
            store_context,
            node_groups_heartbeats,
            webhook_plugins,
            TaskSwitchingPolicy::default(),
            ProximityOptimizationPolicy::default(),
        )
    }
pub fn new_with_policy(
configuration_templates: Vec<NodeGroupConfiguration>,
store: Arc<RedisStore>,
store_context: Arc<StoreContext>,
node_groups_heartbeats: Option<Arc<LoopHeartbeats>>,
webhook_plugins: Option<Vec<WebhookPlugin>>,
task_switching_policy: TaskSwitchingPolicy,
proximity_optimization_policy: ProximityOptimizationPolicy,
) -> Self {
let mut sorted_configs = configuration_templates;
let mut seen_names = HashSet::new();
for config in &sorted_configs {
if !seen_names.insert(config.name.clone()) {
panic!("Configuration names must be unique");
}
if !config.is_valid() {
panic!("Plugin configuration is invalid");
}
}
sorted_configs.sort_by(|a, b| {
// First priority: min_group_size (descending)
let size_cmp = b.min_group_size.cmp(&a.min_group_size);
if size_cmp != std::cmp::Ordering::Equal {
return size_cmp;
}
// Second priority: configurations with requirements come before those without
// This ensures specific configs (GPU, etc.) get processed before general configs
match (&a.compute_requirements, &b.compute_requirements) {
(Some(_), None) => std::cmp::Ordering::Less,
(None, Some(_)) => std::cmp::Ordering::Greater,
_ => std::cmp::Ordering::Equal,
}
});
Self {
configuration_templates: sorted_configs,
store,
store_context,
node_groups_heartbeats,
webhook_plugins,
task_switching_policy,
proximity_optimization_policy,
}
}
    // TODO: this should consume self; refactor this to separate running logic
    // and other components. it appears quite a lot of different logic is
    // combined into this one type
    /// Periodic driver: every `duration` seconds, attempts to form new
    /// groups, merge solo groups, and (if configured) reports liveness via
    /// the heartbeat tracker. Errors from either phase are logged and do
    /// not stop the loop. Runs forever — the `Result` return type exists
    /// only for signature symmetry with other loops.
    pub async fn run_group_management_loop(&self, duration: u64) -> Result<(), Error> {
        let mut interval = tokio::time::interval(Duration::from_secs(duration));
        loop {
            let start = std::time::Instant::now();
            interval.tick().await;
            // First, form new groups with optimal sizing
            if let Err(e) = self.try_form_new_groups().await {
                error!("Error in group formation: {e}");
            }
            if let Err(e) = self.try_merge_solo_groups().await {
                error!("Error in group merging: {e}");
            }
            if let Some(heartbeats) = &self.node_groups_heartbeats {
                heartbeats.update_node_groups();
            }
            let elapsed = start.elapsed();
            log::info!("Group management loop completed in {elapsed:?}");
        }
    }
/// Check if a node is compatible with a configuration's compute requirements
fn is_node_compatible_with_config(
config: &NodeGroupConfiguration,
node: &OrchestratorNode,
) -> bool {
match (&config.compute_requirements, &node.compute_specs) {
(Some(reqs), Some(specs)) => specs.meets(reqs),
(None, _) => true,
_ => false,
}
}
/// Great-circle distance in kilometres between two locations, via the
/// Haversine formula.
fn calculate_distance(loc1: &NodeLocation, loc2: &NodeLocation) -> f64 {
    const EARTH_RADIUS_KM: f64 = 6371.0;
    let (lat1, lat2) = (loc1.latitude.to_radians(), loc2.latitude.to_radians());
    let half_dlat = (loc2.latitude - loc1.latitude).to_radians() / 2.0;
    let half_dlon = (loc2.longitude - loc1.longitude).to_radians() / 2.0;
    // Haversine term: sin²(Δlat/2) + cos(lat1)·cos(lat2)·sin²(Δlon/2)
    let a = half_dlat.sin().powi(2) + lat1.cos() * lat2.cos() * half_dlon.sin().powi(2);
    // atan2 form of the central angle is numerically stable near antipodes.
    EARTH_RADIUS_KM * 2.0 * a.sqrt().atan2((1.0 - a).sqrt())
}
/// Order `nodes` in place by Haversine distance from `reference_node`.
///
/// Nodes without location data sort last (distance `f64::MAX`); when the
/// reference node itself has no location the order is left untouched.
fn sort_nodes_by_proximity(
    reference_node: &OrchestratorNode,
    nodes: &mut Vec<&OrchestratorNode>,
) {
    let Some(ref_location) = &reference_node.location else {
        return;
    };
    let distance_from_ref = |n: &OrchestratorNode| {
        n.location
            .as_ref()
            .map(|loc| Self::calculate_distance(ref_location, loc))
            .unwrap_or(f64::MAX)
    };
    nodes.sort_by(|a, b| {
        distance_from_ref(a)
            .partial_cmp(&distance_from_ref(b))
            .unwrap_or(std::cmp::Ordering::Equal)
    });
}
/// Decide whether merging the given groups (with possible task switching)
/// is worthwhile under the task-switching policy.
async fn should_switch_tasks(
    &self,
    current_groups: &[NodeGroup],
    potential_merged_size: usize,
) -> Result<bool, Error> {
    // Switching must be explicitly enabled.
    if !self.task_switching_policy.enabled {
        return Ok(false);
    }
    // Only solo groups qualify, and the merge must actually grow a group.
    let all_solo = current_groups.iter().all(|g| g.nodes.len() == 1);
    if !all_solo || potential_merged_size < 2 {
        return Ok(false);
    }
    // Unless larger groups are preferred, never interrupt a group that is
    // already running a task.
    if !self.task_switching_policy.prefer_larger_groups {
        for group in current_groups {
            if self.get_current_group_task(&group.id).await?.is_some() {
                debug!(
                    "Group {} has task and prefer_larger_groups is disabled",
                    group.id
                );
                return Ok(false);
            }
        }
    }
    debug!(
        "Task switching beneficial: merging {} solo groups into group of size {} (policy: prefer_larger_groups={})",
        current_groups.len(),
        potential_merged_size,
        self.task_switching_policy.prefer_larger_groups
    );
    Ok(true)
}
/// Persist a new group with a single atomic Redis pipeline:
/// the serialized group blob, its entry in the groups index, and a
/// node→group mapping for every member.
async fn create_group_atomically(
    &self,
    group: &NodeGroup,
    conn: &mut redis::aio::MultiplexedConnection,
) -> Result<(), Error> {
    let serialized = serde_json::to_string(group)?;
    let mut pipe = redis::pipe();
    pipe.atomic()
        .set(get_group_key(&group.id), serialized)
        .sadd(GROUPS_INDEX_KEY, &group.id);
    // Reverse index: each member node points back at its group.
    for node in &group.nodes {
        pipe.hset(NODE_GROUP_MAP_KEY, node, &group.id);
    }
    pipe.query_async::<()>(conn).await?;
    Ok(())
}
/// Resolve the group a node currently belongs to, if any:
/// node → group id (hash), then group id → serialized group blob.
pub async fn get_node_group(&self, node_addr: &str) -> Result<Option<NodeGroup>, Error> {
    let mut conn = self.store.client.get_multiplexed_async_connection().await?;
    let group_id: Option<String> = conn.hget(NODE_GROUP_MAP_KEY, node_addr).await?;
    let Some(group_id) = group_id else {
        return Ok(None);
    };
    let group_data: Option<String> = conn.get(get_group_key(&group_id)).await?;
    match group_data {
        // A mapping whose blob has vanished is treated as "no group".
        Some(json) => Ok(Some(serde_json::from_str(&json)?)),
        None => Ok(None),
    }
}
/// Resolve groups for many node addresses at once.
///
/// Pipeline: (1) one pipelined HGET per address (order-preserving),
/// (2) MGET the deduplicated group blobs, (3) map every input address to
/// its (possibly absent) parsed group. Unparsable blobs are logged and
/// treated as absent rather than failing the whole batch.
pub async fn get_node_groups_batch(
    &self,
    node_addresses: &[String],
) -> Result<HashMap<String, Option<NodeGroup>>, Error> {
    let mut conn = self.store.client.get_multiplexed_async_connection().await?;
    let mut result = HashMap::new();
    if node_addresses.is_empty() {
        return Ok(result);
    }
    // Step 1: batch node→group-id lookups; results align with input order.
    let mut pipe = redis::pipe();
    for node_addr in node_addresses {
        pipe.hget(NODE_GROUP_MAP_KEY, node_addr);
    }
    let group_ids: Vec<Option<String>> = pipe.query_async(&mut conn).await?;
    // Step 2: dedupe ids so each group blob is fetched only once.
    let unique_group_ids: HashSet<String> = group_ids
        .iter()
        .filter_map(|opt| opt.as_ref())
        .cloned()
        .collect();
    // Step 3: Batch fetch all group data
    // NOTE(review): the zip below relies on the unmodified HashSet yielding
    // the same iteration order for `iter()` (key building) and `into_iter()`
    // (pairing with MGET results). True for std today, but fragile —
    // consider collecting the ids into a Vec first.
    let group_data: HashMap<String, NodeGroup> = if !unique_group_ids.is_empty() {
        let group_keys: Vec<String> = unique_group_ids
            .iter()
            .map(|id| get_group_key(id))
            .collect();
        let group_values: Vec<Option<String>> = conn.mget(&group_keys).await?;
        unique_group_ids
            .into_iter()
            .zip(group_values.into_iter())
            .filter_map(|(group_id, group_json)| {
                group_json.and_then(|json| {
                    serde_json::from_str::<NodeGroup>(&json)
                        .map_err(|e| {
                            error!("Failed to parse group {group_id} data: {e}");
                            e
                        })
                        .ok()
                        .map(|group| (group_id, group))
                })
            })
            .collect()
    } else {
        HashMap::new()
    };
    // Step 4: Build result mapping node addresses to their groups
    for (node_addr, group_id) in node_addresses.iter().zip(group_ids.iter()) {
        let group = group_id.as_ref().and_then(|id| group_data.get(id)).cloned();
        result.insert(node_addr.clone(), group);
    }
    Ok(result)
}
/// Return the configured templates that are currently enabled (listed in
/// the `available_node_group_configs` Redis set), ordered by descending
/// minimum group size. Redis errors degrade to an empty list.
pub async fn get_available_configurations(&self) -> Vec<NodeGroupConfiguration> {
    let Ok(mut conn) = self.store.client.get_multiplexed_async_connection().await else {
        return vec![];
    };
    let enabled: HashSet<String> = conn
        .smembers("available_node_group_configs")
        .await
        .unwrap_or_default();
    let mut enabled_templates: Vec<NodeGroupConfiguration> = self
        .configuration_templates
        .iter()
        .filter(|template| enabled.contains(&template.name))
        .cloned()
        .collect();
    // Largest minimum size first, so demanding configurations are tried first.
    enabled_templates.sort_by(|a, b| b.min_group_size.cmp(&a.min_group_size));
    enabled_templates
}
/// All configured templates, regardless of whether they are currently enabled.
pub fn get_all_configuration_templates(&self) -> Vec<NodeGroupConfiguration> {
    self.configuration_templates.to_vec()
}
/// Position of `node_addr` within the group's ordered node set.
///
/// # Errors
/// Returns an error when the node is not a member of the group.
pub fn get_idx_in_group(
    &self,
    node_group: &NodeGroup,
    node_addr: &str,
) -> Result<usize, Error> {
    node_group
        .nodes
        .iter()
        // Compare `&String` to `&str` directly instead of allocating a fresh
        // `String` for every element (clippy::cmp_owned).
        .position(|n| n == node_addr)
        .ok_or_else(|| anyhow::anyhow!("Node {} not found in group", node_addr))
}
/// Fetch the task currently assigned to `group_id`, if any.
///
/// When the stored task id points at a task that no longer exists, the
/// stale key is removed via a Lua compare-and-delete (only if it still
/// holds the id we read, so a concurrent re-assignment is never clobbered)
/// and `None` is returned.
async fn get_current_group_task(&self, group_id: &str) -> Result<Option<Task>, Error> {
    let mut conn = self.store.client.get_multiplexed_async_connection().await?;
    let task_key = format!("{GROUP_TASK_KEY_PREFIX}{group_id}");
    let task_id: Option<String> = conn.get(&task_key).await?;
    if let Some(task_id) = task_id {
        if let Some(task) = self.store_context.task_store.get_task(&task_id).await? {
            return Ok(Some(task));
        }
        warn!("Task id set but task not found");
        // Compare-and-delete: a plain DEL could race with a concurrent
        // assignment that just replaced the key.
        let script = Script::new(
            r#"
local task_key = KEYS[1]
local expected_task_id = ARGV[1]
local current_task_id = redis.call('GET', task_key)
if current_task_id == expected_task_id then
redis.call('DEL', task_key)
return 1
else
return 0
end
"#,
        );
        let _: () = script
            .key(&task_key)
            .arg(task_id)
            .invoke_async(&mut conn)
            .await?;
    }
    Ok(None)
}
/// Atomically claim `task_id` for the group via SETNX.
/// Returns `true` only for the caller that won the claim.
pub async fn assign_task_to_group(&self, group_id: &str, task_id: &str) -> Result<bool, Error> {
    let mut conn = self.store.client.get_multiplexed_async_connection().await?;
    let task_key = format!("{GROUP_TASK_KEY_PREFIX}{group_id}");
    let newly_set = conn.set_nx::<_, _, bool>(&task_key, task_id).await?;
    Ok(newly_set)
}
/// Form new groups from unassigned healthy nodes, walking the available
/// configurations in priority order.
///
/// For each configuration, repeatedly carves groups out of the remaining
/// healthy pool until too few compatible nodes remain. With proximity
/// optimization enabled, members are picked closest-first around a seed
/// node that has location data. Webhook notifications fire after all
/// Redis writes succeed.
async fn try_form_new_groups(&self) -> Result<Vec<NodeGroup>, Error> {
    let mut conn = self.store.client.get_multiplexed_async_connection().await?;
    let mut formed_groups = Vec::new();
    let available_configurations = self.get_available_configurations().await;
    debug!("Available configurations: {available_configurations:?}");
    let nodes = self.store_context.node_store.get_nodes().await?;
    let assigned_nodes: std::collections::HashMap<String, String> =
        conn.hgetall(NODE_GROUP_MAP_KEY).await?;
    debug!("Assigned nodes: {assigned_nodes:?}");
    // Candidates: healthy, reachable over p2p, not already in a group.
    let mut healthy_nodes = nodes
        .iter()
        .filter(|node| node.status == NodeStatus::Healthy)
        .filter(|node| node.p2p_id.is_some())
        .filter(|node| !assigned_nodes.contains_key(&node.address.to_string()))
        .collect::<Vec<&OrchestratorNode>>();
    info!(
        "Found {} healthy nodes for potential group formation",
        healthy_nodes.len()
    );
    let mut total_available = healthy_nodes.len();
    for config in &available_configurations {
        debug!("Checking configuration: {config:?}");
        while total_available >= config.min_group_size {
            let initial_available = total_available;
            // Find compatible nodes for this configuration
            let compatible_nodes: Vec<&OrchestratorNode> = healthy_nodes
                .iter()
                .filter(|node| Self::is_node_compatible_with_config(config, node))
                .cloned()
                .collect();
            if compatible_nodes.len() < config.min_group_size {
                break;
            }
            let mut available_nodes = BTreeSet::new();
            let mut nodes_to_remove = Vec::new();
            if self.proximity_optimization_policy.enabled {
                // Start with a seed node (prefer nodes with location data)
                let seed_node = compatible_nodes
                    .iter()
                    .find(|n| n.location.is_some())
                    .or(compatible_nodes.first())
                    .copied();
                if let Some(seed) = seed_node {
                    // Add the seed node
                    available_nodes.insert(seed.address.to_string());
                    nodes_to_remove.push(seed.address.to_string());
                    // Sort remaining nodes by proximity to seed
                    let mut remaining_compatible: Vec<&OrchestratorNode> = compatible_nodes
                        .into_iter()
                        .filter(|n| n.address != seed.address)
                        .collect();
                    Self::sort_nodes_by_proximity(seed, &mut remaining_compatible);
                    // Add closest nodes until we reach the desired size
                    for node in remaining_compatible {
                        if available_nodes.len() >= config.max_group_size {
                            break;
                        }
                        available_nodes.insert(node.address.to_string());
                        nodes_to_remove.push(node.address.to_string());
                    }
                }
            } else {
                // No proximity policy: take compatible nodes in pool order.
                for node in compatible_nodes {
                    if available_nodes.len() >= config.max_group_size {
                        break;
                    }
                    available_nodes.insert(node.address.to_string());
                    nodes_to_remove.push(node.address.to_string());
                }
            }
            // Not enough nodes to form a group
            if available_nodes.len() < config.min_group_size {
                break;
            }
            // Create new group
            let group_id = generate_group_id();
            debug!("Generating new group with ID: {group_id}");
            let group = NodeGroup {
                id: group_id.clone(),
                nodes: available_nodes.clone(),
                created_at: chrono::Utc::now(),
                configuration_name: config.name.clone(),
            };
            debug!("Created new group structure: {group:?}");
            // Use atomic group creation helper
            self.create_group_atomically(&group, &mut conn).await?;
            // Remove used nodes from healthy_nodes
            let prev_healthy_count = healthy_nodes.len();
            healthy_nodes.retain(|node| !nodes_to_remove.contains(&node.address.to_string()));
            total_available = healthy_nodes.len();
            debug!(
                "Removed {} nodes from healthy pool ({} remaining)",
                prev_healthy_count - healthy_nodes.len(),
                healthy_nodes.len()
            );
            info!(
                "Created new group {} with {} nodes for configuration {}{}",
                group_id,
                available_nodes.len(),
                config.name,
                if available_nodes.len() == config.max_group_size {
                    " (limited by max size)"
                } else {
                    ""
                }
            );
            debug!("Group details: {group:?}");
            formed_groups.push(group);
            // Safety valve: if this iteration consumed no nodes, stop before
            // looping forever on the same configuration.
            if total_available == initial_available {
                break; // No progress made, exit this config
            }
        }
    }
    // Notify webhook plugins only after every group was persisted.
    let webhook_groups = formed_groups.clone();
    for group in webhook_groups {
        if let Some(plugins) = &self.webhook_plugins {
            for plugin in plugins.iter() {
                if let Err(e) = plugin.send_group_created(
                    group.id.clone(),
                    group.configuration_name.clone(),
                    group.nodes.iter().cloned().collect(),
                ) {
                    error!("Failed to send group created webhook: {e}");
                }
            }
        }
    }
    Ok(formed_groups)
}
/// Try to merge single-node groups when possible, including task switching
async fn try_merge_solo_groups(&self) -> Result<Vec<NodeGroup>, Error> {
debug!("Starting solo group merging process with task switching support");
let mut conn = self.store.client.get_multiplexed_async_connection().await?;
let mut merged_groups = Vec::new();
// Get all existing groups and available configurations
let all_groups = self.get_all_groups().await?;
// Quick optimization: check if there are any solo groups before proceeding
let solo_groups_count = all_groups.iter().filter(|g| g.nodes.len() == 1).count();
if solo_groups_count < 2 {
debug!("Found {solo_groups_count} solo groups, merging not beneficial");
return Ok(merged_groups);
}
let available_configurations = self.get_available_configurations().await;
debug!(
"Found {} total groups ({} solo) for potential merging",
all_groups.len(),
solo_groups_count
);
for config in &available_configurations {
// up-to-date list of groups
let current_groups = self.get_all_groups().await?;
let config_merged_groups = self
.try_merge_groups_for_config(¤t_groups, config, &mut conn)
.await?;
merged_groups.extend(config_merged_groups);
}
if !merged_groups.is_empty() {
info!(
"Group merging completed: created {} new merged groups",
merged_groups.len()
);
} else {
debug!("No groups were merged");
}
Ok(merged_groups)
}
/// Repeatedly merge solo groups compatible with `config` until no further
/// merge can be formed.
async fn try_merge_groups_for_config(
    &self,
    all_groups: &[NodeGroup],
    config: &NodeGroupConfiguration,
    conn: &mut redis::aio::MultiplexedConnection,
) -> Result<Vec<NodeGroup>, Error> {
    let mut merged_groups = Vec::new();
    // Candidate pool: solo groups whose member meets this config's requirements.
    let mut remaining_groups = self.find_compatible_solo_groups(all_groups, config).await?;
    if remaining_groups.len() < config.min_group_size {
        debug!("Not enough compatible groups to merge");
        return Ok(merged_groups);
    }
    while remaining_groups.len() >= config.min_group_size {
        // Stop as soon as no more merges are possible.
        let Some((merged_group, used_group_ids)) = self
            .attempt_group_merge(&remaining_groups, config, conn)
            .await?
        else {
            break;
        };
        merged_groups.push(merged_group);
        remaining_groups.retain(|g| !used_group_ids.contains(&g.id));
    }
    Ok(merged_groups)
}
/// Collect clones of all single-node groups whose member satisfies `config`.
async fn find_compatible_solo_groups(
    &self,
    all_groups: &[NodeGroup],
    config: &NodeGroupConfiguration,
) -> Result<Vec<NodeGroup>, Error> {
    let nodes = self.store_context.node_store.get_nodes().await?;
    // Index nodes by address for cheap spec lookups.
    let node_specs: HashMap<String, &OrchestratorNode> = nodes
        .iter()
        .map(|node| (node.address.to_string(), node))
        .collect();
    let compatible_groups = all_groups
        .iter()
        .filter(|group| {
            group.nodes.len() == 1
                && self.is_group_compatible_with_config(group, config, &node_specs)
        })
        .cloned()
        .collect();
    Ok(compatible_groups)
}
/// A group is compatible when every member node is known and meets the
/// configuration's requirements; unknown nodes disqualify the group.
fn is_group_compatible_with_config(
    &self,
    group: &NodeGroup,
    config: &NodeGroupConfiguration,
    node_specs: &HashMap<String, &OrchestratorNode>,
) -> bool {
    group
        .nodes
        .iter()
        .all(|node_addr| match node_specs.get(node_addr) {
            Some(node) => Self::is_node_compatible_with_config(config, node),
            None => false,
        })
}
/// Attempt to merge a batch of compatible solo groups into one group.
///
/// Selection is proximity-first when the policy is enabled (a seed group
/// whose node has location data, then closest groups), falling back to
/// simple first-fit when there is no seed or the proximity pass came up
/// short. Returns the merged group plus the ids of the dissolved groups,
/// or `None` when no beneficial merge exists.
async fn attempt_group_merge(
    &self,
    compatible_groups: &[NodeGroup],
    config: &NodeGroupConfiguration,
    conn: &mut redis::aio::MultiplexedConnection,
) -> Result<Option<(NodeGroup, Vec<String>)>, Error> {
    let mut merge_batch = Vec::new();
    let mut total_nodes = BTreeSet::new();
    let mut groups_to_dissolve = Vec::new();
    // If proximity optimization is enabled, try to use location-based selection
    if self.proximity_optimization_policy.enabled {
        // Get node information for location data
        let nodes = self.store_context.node_store.get_nodes().await?;
        let node_map: HashMap<String, &OrchestratorNode> = nodes
            .iter()
            .map(|node| (node.address.to_string(), node))
            .collect();
        // Try to find a seed group with location data. Candidates are solo
        // groups, so `nodes.iter().next()` is the single member.
        let seed_group = compatible_groups.iter().find(|group| {
            group
                .nodes
                .iter()
                .next()
                .and_then(|addr| node_map.get(addr))
                .and_then(|node| node.location.as_ref())
                .is_some()
        });
        if let Some(seed) = seed_group {
            // Found a seed with location, use proximity-based selection
            let seed_node = node_map.get(seed.nodes.iter().next().unwrap()).unwrap();
            merge_batch.push(seed.clone());
            total_nodes.extend(seed.nodes.iter().cloned());
            groups_to_dissolve.push(seed.id.clone());
            // Create a sorted list of remaining groups by proximity; groups
            // whose node lacks location data are skipped here and can only
            // be picked up by the fallback pass below.
            let mut remaining_with_distance: Vec<(f64, &NodeGroup)> = compatible_groups
                .iter()
                .filter(|g| g.id != seed.id)
                .filter_map(|group| {
                    let node_addr = group.nodes.iter().next()?;
                    let node = node_map.get(node_addr)?;
                    let node_loc = node.location.as_ref()?;
                    let seed_loc = seed_node.location.as_ref()?;
                    let distance = Self::calculate_distance(seed_loc, node_loc);
                    Some((distance, group))
                })
                .collect();
            remaining_with_distance
                .sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal));
            // Add closest groups first
            for (_, group) in remaining_with_distance {
                if total_nodes.len() + group.nodes.len() <= config.max_group_size {
                    merge_batch.push(group.clone());
                    total_nodes.extend(group.nodes.iter().cloned());
                    groups_to_dissolve.push(group.id.clone());
                    if total_nodes.len() >= config.max_group_size {
                        break;
                    }
                }
            }
        }
    }
    // If no proximity-based selection happened or we still need more groups, use original logic
    if merge_batch.is_empty()
        || (total_nodes.len() < config.max_group_size
            && total_nodes.len() < config.min_group_size)
    {
        // Reset if we didn't get enough nodes
        if total_nodes.len() < config.min_group_size {
            merge_batch.clear();
            total_nodes.clear();
            groups_to_dissolve.clear();
        }
        // Original selection logic: first-fit over the candidate list,
        // skipping groups already chosen by the proximity pass.
        for group in compatible_groups {
            if !groups_to_dissolve.contains(&group.id)
                && total_nodes.len() + group.nodes.len() <= config.max_group_size
            {
                merge_batch.push(group.clone());
                total_nodes.extend(group.nodes.iter().cloned());
                groups_to_dissolve.push(group.id.clone());
                if total_nodes.len() >= config.max_group_size {
                    break;
                }
            }
        }
    }
    // Validate merge conditions
    if !self
        .is_merge_beneficial(&merge_batch, total_nodes.len())
        .await?
    {
        return Ok(None);
    }
    // Perform the merge
    self.execute_group_merge(&merge_batch, config, &total_nodes, conn)
        .await
}
/// A merge must combine at least two groups into a group of at least two
/// nodes, and the task-switching policy must approve it.
async fn is_merge_beneficial(
    &self,
    groups: &[NodeGroup],
    new_size: usize,
) -> Result<bool, Error> {
    if groups.len() >= 2 && new_size >= 2 {
        self.should_switch_tasks(groups, new_size).await
    } else {
        Ok(false)
    }
}
/// Execute the actual group merge with Redis operations
async fn execute_group_merge(
&self,
groups_to_merge: &[NodeGroup],
config: &NodeGroupConfiguration,
merged_nodes: &BTreeSet<String>,
conn: &mut redis::aio::MultiplexedConnection,
) -> Result<Option<(NodeGroup, Vec<String>)>, Error> {
let group_ids_to_dissolve: Vec<String> =
groups_to_merge.iter().map(|g| g.id.clone()).collect();
// Create new merged group
let new_group_id = generate_group_id();
let merged_group = NodeGroup {
id: new_group_id.clone(),
nodes: merged_nodes.clone(),
created_at: chrono::Utc::now(),
configuration_name: config.name.clone(),
};
// Find the best task for the new group before starting transaction
let selected_task = self.find_best_task_for_group(&merged_group).await?;
// Begin atomic Redis transaction
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/node_groups/scheduler_impl.rs | crates/orchestrator/src/plugins/node_groups/scheduler_impl.rs | use super::NodeGroupsPlugin;
use alloy::primitives::Address;
use anyhow::Result;
use log::{error, info};
use rand::seq::IteratorRandom;
use redis::AsyncCommands;
use shared::models::task::Task;
use std::str::FromStr;
impl NodeGroupsPlugin {
/// Restrict `tasks` to the single task the node's group is working on.
///
/// Returns an empty list when the node is not in a group, when no task is
/// applicable, or when the group was dissolved mid-flight. On success the
/// returned task is a clone with group template variables
/// (`${GROUP_INDEX}`, `${GROUP_SIZE}`, `${NEXT_P2P_ADDRESS}`,
/// `${GROUP_ID}`, `${TOTAL_UPLOAD_COUNT}`, `${LAST_FILE_IDX}`) substituted
/// into env vars, command arguments, and volume-mount paths.
pub(crate) async fn filter_tasks(
    &self,
    tasks: &[Task],
    node_address: &Address,
) -> Result<Vec<Task>> {
    if let Ok(Some(group)) = self.get_node_group(&node_address.to_string()).await {
        info!(
            "Node {} is in group {} with {} nodes",
            node_address,
            group.id,
            group.nodes.len()
        );
        let idx = match self.get_idx_in_group(&group, &node_address.to_string()) {
            Ok(idx) => idx,
            Err(e) => {
                error!("Failed to get index in group: {e}");
                return Ok(vec![]);
            }
        };
        let mut current_task: Option<Task> = None;
        match self.get_current_group_task(&group.id).await {
            Ok(Some(task)) => {
                current_task = Some(task);
            }
            Ok(None) => {
                // No task assigned yet: pick one this group's topology allows
                // and race other group members to claim it.
                if tasks.is_empty() {
                    return Ok(vec![]);
                }
                // A task is applicable unless it pins `allowed_topologies`
                // that exclude this group's configuration.
                let applicable_tasks: Vec<Task> = tasks
                    .iter()
                    .filter(|&task| match &task.scheduling_config {
                        None => true,
                        Some(config) => {
                            match config.plugins.as_ref().and_then(|p| p.get("node_groups")) {
                                None => true,
                                Some(node_config) => {
                                    match node_config.get("allowed_topologies") {
                                        None => true,
                                        Some(topologies) => {
                                            topologies.contains(&group.configuration_name)
                                        }
                                    }
                                }
                            }
                        }
                    })
                    .cloned()
                    .collect();
                if applicable_tasks.is_empty() {
                    return Ok(vec![]);
                }
                // Select a random task before any await points
                // (the RNG handle is not Send, so it must not live across awaits).
                let selected_task = {
                    let mut rng = rand::rng();
                    applicable_tasks.into_iter().choose(&mut rng)
                };
                if let Some(new_task) = selected_task {
                    let task_id = new_task.id.to_string();
                    match self.assign_task_to_group(&group.id, &task_id).await {
                        Ok(true) => {
                            // Successfully assigned the task
                            current_task = Some(new_task.clone());
                        }
                        Ok(false) => {
                            // Another node already assigned a task, try to get it
                            if let Ok(Some(task)) = self.get_current_group_task(&group.id).await
                            {
                                current_task = Some(task);
                            }
                        }
                        Err(e) => {
                            error!("Failed to assign task to group: {e}");
                            // Check if group still exists - might have been dissolved during merge
                            if let Ok(false) = self.validate_group_exists(&group.id).await {
                                error!(
                                    "Group {} no longer exists, likely dissolved during merge",
                                    group.id
                                );
                                // Try to handle the orphaned task
                                if let Err(recovery_err) =
                                    self.handle_group_not_found(&group.id, &task_id).await
                                {
                                    error!(
                                        "Failed to recover from dissolved group: {recovery_err}"
                                    );
                                }
                                return Ok(vec![]); // Node should retry on next scheduling cycle
                            }
                        }
                    }
                }
            }
            _ => {}
        }
        if let Some(t) = current_task {
            let mut task_clone = t.clone();
            // Ring topology: each node is given the p2p id of the next member.
            let next_node_idx = (idx + 1) % group.nodes.len();
            let next_node_addr = group.nodes.iter().nth(next_node_idx).unwrap();
            // Get p2p_id for next node from node store
            let next_p2p_id = if let Ok(Some(next_node)) = self
                .store_context
                .node_store
                .get_node(&Address::from_str(next_node_addr).unwrap())
                .await
            {
                next_node.p2p_id.unwrap_or_default()
            } else {
                String::new()
            };
            // Temporary hack to get the upload count
            // (counts matching Redis keys via SCAN; any failure degrades to "0").
            let pattern = format!("upload:{}:{}:*", node_address, group.id);
            let total_upload_count =
                match self.store.client.get_multiplexed_async_connection().await {
                    Ok(mut conn) => {
                        let mut keys: Vec<String> = Vec::new();
                        match conn.scan_match(&pattern).await {
                            Ok(mut iter) => {
                                while let Some(key) = iter.next_item().await {
                                    keys.push(key);
                                }
                                keys.len().to_string()
                            }
                            Err(e) => {
                                error!("Failed to scan upload keys: {e}");
                                "0".to_string()
                            }
                        }
                    }
                    Err(e) => {
                        error!("Failed to get Redis connection: {e}");
                        "0".to_string()
                    }
                };
            // File number starts with 0 for the first file, while filecount is at 1
            let last_file_idx = total_upload_count
                .parse::<u32>()
                .unwrap_or(0)
                .saturating_sub(1);
            // Inject GROUP_INDEX, then expand template placeholders in every
            // env var value.
            let mut env_vars = task_clone.env_vars.unwrap_or_default();
            env_vars.insert("GROUP_INDEX".to_string(), idx.to_string());
            for (_, value) in env_vars.iter_mut() {
                let new_value = value
                    .replace("${GROUP_INDEX}", &idx.to_string())
                    .replace("${GROUP_SIZE}", &group.nodes.len().to_string())
                    .replace("${NEXT_P2P_ADDRESS}", &next_p2p_id)
                    .replace("${GROUP_ID}", &group.id)
                    .replace("${TOTAL_UPLOAD_COUNT}", &total_upload_count.to_string())
                    .replace("${LAST_FILE_IDX}", &last_file_idx.to_string());
                *value = new_value;
            }
            task_clone.env_vars = Some(env_vars);
            // Same placeholder expansion for command arguments.
            task_clone.cmd = task_clone.cmd.map(|cmd| {
                cmd.into_iter()
                    .map(|arg| {
                        arg.replace("${GROUP_INDEX}", &idx.to_string())
                            .replace("${GROUP_SIZE}", &group.nodes.len().to_string())
                            .replace("${NEXT_P2P_ADDRESS}", &next_p2p_id)
                            .replace("${GROUP_ID}", &group.id)
                            .replace("${TOTAL_UPLOAD_COUNT}", &total_upload_count.to_string())
                            .replace("${LAST_FILE_IDX}", &last_file_idx.to_string())
                    })
                    .collect::<Vec<String>>()
            });
            // Replace group variables in volume mounts if they exist
            if let Some(volume_mounts) = task_clone.volume_mounts {
                task_clone.volume_mounts = Some(
                    volume_mounts
                        .into_iter()
                        .map(|mut volume_mount| {
                            volume_mount.host_path =
                                volume_mount.host_path.replace("${GROUP_ID}", &group.id);
                            volume_mount.container_path = volume_mount
                                .container_path
                                .replace("${GROUP_ID}", &group.id);
                            volume_mount
                        })
                        .collect(),
                );
            }
            return Ok(vec![task_clone]);
        }
    }
    info!("Node {node_address} is not in a group, skipping all tasks");
    Ok(vec![])
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/plugins/webhook/mod.rs | crates/orchestrator/src/plugins/webhook/mod.rs | use std::{sync::Arc, time::Duration};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use crate::models::node::{NodeStatus, OrchestratorNode};
use log::{error, info, warn};
/// Outbound webhook event types; serialized with adjacent tagging as
/// `{"event": "<name>", "data": {...}}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "event", content = "data")]
pub(crate) enum WebhookEvent {
    /// A node transitioned between statuses (old → new).
    #[serde(rename = "node.status_changed")]
    NodeStatusChanged {
        node_address: String,
        ip_address: String,
        port: u16,
        old_status: String,
        new_status: String,
    },
    /// A new node group was formed.
    #[serde(rename = "group.created")]
    GroupCreated {
        group_id: String,
        configuration_name: String,
        nodes: Vec<String>,
    },
    /// An existing node group was dissolved.
    #[serde(rename = "group.destroyed")]
    GroupDestroyed {
        group_id: String,
        configuration_name: String,
        nodes: Vec<String>,
    },
    /// Pool metrics snapshot; metric names are flattened into `data`.
    #[serde(rename = "metrics.updated")]
    MetricsUpdated {
        pool_id: u32,
        #[serde(flatten)]
        metrics: std::collections::HashMap<String, f64>,
    },
}
/// Envelope sent to webhook receivers: the event (flattened into the same
/// object) plus a timestamp of when the payload was created.
#[derive(Debug, Clone, Serialize)]
pub(crate) struct WebhookPayload {
    #[serde(flatten)]
    pub event: WebhookEvent,
    pub timestamp: String,
}
impl WebhookPayload {
    /// Wrap an event with a creation timestamp. Under `cfg(test)` the
    /// timestamp is a fixed constant so the mock-server body assertions in
    /// the tests are deterministic; otherwise it is the current UTC time in
    /// RFC 3339 form.
    pub(crate) fn new(event: WebhookEvent) -> Self {
        #[cfg(test)]
        let timestamp = "2024-01-01T00:00:00Z".to_string();
        #[cfg(not(test))]
        let timestamp = chrono::Utc::now().to_rfc3339();
        Self { event, timestamp }
    }
}
/// Configuration for a single webhook destination.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WebhookConfig {
    /// URL that webhook payloads are POSTed to.
    pub url: String,
    /// Optional token sent as an `Authorization: Bearer …` header.
    pub bearer_token: Option<String>,
}
/// Sends webhook events to a configured URL with retries.
#[derive(Debug, Clone)]
pub struct WebhookPlugin {
    webhook_url: String,
    // Arc-shared so spawned retry tasks reuse the same connection pool.
    client: Arc<reqwest::Client>,
}
impl WebhookPlugin {
    /// Number of retries after the initial attempt (so MAX_RETRIES + 1 tries).
    const MAX_RETRIES: u32 = 5;
    /// Per-request timeout applied by the shared HTTP client.
    const REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
    /// First retry delay; doubles on each subsequent attempt.
    const BASE_RETRY_DELAY_MS: u64 = 500;
    /// Upper bound on the exponential backoff delay.
    const MAX_RETRY_DELAY_MS: u64 = 10000;

    /// Build a plugin whose client is pre-configured with the request
    /// timeout and (optionally) the bearer-token default header.
    ///
    /// # Panics
    /// Panics if the HTTP client cannot be built or the bearer token is not
    /// a valid header value.
    pub fn new(webhook_config: WebhookConfig) -> Self {
        let client = Arc::new(
            reqwest::Client::builder()
                .default_headers(Self::build_headers(&webhook_config.bearer_token))
                .timeout(Self::REQUEST_TIMEOUT)
                .build()
                .expect("Failed to build HTTP client"),
        );
        Self {
            webhook_url: webhook_config.url,
            client,
        }
    }

    /// Default headers for every request: `Authorization: Bearer …` when a
    /// token is configured, otherwise empty.
    fn build_headers(bearer_token: &Option<String>) -> reqwest::header::HeaderMap {
        let mut headers = reqwest::header::HeaderMap::new();
        if let Some(token) = bearer_token {
            headers.insert(
                reqwest::header::AUTHORIZATION,
                reqwest::header::HeaderValue::from_str(&format!("Bearer {token}"))
                    .expect("Invalid bearer token"),
            );
        }
        headers
    }

    /// Exponential backoff with a cap: `base * 2^attempt`, clamped to
    /// `MAX_RETRY_DELAY_MS`. Saturating arithmetic keeps this well-defined
    /// even for attempt counts large enough to overflow `u64` (the original
    /// `2_u64.pow(attempt)` would panic in debug builds for attempt >= 64).
    fn calculate_retry_delay(attempt: u32) -> Duration {
        let delay_ms = std::cmp::min(
            Self::BASE_RETRY_DELAY_MS.saturating_mul(2_u64.saturating_pow(attempt)),
            Self::MAX_RETRY_DELAY_MS,
        );
        Duration::from_millis(delay_ms)
    }

    /// Stable event name used in log lines; mirrors the serde rename tags.
    fn get_event_name(event: &WebhookEvent) -> &'static str {
        match event {
            WebhookEvent::NodeStatusChanged { .. } => "node.status_changed",
            WebhookEvent::GroupCreated { .. } => "group.created",
            WebhookEvent::GroupDestroyed { .. } => "group.destroyed",
            WebhookEvent::MetricsUpdated { .. } => "metrics.updated",
        }
    }

    /// POST the payload once. Non-2xx responses become errors carrying the
    /// status, elapsed time, and response body.
    async fn send_webhook_request(&self, payload: &WebhookPayload) -> Result<(), Error> {
        let start_time = std::time::Instant::now();
        let response = self
            .client
            .post(&self.webhook_url)
            .json(payload)
            .send()
            .await?;
        let duration = start_time.elapsed();
        let status = response.status();
        if status.is_success() {
            info!(
                "Webhook '{}' sent successfully to {} (HTTP {}, took {:?})",
                Self::get_event_name(&payload.event),
                self.webhook_url,
                status,
                duration
            );
            Ok(())
        } else {
            let error_text = response
                .text()
                .await
                .unwrap_or_else(|_| "Failed to read error response".to_string());
            Err(anyhow::anyhow!(
                "HTTP {} after {:?}: {}",
                status,
                duration,
                error_text
            ))
        }
    }

    /// Send with exponential-backoff retries (MAX_RETRIES + 1 attempts in
    /// total). Returns the last error when every attempt fails.
    async fn send_with_retry(&self, payload: WebhookPayload) -> Result<(), Error> {
        let event_name = Self::get_event_name(&payload.event);
        let mut last_error = None;
        info!(
            "Sending webhook '{}' to {} (max {} retries)",
            event_name,
            self.webhook_url,
            Self::MAX_RETRIES
        );
        for attempt in 0..=Self::MAX_RETRIES {
            match self.send_webhook_request(&payload).await {
                Ok(()) => {
                    if attempt > 0 {
                        info!(
                            "Webhook '{}' succeeded on attempt {} after retries",
                            event_name,
                            attempt + 1
                        );
                    }
                    return Ok(());
                }
                Err(e) => {
                    // Sleep before the next attempt; the final failure falls
                    // through without sleeping.
                    if attempt < Self::MAX_RETRIES {
                        let delay = Self::calculate_retry_delay(attempt);
                        warn!(
                            "Webhook '{}' attempt {} failed: {}, retrying in {:?}",
                            event_name,
                            attempt + 1,
                            e,
                            delay
                        );
                        tokio::time::sleep(delay).await;
                    }
                    last_error = Some(e);
                }
            }
        }
        let error = last_error.unwrap_or_else(|| anyhow::anyhow!("Unknown error"));
        error!(
            "Failed to send webhook '{}' to {} after {} attempts: {}",
            event_name,
            self.webhook_url,
            Self::MAX_RETRIES + 1,
            error
        );
        Err(error)
    }

    /// Fire-and-forget: spawn the retrying send on the runtime and return
    /// immediately. The plugin derives `Clone`, so the spawned task simply
    /// gets its own handle instead of the previous hand-rolled
    /// field-by-field reconstruction of the struct.
    fn send_event(&self, event: WebhookEvent) -> Result<(), Error> {
        let payload = WebhookPayload::new(event);
        let plugin = self.clone();
        tokio::spawn(async move {
            if let Err(e) = plugin.send_with_retry(payload).await {
                // Error already logged in send_with_retry
                let _ = e;
            }
        });
        Ok(())
    }

    /// Emit a `node.status_changed` event for `node`.
    pub fn send_node_status_changed(
        &self,
        node: &OrchestratorNode,
        old_status: &NodeStatus,
    ) -> Result<(), Error> {
        let event = WebhookEvent::NodeStatusChanged {
            node_address: node.address.to_string(),
            ip_address: node.ip_address.clone(),
            port: node.port,
            old_status: old_status.to_string(),
            new_status: node.status.to_string(),
        };
        self.send_event(event)
    }

    /// Emit a `group.created` event.
    pub fn send_group_created(
        &self,
        group_id: String,
        configuration_name: String,
        nodes: Vec<String>,
    ) -> Result<(), Error> {
        let event = WebhookEvent::GroupCreated {
            group_id,
            configuration_name,
            nodes,
        };
        self.send_event(event)
    }

    /// Emit a `group.destroyed` event.
    pub fn send_group_destroyed(
        &self,
        group_id: String,
        configuration_name: String,
        nodes: Vec<String>,
    ) -> Result<(), Error> {
        let event = WebhookEvent::GroupDestroyed {
            group_id,
            configuration_name,
            nodes,
        };
        self.send_event(event)
    }

    /// Emit a `metrics.updated` event for `pool_id`.
    pub fn send_metrics_updated(
        &self,
        pool_id: u32,
        metrics: std::collections::HashMap<String, f64>,
    ) -> Result<(), Error> {
        let event = WebhookEvent::MetricsUpdated { pool_id, metrics };
        self.send_event(event)
    }

    /// Send a status-change webhook unless the status is unchanged or the
    /// new status is one that is deliberately not broadcast (Unhealthy,
    /// Discovered). Send failures are logged, never propagated.
    pub(crate) fn handle_status_change(
        &self,
        node: &OrchestratorNode,
        old_status: &NodeStatus,
    ) -> Result<(), Error> {
        if *old_status == node.status
            || node.status == NodeStatus::Unhealthy
            || node.status == NodeStatus::Discovered
        {
            return Ok(());
        }
        if let Err(e) = self.send_node_status_changed(node, old_status) {
            error!("Failed to send webhook to {}: {}", self.webhook_url, e);
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use super::*;
use alloy::primitives::Address;
use anyhow::Result;
use mockito::Server;
use std::str::FromStr;
/// Build a minimal node fixture with the given status; remaining fields
/// take their defaults.
fn create_test_node(status: NodeStatus) -> OrchestratorNode {
    let address = Address::from_str("0x1234567890123456789012345678901234567890").unwrap();
    OrchestratorNode {
        address,
        ip_address: String::from("127.0.0.1"),
        port: 8080,
        status,
        ..Default::default()
    }
}
// Test-only helper: like `send_event`, but awaits the retrying send rather
// than spawning it, so tests can observe the final result.
impl WebhookPlugin {
    async fn send_event_blocking(&self, event: WebhookEvent) -> Result<(), Error> {
        let payload = WebhookPayload::new(event);
        self.send_with_retry(payload).await
    }
}
#[tokio::test]
async fn test_webhook_sends_on_status_change() {
    let mut server = Server::new_async().await;
    // NOTE(review): this mock is never asserted — delivery happens on a
    // spawned task, so asserting it here would race; the test only checks
    // that handle_status_change itself returns Ok.
    let _mock = server
        .mock("POST", "/webhook")
        .with_status(200)
        .match_body(mockito::Matcher::Json(serde_json::json!({
            "event": "node.status_changed",
            "data": {
                "node_address": "0x1234567890123456789012345678901234567890",
                "ip_address": "127.0.0.1",
                "port": 8080,
                "old_status": "Dead",
                "new_status": "Healthy"
            },
            "timestamp": "2024-01-01T00:00:00Z"
        })))
        .create();
    let plugin = WebhookPlugin::new(WebhookConfig {
        url: format!("{}/webhook", server.url()),
        bearer_token: None,
    });
    let node = create_test_node(NodeStatus::Healthy);
    let result = plugin.handle_status_change(&node, &NodeStatus::Dead);
    assert!(result.is_ok());
}
#[tokio::test]
async fn test_webhook_sends_on_group_created() {
    let mut server = Server::new_async().await;
    // The body matcher pins the adjacently-tagged JSON shape; the blocking
    // send below only succeeds if the mock accepts the request.
    let _mock = server
        .mock("POST", "/webhook")
        .with_status(200)
        .match_body(mockito::Matcher::Json(serde_json::json!({
            "event": "group.created",
            "data": {
                "group_id": "1234567890",
                "configuration_name": "test_configuration",
                "nodes": ["0x1234567890123456789012345678901234567890"]
            },
            "timestamp": "2024-01-01T00:00:00Z"
        })))
        .create();
    let plugin = WebhookPlugin::new(WebhookConfig {
        url: format!("{}/webhook", server.url()),
        bearer_token: None,
    });
    let group_id = "1234567890";
    let configuration_name = "test_configuration";
    let nodes = vec!["0x1234567890123456789012345678901234567890".to_string()];
    let event = WebhookEvent::GroupCreated {
        group_id: group_id.to_string(),
        configuration_name: configuration_name.to_string(),
        nodes,
    };
    // Blocking variant so the result reflects the actual HTTP exchange.
    let result = plugin.send_event_blocking(event).await;
    assert!(result.is_ok());
}
#[tokio::test]
async fn test_webhook_sends_on_metrics_updated() {
let mut server = Server::new_async().await;
let mock = server
.mock("POST", "/webhook")
.with_status(200)
.match_body(mockito::Matcher::Json(serde_json::json!({
"event": "metrics.updated",
"data": {
"pool_id": 1,
"test_metric": 1.0,
"metric_2": 2.0
},
"timestamp": "2024-01-01T00:00:00Z"
})))
.create();
let plugin = WebhookPlugin::new(WebhookConfig {
url: format!("{}/webhook", server.url()),
bearer_token: None,
});
let mut metrics = std::collections::HashMap::new();
metrics.insert("test_metric".to_string(), 1.0);
metrics.insert("metric_2".to_string(), 2.0);
let event = WebhookEvent::MetricsUpdated {
pool_id: 1,
metrics,
};
let result = plugin.send_event_blocking(event).await;
assert!(result.is_ok());
mock.assert_async().await;
}
#[tokio::test]
async fn test_with_bearer_token() {
let mut server = Server::new_async().await;
let mock = server
.mock("POST", "/webhook")
.with_status(200)
.match_header("Authorization", "Bearer test_token")
.match_body(mockito::Matcher::Json(serde_json::json!({
"event": "metrics.updated",
"data": {
"pool_id": 1,
"metric_2": 2.0,
"test_metric": 1.0
},
"timestamp": "2024-01-01T00:00:00Z"
})))
.create();
let plugin = WebhookPlugin::new(WebhookConfig {
url: format!("{}/webhook", server.url()),
bearer_token: Some("test_token".to_string()),
});
let mut metrics = std::collections::HashMap::new();
metrics.insert("test_metric".to_string(), 1.0);
metrics.insert("metric_2".to_string(), 2.0);
let event = WebhookEvent::MetricsUpdated {
pool_id: 1,
metrics,
};
let result = plugin.send_event_blocking(event).await;
assert!(result.is_ok());
mock.assert_async().await;
}
#[tokio::test]
async fn test_webhook_retry_logic() {
let mut server = Server::new_async().await;
// First two attempts fail, third succeeds
let _mock1 = server
.mock("POST", "/webhook")
.with_status(500)
.expect(1)
.create();
let _mock2 = server
.mock("POST", "/webhook")
.with_status(502)
.expect(1)
.create();
let _mock3 = server
.mock("POST", "/webhook")
.with_status(200)
.expect(1)
.create();
let plugin = WebhookPlugin::new(WebhookConfig {
url: format!("{}/webhook", server.url()),
bearer_token: None,
});
let mut metrics = std::collections::HashMap::new();
metrics.insert("test_metric".to_string(), 1.0);
let result = plugin
.send_event_blocking(WebhookEvent::MetricsUpdated {
pool_id: 1,
metrics,
})
.await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_webhook_max_retries_exceeded() {
let mut server = Server::new_async().await;
// All attempts fail
let _mock = server
.mock("POST", "/webhook")
.with_status(500)
.expect(6) // 1 initial + 5 retries
.create();
let plugin = WebhookPlugin::new(WebhookConfig {
url: format!("{}/webhook", server.url()),
bearer_token: None,
});
let mut metrics = std::collections::HashMap::new();
metrics.insert("test_metric".to_string(), 1.0);
let result = plugin
.send_event_blocking(WebhookEvent::MetricsUpdated {
pool_id: 1,
metrics,
})
.await;
assert!(result.is_err());
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/orchestrator/src/p2p/mod.rs | crates/orchestrator/src/p2p/mod.rs | use anyhow::{bail, Context as _, Result};
use futures::stream::FuturesUnordered;
use futures::FutureExt;
use p2p::{Keypair, Protocols};
use shared::p2p::OutgoingRequest;
use shared::p2p::Service as P2PService;
use shared::web3::wallet::Wallet;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio_util::sync::CancellationToken;
pub struct Service {
inner: P2PService,
outgoing_message_tx: Sender<OutgoingRequest>,
invite_rx: Receiver<InviteRequest>,
get_task_logs_rx: Receiver<GetTaskLogsRequest>,
restart_task_rx: Receiver<RestartTaskRequest>,
}
impl Service {
#[allow(clippy::type_complexity)]
pub fn new(
keypair: Keypair,
port: u16,
cancellation_token: CancellationToken,
wallet: Wallet,
) -> Result<(
Self,
Sender<InviteRequest>,
Sender<GetTaskLogsRequest>,
Sender<RestartTaskRequest>,
)> {
let (invite_tx, invite_rx) = tokio::sync::mpsc::channel(100);
let (get_task_logs_tx, get_task_logs_rx) = tokio::sync::mpsc::channel(100);
let (restart_task_tx, restart_task_rx) = tokio::sync::mpsc::channel(100);
let (inner, outgoing_message_tx) = P2PService::new(
keypair,
port,
cancellation_token.clone(),
wallet,
Protocols::new()
.with_invite()
.with_get_task_logs()
.with_restart()
.with_authentication(),
)
.context("failed to create p2p service")?;
Ok((
Self {
inner,
outgoing_message_tx,
invite_rx,
get_task_logs_rx,
restart_task_rx,
},
invite_tx,
get_task_logs_tx,
restart_task_tx,
))
}
pub async fn run(self) -> Result<()> {
use futures::StreamExt as _;
let Self {
inner,
outgoing_message_tx,
mut invite_rx,
mut get_task_logs_rx,
mut restart_task_rx,
} = self;
tokio::task::spawn(inner.run());
let mut futures = FuturesUnordered::new();
loop {
tokio::select! {
Some(request) = invite_rx.recv() => {
let (incoming_resp_tx, incoming_resp_rx) = tokio::sync::oneshot::channel();
let fut = async move {
let p2p::Response::Invite(resp) = incoming_resp_rx.await.context("outgoing request tx channel was dropped")? else {
bail!("unexpected response type for invite request");
};
request.response_tx.send(resp).map_err(|_|anyhow::anyhow!("caller dropped response channel"))?;
Ok(())
}.boxed();
futures.push(fut);
let outgoing_request = OutgoingRequest {
peer_wallet_address: request.worker_wallet_address,
peer_id: request.worker_p2p_id,
multiaddrs: request.worker_addresses,
request: request.invite.into(),
response_tx: incoming_resp_tx,
};
outgoing_message_tx.send(outgoing_request).await
.context("failed to send outgoing invite request")?;
}
Some(request) = get_task_logs_rx.recv() => {
let (incoming_resp_tx, incoming_resp_rx) = tokio::sync::oneshot::channel();
let fut = async move {
let p2p::Response::GetTaskLogs(resp) = incoming_resp_rx.await.context("outgoing request tx channel was dropped")? else {
bail!("unexpected response type for get task logs request");
};
request.response_tx.send(resp).map_err(|_|anyhow::anyhow!("caller dropped response channel"))?;
Ok(())
}.boxed();
futures.push(fut);
let outgoing_request = OutgoingRequest {
peer_wallet_address: request.worker_wallet_address,
peer_id: request.worker_p2p_id,
multiaddrs: request.worker_addresses,
request: p2p::Request::GetTaskLogs,
response_tx: incoming_resp_tx,
};
outgoing_message_tx.send(outgoing_request).await
.context("failed to send outgoing get task logs request")?;
}
Some(request) = restart_task_rx.recv() => {
let (incoming_resp_tx, incoming_resp_rx) = tokio::sync::oneshot::channel();
let fut = async move {
let p2p::Response::RestartTask(resp) = incoming_resp_rx.await.context("outgoing request tx channel was dropped")? else {
bail!("unexpected response type for restart task request");
};
request.response_tx.send(resp).map_err(|_|anyhow::anyhow!("caller dropped response channel"))?;
Ok(())
}.boxed();
futures.push(fut);
let outgoing_request = OutgoingRequest {
peer_wallet_address: request.worker_wallet_address,
peer_id: request.worker_p2p_id,
multiaddrs: request.worker_addresses,
request: p2p::Request::RestartTask,
response_tx: incoming_resp_tx,
};
outgoing_message_tx.send(outgoing_request).await
.context("failed to send outgoing restart task request")?;
}
Some(res) = futures.next() => {
if let Err(e) = res {
log::error!("failed to handle response conversion: {e}");
}
}
}
}
}
}
pub struct InviteRequest {
pub(crate) worker_wallet_address: alloy::primitives::Address,
pub(crate) worker_p2p_id: String,
pub(crate) worker_addresses: Vec<String>,
pub(crate) invite: p2p::InviteRequest,
pub(crate) response_tx: tokio::sync::oneshot::Sender<p2p::InviteResponse>,
}
pub struct GetTaskLogsRequest {
pub(crate) worker_wallet_address: alloy::primitives::Address,
pub(crate) worker_p2p_id: String,
pub(crate) worker_addresses: Vec<String>,
pub(crate) response_tx: tokio::sync::oneshot::Sender<p2p::GetTaskLogsResponse>,
}
pub struct RestartTaskRequest {
pub(crate) worker_wallet_address: alloy::primitives::Address,
pub(crate) worker_p2p_id: String,
pub(crate) worker_addresses: Vec<String>,
pub(crate) response_tx: tokio::sync::oneshot::Sender<p2p::RestartTaskResponse>,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/lib.rs | crates/validator/src/lib.rs | mod metrics;
mod p2p;
mod store;
mod validators;
pub use metrics::export_metrics;
pub use metrics::MetricsContext;
pub use p2p::Service as P2PService;
pub use store::redis::RedisStore;
pub use validators::hardware::HardwareValidator;
pub use validators::synthetic_data::types::InvalidationType;
pub use validators::synthetic_data::SyntheticDataValidator;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/main.rs | crates/validator/src/main.rs | use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer, Responder};
use alloy::primitives::utils::Unit;
use alloy::primitives::{Address, U256};
use anyhow::{Context, Result};
use clap::Parser;
use log::{debug, LevelFilter};
use log::{error, info};
use serde_json::json;
use shared::models::api::ApiResponse;
use shared::models::node::DiscoveryNode;
use shared::security::api_key_middleware::ApiKeyMiddleware;
use shared::security::request_signer::sign_request_with_nonce;
use shared::utils::google_cloud::GcsStorageProvider;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::Arc;
use std::time::Duration;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
use tokio::signal::unix::{signal, SignalKind};
use tokio_util::sync::CancellationToken;
use url::Url;
use validator::{
export_metrics, HardwareValidator, InvalidationType, MetricsContext, P2PService, RedisStore,
SyntheticDataValidator,
};
// Track the last time the validation loop ran
static LAST_VALIDATION_TIMESTAMP: AtomicI64 = AtomicI64::new(0);
// Maximum allowed time between validation loops (2 minutes)
const MAX_VALIDATION_INTERVAL_SECS: i64 = 120;
// Track the last loop duration in milliseconds
static LAST_LOOP_DURATION_MS: AtomicI64 = AtomicI64::new(0);
async fn get_rejections(
req: HttpRequest,
validator: web::Data<Option<SyntheticDataValidator<shared::web3::wallet::WalletProvider>>>,
) -> impl Responder {
match validator.as_ref() {
Some(validator) => {
// Parse query parameters
let query = req.query_string();
let limit = parse_limit_param(query).unwrap_or(100); // Default limit of 100
let result = if limit > 0 && limit < 1000 {
// Use the optimized recent rejections method for reasonable limits
validator.get_recent_rejections(limit as isize).await
} else {
// Fallback to all rejections (but warn about potential performance impact)
if limit >= 1000 {
info!("Large limit requested ({limit}), this may impact performance");
}
validator.get_all_rejections().await
};
match result {
Ok(rejections) => HttpResponse::Ok().json(ApiResponse {
success: true,
data: rejections,
}),
Err(e) => {
error!("Failed to get rejections: {e}");
HttpResponse::InternalServerError().json(ApiResponse {
success: false,
data: format!("Failed to get rejections: {e}"),
})
}
}
}
None => HttpResponse::ServiceUnavailable().json(ApiResponse {
success: false,
data: "Synthetic data validator not available",
}),
}
}
fn parse_limit_param(query: &str) -> Option<u32> {
for pair in query.split('&') {
if let Some((key, value)) = pair.split_once('=') {
if key == "limit" {
return value.parse::<u32>().ok();
}
}
}
None
}
async fn health_check() -> impl Responder {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64;
let last_validation = LAST_VALIDATION_TIMESTAMP.load(Ordering::Relaxed);
let last_duration_ms = LAST_LOOP_DURATION_MS.load(Ordering::Relaxed);
if last_validation == 0 {
// Validation hasn't run yet, but we're still starting up
return HttpResponse::Ok().json(json!({
"status": "starting",
"message": "Validation loop hasn't started yet"
}));
}
let elapsed = now - last_validation;
if elapsed > MAX_VALIDATION_INTERVAL_SECS {
return HttpResponse::ServiceUnavailable().json(json!({
"status": "error",
"message": format!("Validation loop hasn't run in {} seconds (max allowed: {})", elapsed, MAX_VALIDATION_INTERVAL_SECS),
"last_loop_duration_ms": last_duration_ms
}));
}
HttpResponse::Ok().json(json!({
"status": "ok",
"last_validation_seconds_ago": elapsed,
"last_loop_duration_ms": last_duration_ms
}))
}
#[derive(Parser)]
struct Args {
/// RPC URL
#[arg(short = 'r', long, default_value = "http://localhost:8545")]
rpc_url: String,
/// Owner key
#[arg(short = 'k', long)]
validator_key: String,
/// Discovery URLs (comma-separated)
#[arg(long, default_value = "http://localhost:8089", value_delimiter = ',')]
discovery_urls: Vec<String>,
/// Ability to disable hardware validation
#[arg(long, default_value = "false")]
disable_hardware_validation: bool,
/// Optional: Pool Id for work validation
/// If not provided, the validator will not validate work
#[arg(long, default_value = None)]
pool_id: Option<String>,
/// Optional: Toploc Grace Interval in seconds between work validation requests
#[arg(long, default_value = "15")]
toploc_grace_interval: u64,
/// Optional: interval in minutes of max age of work on chain
#[arg(long, default_value = "15")]
toploc_work_validation_interval: u64,
/// Optional: interval in minutes of max age of work on chain
#[arg(long, default_value = "120")]
toploc_work_validation_unknown_status_expiry_seconds: u64,
/// Disable toploc ejection
/// If true, the validator will not invalidate work on toploc
#[arg(long, default_value = "false")]
disable_toploc_invalidation: bool,
/// Optional: batch trigger size
#[arg(long, default_value = "10")]
batch_trigger_size: usize,
/// Grouping
#[arg(long, default_value = "false")]
use_grouping: bool,
/// Grace period in minutes for incomplete groups to recover (0 = disabled)
#[arg(long, default_value = "0")]
incomplete_group_grace_period_minutes: u64,
/// Optional: toploc invalidation type
#[arg(long, default_value = "hard")]
toploc_invalidation_type: InvalidationType,
/// Optional: work unit invalidation type
#[arg(long, default_value = "hard")]
work_unit_invalidation_type: InvalidationType,
/// Optional: Validator penalty in whole tokens
/// Note: This value will be multiplied by 10^18 (1 token = 10^18 wei)
#[arg(long, default_value = "200")]
validator_penalty: u64,
/// Temporary: S3 bucket name
#[arg(long, default_value = None)]
bucket_name: Option<String>,
/// Log level
#[arg(short = 'l', long, default_value = "info")]
log_level: String,
/// Redis URL
#[arg(long, default_value = "redis://localhost:6380")]
redis_url: String,
/// Libp2p port
#[arg(long, default_value = "4003")]
libp2p_port: u16,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args = Args::parse();
let log_level = match args.log_level.as_str() {
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"info" => LevelFilter::Info,
"debug" => LevelFilter::Debug,
"trace" => LevelFilter::Trace,
_ => anyhow::bail!("invalid log level: {}", args.log_level),
};
env_logger::Builder::new()
.filter_level(log_level)
.filter_module("iroh", log::LevelFilter::Warn)
.filter_module("iroh_net", log::LevelFilter::Warn)
.filter_module("iroh_quinn", log::LevelFilter::Warn)
.filter_module("iroh_base", log::LevelFilter::Warn)
.filter_module("tracing::span", log::LevelFilter::Warn)
.format_timestamp(None)
.init();
let cancellation_token = CancellationToken::new();
let mut sigterm = signal(SignalKind::terminate())?;
let mut sigint = signal(SignalKind::interrupt())?;
let mut sighup = signal(SignalKind::hangup())?;
let mut sigquit = signal(SignalKind::quit())?;
let signal_token = cancellation_token.clone();
let cancellation_token_clone = cancellation_token.clone();
tokio::spawn(async move {
tokio::select! {
_ = sigterm.recv() => {
log::info!("Received termination signal");
}
_ = sigint.recv() => {
log::info!("Received interrupt signal");
}
_ = sighup.recv() => {
log::info!("Received hangup signal");
}
_ = sigquit.recv() => {
log::info!("Received quit signal");
}
}
signal_token.cancel();
});
let private_key_validator = args.validator_key;
let rpc_url: Url = args.rpc_url.parse().unwrap();
let discovery_urls = args.discovery_urls;
let redis_store = RedisStore::new(&args.redis_url);
let validator_wallet = Wallet::new(&private_key_validator, rpc_url).unwrap_or_else(|err| {
error!("Error creating wallet: {err:?}");
std::process::exit(1);
});
let mut contract_builder = ContractBuilder::new(validator_wallet.provider())
.with_compute_registry()
.with_ai_token()
.with_prime_network()
.with_compute_pool()
.with_domain_registry()
.with_stake_manager();
let contracts = contract_builder.build_partial().unwrap();
let metrics_ctx =
MetricsContext::new(validator_wallet.address().to_string(), args.pool_id.clone());
// Initialize P2P client if enabled
let keypair = p2p::Keypair::generate_ed25519();
let (p2p_service, hardware_challenge_tx) = {
match P2PService::new(
keypair,
args.libp2p_port,
cancellation_token.clone(),
validator_wallet.clone(),
) {
Ok(res) => {
info!("p2p service initialized successfully");
res
}
Err(e) => {
error!("failed to initialize p2p service: {e}");
std::process::exit(1);
}
}
};
tokio::task::spawn(p2p_service.run());
if let Some(pool_id) = args.pool_id.clone() {
let pool = match contracts
.compute_pool
.get_pool_info(U256::from_str(&pool_id).unwrap())
.await
{
Ok(pool_info) => pool_info,
Err(e) => {
error!("Failed to get pool info: {e:?}");
std::process::exit(1);
}
};
let domain_id: u32 = pool.domain_id.try_into().unwrap();
let domain = contracts
.domain_registry
.as_ref()
.unwrap()
.get_domain(domain_id)
.await
.unwrap();
contract_builder =
contract_builder.with_synthetic_data_validator(Some(domain.validation_logic));
}
let contracts = contract_builder.build().unwrap();
let hardware_validator = HardwareValidator::new(contracts.clone(), hardware_challenge_tx);
let synthetic_validator = if let Some(pool_id) = args.pool_id.clone() {
let penalty = U256::from(args.validator_penalty) * Unit::ETHER.wei();
match contracts.synthetic_data_validator.clone() {
Some(validator) => {
info!(
"Synthetic validator has penalty: {} ({})",
penalty, args.validator_penalty
);
let Ok(toploc_configs) = std::env::var("TOPLOC_CONFIGS") else {
error!("Toploc configs are required but not provided in environment");
std::process::exit(1);
};
info!("Toploc configs: {toploc_configs}");
let configs = match serde_json::from_str(&toploc_configs) {
Ok(configs) => configs,
Err(e) => {
error!("Failed to parse toploc configs: {e}");
std::process::exit(1);
}
};
let s3_credentials = std::env::var("S3_CREDENTIALS").ok();
match (args.bucket_name.as_ref(), s3_credentials) {
(Some(bucket_name), Some(s3_credentials))
if !bucket_name.is_empty() && !s3_credentials.is_empty() =>
{
let gcs_storage = GcsStorageProvider::new(bucket_name, &s3_credentials)
.await
.unwrap_or_else(|_| panic!("Failed to create GCS storage provider"));
let storage_provider = Arc::new(gcs_storage);
Some(SyntheticDataValidator::new(
pool_id,
validator,
contracts.prime_network.clone(),
configs,
penalty,
storage_provider,
redis_store,
cancellation_token,
args.toploc_work_validation_interval,
args.toploc_work_validation_unknown_status_expiry_seconds,
args.toploc_grace_interval,
args.batch_trigger_size,
args.use_grouping,
args.disable_toploc_invalidation,
args.incomplete_group_grace_period_minutes,
args.toploc_invalidation_type,
args.work_unit_invalidation_type,
Some(metrics_ctx.clone()),
))
}
_ => {
info!("Bucket name or S3 credentials not provided, skipping synthetic data validation");
None
}
}
}
None => {
error!("Synthetic data validator not found");
std::process::exit(1);
}
}
} else {
None
};
// Start HTTP server with access to the validator
let validator_for_server = synthetic_validator.clone();
tokio::spawn(async move {
let key = std::env::var("VALIDATOR_API_KEY").unwrap_or_default();
let api_key_middleware = Arc::new(ApiKeyMiddleware::new(key));
if let Err(e) = HttpServer::new(move || {
App::new()
.app_data(web::Data::new(validator_for_server.clone()))
.route("/health", web::get().to(health_check))
.route(
"/rejections",
web::get()
.to(get_rejections)
.wrap(api_key_middleware.clone()),
)
.route(
"/metrics",
web::get().to(|| async {
match export_metrics() {
Ok(metrics) => {
HttpResponse::Ok().content_type("text/plain").body(metrics)
}
Err(e) => {
error!("Error exporting metrics: {e:?}");
HttpResponse::InternalServerError().finish()
}
}
}),
)
})
.bind("0.0.0.0:9879")
.expect("Failed to bind health check server")
.run()
.await
{
error!("Actix server error: {e:?}");
}
});
loop {
if cancellation_token_clone.is_cancelled() {
log::info!("Validation loop is stopping due to cancellation signal");
break;
}
// Start timing the loop
let loop_start = Instant::now();
// Update the last validation timestamp
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs() as i64;
LAST_VALIDATION_TIMESTAMP.store(now, Ordering::Relaxed);
if let Some(validator) = synthetic_validator.clone() {
if let Err(e) = validator.validate_work().await {
error!("Failed to validate work: {e}");
}
}
if !args.disable_hardware_validation {
async fn _fetch_nodes_from_discovery_url(
discovery_url: &str,
validator_wallet: &Wallet,
) -> Result<Vec<DiscoveryNode>> {
let address = validator_wallet
.wallet
.default_signer()
.address()
.to_string();
let discovery_route = "/api/validator";
let signature = sign_request_with_nonce(discovery_route, validator_wallet, None)
.await
.map_err(|e| anyhow::anyhow!("{}", e))?;
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
"x-address",
reqwest::header::HeaderValue::from_str(&address)
.context("Failed to create address header")?,
);
headers.insert(
"x-signature",
reqwest::header::HeaderValue::from_str(&signature.signature)
.context("Failed to create signature header")?,
);
debug!("Fetching nodes from: {discovery_url}{discovery_route}");
let response = reqwest::Client::new()
.get(format!("{discovery_url}{discovery_route}"))
.query(&[("nonce", signature.nonce)])
.headers(headers)
.timeout(Duration::from_secs(10))
.send()
.await
.context("Failed to fetch nodes")?;
let response_text = response
.text()
.await
.context("Failed to get response text")?;
let parsed_response: ApiResponse<Vec<DiscoveryNode>> =
serde_json::from_str(&response_text).context("Failed to parse response")?;
if !parsed_response.success {
error!("Failed to fetch nodes from {discovery_url}: {parsed_response:?}");
return Ok(vec![]);
}
Ok(parsed_response.data)
}
let nodes = match async {
let mut all_nodes = Vec::new();
let mut any_success = false;
for discovery_url in &discovery_urls {
match _fetch_nodes_from_discovery_url(discovery_url, &validator_wallet).await {
Ok(nodes) => {
debug!(
"Successfully fetched {} nodes from {}",
nodes.len(),
discovery_url
);
all_nodes.extend(nodes);
any_success = true;
}
Err(e) => {
error!("Failed to fetch nodes from {discovery_url}: {e:#}");
}
}
}
if !any_success {
error!("Failed to fetch nodes from all discovery services");
return Ok::<Vec<DiscoveryNode>, anyhow::Error>(vec![]);
}
// Remove duplicates based on node ID
let mut unique_nodes = Vec::new();
let mut seen_ids = std::collections::HashSet::new();
for node in all_nodes {
if seen_ids.insert(node.node.id.clone()) {
unique_nodes.push(node);
}
}
debug!(
"Total unique nodes after deduplication: {}",
unique_nodes.len()
);
Ok(unique_nodes)
}
.await
{
Ok(n) => n,
Err(e) => {
error!("Error in node fetching loop: {e:#}");
std::thread::sleep(std::time::Duration::from_secs(10));
continue;
}
};
// Ensure nodes have enough stake
let mut nodes_with_enough_stake = Vec::new();
let Some(stake_manager) = contracts.stake_manager.as_ref() else {
error!("Stake manager contract not initialized");
continue;
};
let mut provider_stake_cache: std::collections::HashMap<String, (U256, U256)> =
std::collections::HashMap::new();
for node in nodes {
let provider_address_str = &node.node.provider_address;
let provider_address = match Address::from_str(provider_address_str) {
Ok(address) => address,
Err(e) => {
error!("Failed to parse provider address {provider_address_str}: {e}");
continue;
}
};
let (stake, required_stake) =
if let Some(&cached_info) = provider_stake_cache.get(provider_address_str) {
cached_info
} else {
let stake = stake_manager
.get_stake(provider_address)
.await
.unwrap_or_default();
let total_compute = contracts
.compute_registry
.get_provider_total_compute(provider_address)
.await
.unwrap_or_default();
let required_stake = stake_manager
.calculate_stake(U256::from(0), total_compute)
.await
.unwrap_or_default();
provider_stake_cache
.insert(provider_address_str.clone(), (stake, required_stake));
(stake, required_stake)
};
if stake >= required_stake {
nodes_with_enough_stake.push(node);
} else {
info!(
"Node {} has insufficient stake: {} (required: {})",
node.node.id,
stake / Unit::ETHER.wei(),
required_stake / Unit::ETHER.wei()
);
}
}
if let Err(e) = hardware_validator
.validate_nodes(nodes_with_enough_stake)
.await
{
error!("Error validating nodes: {e:#}");
}
}
// Calculate and store loop duration
let loop_duration = loop_start.elapsed();
let loop_duration_ms = loop_duration.as_millis() as i64;
LAST_LOOP_DURATION_MS.store(loop_duration_ms, Ordering::Relaxed);
metrics_ctx.record_validation_loop_duration(loop_duration.as_secs_f64());
info!("Validation loop completed in {loop_duration_ms}ms");
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
}
Ok(())
}
#[cfg(test)]
mod tests {
use actix_web::{test, App};
use actix_web::{
web::{self, post},
HttpResponse, Scope,
};
use p2p::{calc_matrix, ChallengeRequest, ChallengeResponse, FixedF64};
async fn handle_challenge(challenge: web::Json<ChallengeRequest>) -> HttpResponse {
let result = calc_matrix(&challenge);
HttpResponse::Ok().json(result)
}
fn challenge_routes() -> Scope {
web::scope("/challenge")
.route("", post().to(handle_challenge))
.route("/", post().to(handle_challenge))
}
#[actix_web::test]
async fn test_challenge_route() {
let app = test::init_service(App::new().service(challenge_routes())).await;
let vec_a = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0];
let vec_b = [9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0];
// convert vectors to FixedF64
let data_a: Vec<FixedF64> = vec_a.iter().map(|x| FixedF64(*x)).collect();
let data_b: Vec<FixedF64> = vec_b.iter().map(|x| FixedF64(*x)).collect();
let challenge_request = ChallengeRequest {
rows_a: 3,
cols_a: 3,
data_a,
rows_b: 3,
cols_b: 3,
data_b,
timestamp: None,
};
let req = test::TestRequest::post()
.uri("/challenge")
.set_json(&challenge_request)
.to_request();
let resp: ChallengeResponse = test::call_and_read_body_json(&app, req).await;
let expected_response = calc_matrix(&challenge_request);
assert_eq!(resp.result, expected_response.result);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/metrics.rs | crates/validator/src/metrics.rs | use lazy_static::lazy_static;
use prometheus::{
register_counter_vec, register_gauge_vec, register_histogram_vec, CounterVec, GaugeVec,
HistogramVec, TextEncoder,
};
lazy_static! {
pub static ref VALIDATION_LOOP_DURATION: HistogramVec = register_histogram_vec!(
"validator_validation_loop_duration_seconds",
"Duration of the validation loop",
&["validator_id", "pool_id"],
vec![0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 30.0, 60.0, 120.0, 300.0]
).unwrap();
// === SYNTHETIC DATA VALIDATION METRICS ===
// Total work keys invalidated
pub static ref WORK_KEYS_INVALIDATED: CounterVec = register_counter_vec!(
"validator_work_keys_invalidated_total",
"Total work keys invalidated",
&["validator_id", "pool_id"]
).unwrap();
pub static ref WORK_KEYS_SOFT_INVALIDATED: CounterVec = register_counter_vec!(
"validator_work_keys_soft_invalidated_total",
"Total work keys soft invalidated",
&["validator_id", "pool_id", "group_key"]
).unwrap();
// Errors in synthetic data validation
/// Total work keys processed by the validator
pub static ref WORK_KEYS_TO_PROCESS: GaugeVec = register_gauge_vec!(
"validator_work_keys_to_process",
"Total work keys to process in current validation loop (depends on settings)",
&["validator_id", "pool_id"]
).unwrap();
pub static ref ERRORS: CounterVec = register_counter_vec!(
"validator_errors_total",
"Total errors",
&["validator_id", "pool_id", "error"]
).unwrap();
pub static ref API_DURATION: HistogramVec = register_histogram_vec!(
"validator_api_duration_seconds",
"API request duration",
&["validator_id", "pool_id", "endpoint"], // endpoint: validate, validategroup, status, statusgroup
vec![0.1, 0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0]
).unwrap();
pub static ref API_REQUESTS: CounterVec = register_counter_vec!(
"validator_api_requests_total",
"Total API requests",
&["validator_id", "pool_id", "endpoint", "status"]
).unwrap();
pub static ref GROUP_VALIDATIONS: CounterVec = register_counter_vec!(
"validator_group_validations_total",
"Total group validations by result",
&["validator_id", "pool_id", "group_id", "toploc_config_name", "result"] // result: accept, reject, crashed, pending, unknown
).unwrap();
pub static ref GROUP_WORK_UNITS_CHECK_TOTAL: CounterVec = register_counter_vec!(
"validator_group_work_units_check_total",
"Whether the work units match the group size",
&["validator_id", "pool_id", "group_id", "toploc_config_name", "result"] // result: match, mismatch
).unwrap();
}
pub fn export_metrics() -> Result<String, prometheus::Error> {
let encoder = TextEncoder::new();
let metric_families = prometheus::gather();
encoder.encode_to_string(&metric_families)
}
#[derive(Clone, Debug)]
pub struct MetricsContext {
pub validator_id: String,
pub pool_id: Option<String>,
}
impl MetricsContext {
    /// Builds a context whose labels are applied to every metric it records.
    pub fn new(validator_id: String, pool_id: Option<String>) -> Self {
        Self {
            validator_id,
            pool_id,
        }
    }

    /// Gauge: how many work keys are currently waiting to be processed.
    /// No-op when no pool id is configured.
    pub fn record_work_keys_to_process(&self, count: f64) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        WORK_KEYS_TO_PROCESS
            .with_label_values(&[self.validator_id.as_str(), pool])
            .set(count);
    }

    /// Counter: one work key was invalidated. No-op without a pool id.
    pub fn record_work_key_invalidation(&self) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        WORK_KEYS_INVALIDATED
            .with_label_values(&[self.validator_id.as_str(), pool])
            .inc();
    }

    /// Counter: one soft invalidation for the given group. No-op without a pool id.
    pub fn record_group_soft_invalidation(&self, group_key: &str) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        WORK_KEYS_SOFT_INVALIDATED
            .with_label_values(&[self.validator_id.as_str(), pool, group_key])
            .inc();
    }

    /// Counter: an error occurred while handling a work key. No-op without a pool id.
    pub fn record_work_key_error(&self, error: &str) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        ERRORS
            .with_label_values(&[self.validator_id.as_str(), pool, error])
            .inc();
    }

    /// Counter: outcome of one group validation. No-op without a pool id.
    pub fn record_group_validation_status(
        &self,
        group_id: &str,
        toploc_config_name: &str,
        result: &str,
    ) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        GROUP_VALIDATIONS
            .with_label_values(&[
                self.validator_id.as_str(),
                pool,
                group_id,
                toploc_config_name,
                result,
            ])
            .inc();
    }

    /// Counter: outcome of one group work-units check. No-op without a pool id.
    pub fn record_group_work_units_check_result(
        &self,
        group_id: &str,
        toploc_config_name: &str,
        result: &str,
    ) {
        let Some(pool) = self.pool_id.as_deref() else {
            return;
        };
        GROUP_WORK_UNITS_CHECK_TOTAL
            .with_label_values(&[
                self.validator_id.as_str(),
                pool,
                group_id,
                toploc_config_name,
                result,
            ])
            .inc();
    }

    /// Histogram: duration of one validation loop, in seconds.
    /// Uses the "hw_validator" pool label when no pool id is configured.
    pub fn record_validation_loop_duration(&self, duration_s: f64) {
        let pool = self.pool_id.as_deref().unwrap_or("hw_validator");
        VALIDATION_LOOP_DURATION
            .with_label_values(&[self.validator_id.as_str(), pool])
            .observe(duration_s);
    }

    /// Histogram: duration of one API call to `endpoint`, in seconds.
    pub fn record_api_duration(&self, endpoint: &str, duration_s: f64) {
        let pool = self.pool_id.as_deref().unwrap_or("hw_validator");
        API_DURATION
            .with_label_values(&[self.validator_id.as_str(), pool, endpoint])
            .observe(duration_s);
    }

    /// Counter: one API request with its endpoint and response status.
    pub fn record_api_request(&self, endpoint: &str, status: &str) {
        let pool = self.pool_id.as_deref().unwrap_or("hw_validator");
        API_REQUESTS
            .with_label_values(&[self.validator_id.as_str(), pool, endpoint, status])
            .inc();
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/store/mod.rs | crates/validator/src/store/mod.rs | pub(crate) mod redis;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/store/redis.rs | crates/validator/src/store/redis.rs | #[cfg(test)]
use log::debug;
use log::info;
use redis::Client;
#[cfg(test)]
use redis_test::server::RedisServer;
#[cfg(test)]
use std::sync::Arc;
#[cfg(test)]
use std::thread;
#[cfg(test)]
use std::time::Duration;
/// Thin, cheaply clonable wrapper around a Redis [`Client`].
#[derive(Clone)]
pub struct RedisStore {
    pub client: Client,
    /// Keeps the embedded test Redis server alive for the store's lifetime.
    #[allow(dead_code)]
    #[cfg(test)]
    server: Arc<RedisServer>,
}
impl RedisStore {
    /// Creates a store connected to the Redis instance at `redis_url`.
    ///
    /// # Panics
    /// Panics if the client cannot be created for the given URL.
    pub fn new(redis_url: &str) -> Self {
        match Client::open(redis_url) {
            Ok(client) => {
                info!("Successfully connected to Redis at {redis_url}");
                Self {
                    client,
                    #[cfg(test)]
                    server: Arc::new(RedisServer::new()),
                }
            }
            Err(e) => {
                panic!("Redis connection error: {e}");
            }
        }
    }

    /// Spins up an ephemeral Redis server for tests and connects to it.
    ///
    /// # Panics
    /// Panics if the server is not TCP-addressable or does not answer PING
    /// within the retry budget. (The previous retry loop was unbounded and
    /// could hang a test run forever if the server never came up.)
    #[cfg(test)]
    pub fn new_test() -> Self {
        let server = RedisServer::new();
        // Get the server address
        let (host, port) = match server.client_addr() {
            redis::ConnectionAddr::Tcp(host, port) => (host.clone(), *port),
            _ => panic!("Expected TCP connection"),
        };
        let redis_url = format!("redis://{host}:{port}");
        debug!("Starting test Redis server at {redis_url}");
        // Add a small delay to ensure server is ready
        thread::sleep(Duration::from_millis(100));
        // Try to connect with retry logic, bounded so a broken server fails fast
        // instead of hanging the test suite (100 * 100ms = ~10s budget).
        const MAX_ATTEMPTS: u32 = 100;
        let mut attempts = 0u32;
        let client = loop {
            if let Ok(client) = Client::open(redis_url.clone()) {
                // Verify connection works
                if let Ok(mut conn) = client.get_connection() {
                    if redis::cmd("PING").query::<String>(&mut conn).is_ok() {
                        break client;
                    }
                }
            }
            attempts += 1;
            if attempts >= MAX_ATTEMPTS {
                panic!("test Redis server at {redis_url} did not become ready");
            }
            thread::sleep(Duration::from_millis(100));
        };
        Self {
            client,
            server: Arc::new(server),
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/hardware_challenge.rs | crates/validator/src/validators/hardware_challenge.rs | use alloy::primitives::Address;
use anyhow::{bail, Context as _, Result};
use log::{error, info};
use rand::{rng, Rng};
use shared::models::node::DiscoveryNode;
use std::str::FromStr;
use crate::p2p::HardwareChallengeRequest;
/// Issues matrix-multiplication challenges to worker nodes over the P2P service.
pub(crate) struct HardwareChallenge {
    /// Channel into the P2P service that delivers challenge requests to workers.
    challenge_tx: tokio::sync::mpsc::Sender<HardwareChallengeRequest>,
}
impl HardwareChallenge {
    /// Creates a challenger that sends requests through the given P2P channel.
    pub(crate) fn new(challenge_tx: tokio::sync::mpsc::Sender<HardwareChallengeRequest>) -> Self {
        Self { challenge_tx }
    }

    /// Sends a random 3x3 matrix-product challenge to `node` over P2P and
    /// compares the node's answer with the locally computed expectation.
    ///
    /// # Errors
    /// Fails if the node lacks a P2P id/addresses, its id is not a valid
    /// address, the P2P channel is closed, no response arrives, or the node
    /// returns a wrong result.
    pub(crate) async fn challenge_node(&self, node: &DiscoveryNode) -> Result<()> {
        // Check if node has P2P ID and addresses
        let p2p_id = node
            .node
            .worker_p2p_id
            .clone()
            .ok_or_else(|| anyhow::anyhow!("Node {} does not have P2P ID", node.id))?;
        let p2p_addresses = node
            .node
            .worker_p2p_addresses
            .clone()
            .ok_or_else(|| anyhow::anyhow!("Node {} does not have P2P addresses", node.id))?;

        // Create the random challenge and compute the expected product BEFORE
        // stamping the timestamp; this removes the clone of the matrix data
        // the previous implementation needed.
        let mut challenge = self.random_challenge(3, 3, 3, 3);
        let challenge_expected = p2p::calc_matrix(&challenge);

        // Add timestamp to the challenge
        let current_time = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("system clock is before the Unix epoch")
            .as_secs();
        challenge.timestamp = Some(current_time);

        let node_address = Address::from_str(&node.node.id)
            .map_err(|e| anyhow::anyhow!("Failed to parse node address {}: {}", node.node.id, e))?;
        let (response_tx, response_rx) = tokio::sync::oneshot::channel();
        let hardware_challenge = HardwareChallengeRequest {
            worker_wallet_address: node_address,
            worker_p2p_id: p2p_id,
            worker_addresses: p2p_addresses,
            challenge,
            response_tx,
        };
        // Send challenge via P2P
        self.challenge_tx
            .send(hardware_challenge)
            .await
            .context("failed to send hardware challenge request to p2p service")?;
        let resp = response_rx
            .await
            .context("failed to receive response from node")?;
        if challenge_expected.result == resp.result {
            info!("Challenge for node {} successful", node.id);
        } else {
            error!(
                "Challenge failed for node {}: expected {:?}, got {:?}",
                node.id, challenge_expected.result, resp.result
            );
            bail!("Node failed challenge");
        }
        Ok(())
    }

    /// Builds a pair of random matrices (rows_a x cols_a and rows_b x cols_b,
    /// entries uniform in [0, 1)) with no timestamp set.
    fn random_challenge(
        &self,
        rows_a: usize,
        cols_a: usize,
        rows_b: usize,
        cols_b: usize,
    ) -> p2p::ChallengeRequest {
        use p2p::FixedF64;
        let mut rng = rng();
        // Sample directly into the fixed-point wire type; no intermediate Vec<f64>.
        let data_a: Vec<FixedF64> = (0..(rows_a * cols_a))
            .map(|_| FixedF64(rng.random_range(0.0..1.0)))
            .collect();
        let data_b: Vec<FixedF64> = (0..(rows_b * cols_b))
            .map(|_| FixedF64(rng.random_range(0.0..1.0)))
            .collect();
        p2p::ChallengeRequest {
            rows_a,
            cols_a,
            data_a,
            rows_b,
            cols_b,
            data_b,
            timestamp: None,
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/hardware.rs | crates/validator/src/validators/hardware.rs | use alloy::primitives::Address;
use anyhow::bail;
use anyhow::Result;
use log::{debug, error, info};
use shared::{
models::node::DiscoveryNode,
web3::{contracts::core::builder::Contracts, wallet::WalletProvider},
};
use crate::p2p::HardwareChallengeRequest;
use crate::validators::hardware_challenge::HardwareChallenge;
/// Hardware validator implementation
///
/// NOTE: This is a temporary implementation that will be replaced with a proper
/// hardware validator in the near future. The current implementation only performs
/// basic matrix multiplication challenges and does not verify actual hardware specs.
pub struct HardwareValidator {
    /// On-chain contract bindings used to record successful validations.
    contracts: Contracts<WalletProvider>,
    /// Channel into the P2P service used to deliver hardware challenges.
    challenge_tx: tokio::sync::mpsc::Sender<HardwareChallengeRequest>,
}
impl HardwareValidator {
    /// Builds a validator from chain contract bindings and a P2P challenge channel.
    pub fn new(
        contracts: Contracts<WalletProvider>,
        challenge_tx: tokio::sync::mpsc::Sender<HardwareChallengeRequest>,
    ) -> Self {
        Self {
            contracts,
            challenge_tx,
        }
    }

    /// Challenges one node over P2P and, on success, records the validation on-chain.
    async fn validate_node(&self, node: DiscoveryNode) -> Result<()> {
        let node_address = node
            .id
            .trim_start_matches("0x")
            .parse::<Address>()
            .map_err(|e| anyhow::anyhow!("failed to parse node address: {e:?}"))?;
        let provider_address = node
            .provider_address
            .trim_start_matches("0x")
            .parse::<Address>()
            .map_err(|e| anyhow::anyhow!("failed to parse provider address: {e:?}"))?;

        // Perform hardware challenge
        let challenger = HardwareChallenge::new(self.challenge_tx.clone());
        if let Err(e) = challenger.challenge_node(&node).await {
            bail!("failed to challenge node: {e:?}");
        }

        debug!("Sending validation transaction for node {}", node.id);
        if let Err(e) = self
            .contracts
            .prime_network
            .validate_node(provider_address, node_address)
            .await
        {
            error!("Failed to validate node: {e}");
            return Err(anyhow::anyhow!("Failed to validate node: {}", e));
        }
        // Small delay to ensure nonce incrementation
        tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
        info!("Node {} successfully validated", node.id);
        Ok(())
    }

    /// Validates every not-yet-validated node, one at a time; failures are
    /// logged and do not abort the remaining nodes.
    pub async fn validate_nodes(&self, nodes: Vec<DiscoveryNode>) -> Result<()> {
        let non_validated: Vec<_> = nodes.into_iter().filter(|n| !n.is_validated).collect();
        debug!("Non validated nodes: {non_validated:?}");
        info!("Starting validation for {} nodes", non_validated.len());
        // Process non validated nodes sequentially as simple fix
        // to avoid nonce conflicts for now. Will sophisticate this in the future
        for node in non_validated {
            let node_id = node.id.clone();
            if let Err(e) = self.validate_node(node).await {
                error!("Failed to validate node {node_id}: {e}");
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use shared::models::node::Node;
    use shared::web3::contracts::core::builder::ContractBuilder;
    use shared::web3::wallet::Wallet;
    use std::sync::Arc;
    use url::Url;

    /// Builds a non-validated discovery node fixture listening at `ip`.
    /// Extracted because the original test duplicated this literal twice.
    fn fake_discovery_node(ip: &str) -> DiscoveryNode {
        DiscoveryNode {
            is_validated: false,
            node: Node {
                ip_address: ip.to_string(),
                port: 8080,
                compute_pool_id: 1,
                id: Address::ZERO.to_string(),
                provider_address: Address::ZERO.to_string(),
                ..Default::default()
            },
            is_active: true,
            is_provider_whitelisted: true,
            is_blacklisted: false,
            ..Default::default()
        }
    }

    #[tokio::test]
    async fn test_challenge_node() {
        let coordinator_key = "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97";
        let rpc_url: Url = Url::parse("http://localhost:8545").unwrap();
        let coordinator_wallet = Arc::new(Wallet::new(coordinator_key, rpc_url).unwrap());
        let contracts = ContractBuilder::new(coordinator_wallet.provider())
            .with_compute_registry()
            .with_ai_token()
            .with_prime_network()
            .with_compute_pool()
            .build()
            .unwrap();
        let (tx, _rx) = tokio::sync::mpsc::channel(100);
        let validator = HardwareValidator::new(contracts, tx);

        let nodes = vec![
            fake_discovery_node("192.168.1.1"),
            fake_discovery_node("192.168.1.2"),
        ];

        let start_time = std::time::Instant::now();
        let result = validator.validate_nodes(nodes).await;
        let elapsed = start_time.elapsed();
        // Per-node failures are swallowed, so this must finish quickly and Ok.
        assert!(elapsed < std::time::Duration::from_secs(11));
        println!("Validation took: {:?}", elapsed);
        assert!(result.is_ok());
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/mod.rs | crates/validator/src/validators/mod.rs | pub(crate) mod hardware;
pub(crate) mod hardware_challenge;
pub(crate) mod synthetic_data;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/synthetic_data/types.rs | crates/validator/src/validators/synthetic_data/types.rs | use anyhow::Error;
use regex::Regex;
use serde::{Deserialize, Serialize};
use shared::web3::contracts::implementations::work_validators::synthetic_data_validator::WorkInfo;
use std::fmt;
use std::str::FromStr;
/// Outcome of validating a single work submission.
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
pub(crate) enum ValidationResult {
    /// Work verified successfully.
    Accept,
    /// Work failed verification.
    Reject,
    /// The validation process crashed before producing a verdict.
    Crashed,
    /// Validation was triggered and is awaiting a result.
    Pending,
    /// No verdict yet; stored with an expiry so the trigger can be retried.
    Unknown,
    /// Work was explicitly invalidated.
    Invalidated,
    /// The submission's group never received all expected members.
    IncompleteGroup,
    /// The work key could not be mapped to a file name.
    FileNameResolutionFailed,
}
impl fmt::Display for ValidationResult {
    /// Stable snake_case form used in logs, Redis payloads and metric labels.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            ValidationResult::Accept => "accept",
            ValidationResult::Reject => "reject",
            ValidationResult::Crashed => "crashed",
            ValidationResult::Pending => "pending",
            ValidationResult::Unknown => "unknown",
            ValidationResult::Invalidated => "invalidated",
            ValidationResult::IncompleteGroup => "incomplete_group",
            ValidationResult::FileNameResolutionFailed => "filename_resolution_failed",
        };
        f.write_str(label)
    }
}
/// Validation status for a work key plus an optional human-readable reason.
#[derive(Debug, Serialize, Deserialize, PartialEq)]
pub(crate) struct WorkValidationInfo {
    /// Current validation verdict.
    pub status: ValidationResult,
    /// Optional explanation, e.g. why the work was rejected.
    pub reason: Option<String>,
}
/// Serialized record describing one rejected work key.
#[derive(Debug, Serialize, Deserialize)]
pub struct RejectionInfo {
    /// The rejected work key.
    pub work_key: String,
    /// Optional rejection reason.
    pub reason: Option<String>,
    /// Unix timestamp of the rejection, when known.
    pub timestamp: Option<i64>,
}
/// How failing work is invalidated on-chain (selectable from the CLI).
#[derive(Debug, Clone, Copy, PartialEq, Eq, clap::ValueEnum)]
pub enum InvalidationType {
    /// Soft invalidation — no penalty argument is sent with the chain call.
    Soft,
    /// Hard invalidation — the configured penalty is applied.
    Hard,
}
impl fmt::Display for InvalidationType {
    /// Lowercase form ("soft" / "hard") used in logs and labels.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            InvalidationType::Soft => "soft",
            InvalidationType::Hard => "hard",
        };
        f.write_str(label)
    }
}
/// Errors that can occur while processing a single work key.
#[derive(Debug)]
pub(crate) enum ProcessWorkKeyError {
    /// The work key could not be resolved to a file name.
    FileNameResolutionError(String),
    /// Polling the validation status failed.
    ValidationPollingError(String),
    /// Invalidating the work failed.
    InvalidatingWorkError(String),
    /// Gave up after the maximum number of attempts.
    MaxAttemptsReached(String),
    /// Any other, unclassified error.
    GenericError(anyhow::Error),
    /// No toploc config matched this work.
    NoMatchingToplocConfig(String),
}
impl From<anyhow::Error> for ProcessWorkKeyError {
    /// Wraps any unclassified error as `GenericError`, enabling `?` conversion.
    fn from(err: anyhow::Error) -> Self {
        Self::GenericError(err)
    }
}
impl fmt::Display for ProcessWorkKeyError {
    /// Human-readable error text; each variant prefixes its message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ProcessWorkKeyError::FileNameResolutionError(msg) => {
                write!(f, "File name resolution error: {msg}")
            }
            ProcessWorkKeyError::ValidationPollingError(msg) => {
                write!(f, "Validation polling error: {msg}")
            }
            ProcessWorkKeyError::InvalidatingWorkError(msg) => {
                write!(f, "Invalidating work error: {msg}")
            }
            ProcessWorkKeyError::MaxAttemptsReached(msg) => {
                write!(f, "Max attempts reached: {msg}")
            }
            ProcessWorkKeyError::GenericError(err) => {
                write!(f, "Generic error: {err}")
            }
            ProcessWorkKeyError::NoMatchingToplocConfig(msg) => {
                // Fixed: was `{msg:?}`, which debug-quoted the String unlike
                // every other arm of this Display impl.
                write!(f, "No matching toploc config: {msg}")
            }
        }
    }
}
/// Metadata parsed from a group work-file name of the form
/// `<prefix>-<group_id>-<group_size>-<file_number>-<idx><ext>`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GroupInformation {
    /// File name with the trailing `-<idx>` component removed (extension kept).
    pub group_file_name: String,
    /// Everything before the group id in the original file name.
    pub prefix: String,
    /// Hex group identifier component.
    pub group_id: String,
    /// Group size component parsed from the name.
    pub group_size: u32,
    /// File number component parsed from the name.
    pub file_number: u32,
    /// Index component of the file name.
    pub idx: String,
}
impl FromStr for GroupInformation {
    type Err = Error;
    /// Parses a file name of the form
    /// `<prefix>-<group_id>-<group_size>-<file_number>-<idx><ext>`.
    ///
    /// Capture groups: (1) hex group id, (2) group size, (3) file number,
    /// (4) index, (5) extension including the leading dot.
    ///
    /// # Errors
    /// Returns an error if the name does not match the expected format or a
    /// numeric component fails to parse.
    fn from_str(file_name: &str) -> Result<Self, Self::Err> {
        // NOTE(review): this regex is recompiled on every call; consider
        // hoisting it into a `LazyLock` if this path ever shows in a profile.
        let re = Regex::new(r".*?-([0-9a-fA-F]+)-(\d+)-(\d+)-(\d+)(\.[^.]+)$")
            .map_err(|e| Error::msg(format!("Failed to compile regex: {e}")))?;
        let caps = re
            .captures(file_name)
            .ok_or_else(|| Error::msg("File name does not match expected format"))?;
        // Byte offset where the group id starts inside `file_name`.
        let groupid_start = caps
            .get(1)
            .ok_or_else(|| Error::msg("Failed to extract group ID"))?
            .start();
        // Everything before the '-' that precedes the group id. Safe: the
        // pattern guarantees the capture is preceded by '-', so start >= 1.
        let prefix = file_name[..groupid_start - 1].to_string();
        let groupid = caps
            .get(1)
            .ok_or_else(|| Error::msg("Failed to extract group ID"))?
            .as_str();
        let groupsize = caps
            .get(2)
            .ok_or_else(|| Error::msg("Failed to extract group size"))?
            .as_str()
            .parse::<u32>()
            .map_err(|e| Error::msg(format!("Failed to parse group size: {e}")))?;
        let filenumber = caps
            .get(3)
            .ok_or_else(|| Error::msg("Failed to extract file number"))?
            .as_str()
            .parse::<u32>()
            .map_err(|e| Error::msg(format!("Failed to parse file number: {e}")))?;
        let idx = caps
            .get(4)
            .ok_or_else(|| Error::msg("Failed to extract index"))?
            .as_str();
        let extension = caps
            .get(5)
            .ok_or_else(|| Error::msg("Failed to extract extension"))?
            .as_str();
        // Group file name = original name minus the trailing "-<idx><ext>"
        // part (cut at the last hyphen), with the extension re-appended.
        let group_file_name = file_name[..file_name
            .rfind('-')
            .ok_or_else(|| Error::msg("Failed to find last hyphen in filename"))?]
            .to_string()
            + extension;
        Ok(Self {
            group_file_name,
            prefix,
            group_id: groupid.to_string(),
            group_size: groupsize,
            file_number: filenumber,
            idx: idx.to_string(),
        })
    }
}
/// A validation group assembled from the work keys that share one group file.
#[derive(Clone, Debug)]
pub struct ToplocGroup {
    /// Shared group file name (index component stripped).
    pub group_file_name: String,
    /// Group size component from the file name.
    pub group_size: u32,
    /// File number component from the file name.
    pub file_number: u32,
    /// Prefix component from the file name.
    pub prefix: String,
    /// Work keys belonging to this group, in sorted order.
    pub sorted_work_keys: Vec<String>,
    /// Hex group identifier.
    pub group_id: String,
}
impl From<GroupInformation> for ToplocGroup {
fn from(info: GroupInformation) -> Self {
Self {
group_file_name: info.group_file_name,
group_size: info.group_size,
file_number: info.file_number,
prefix: info.prefix,
sorted_work_keys: Vec::new(),
group_id: info.group_id,
}
}
}
impl GroupInformation {
pub fn to_redis(&self) -> Result<String, anyhow::Error> {
Ok(serde_json::to_string(self)?)
}
pub fn from_redis(s: &str) -> Result<Self, anyhow::Error> {
Ok(serde_json::from_str(s)?)
}
}
/// Work scheduled for one pass of the validation loop.
#[derive(Debug)]
pub struct ValidationPlan {
    /// Ungrouped work keys (with their work info) whose validation should be triggered.
    pub single_trigger_tasks: Vec<(String, WorkInfo)>,
    /// Groups whose validation should be triggered.
    pub group_trigger_tasks: Vec<ToplocGroup>,
    /// Work keys whose validation status should be polled.
    pub status_check_tasks: Vec<String>,
    /// Groups whose validation status should be polled.
    pub group_status_check_tasks: Vec<ToplocGroup>,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/synthetic_data/chain_operations.rs | crates/validator/src/validators/synthetic_data/chain_operations.rs | use super::*;
impl SyntheticDataValidator<WalletProvider> {
    /// Soft-invalidates `work_key` (test build: logs and skips the chain call).
    #[cfg(test)]
    pub fn soft_invalidate_work(&self, work_key: &str) -> Result<(), Error> {
        info!("Soft invalidating work: {}", work_key);
        if self.disable_chain_invalidation {
            info!("Chain invalidation is disabled, skipping work soft invalidation");
            return Ok(());
        }
        info!("Test mode: skipping actual work soft invalidation");
        // Touch the field so the test build doesn't flag it as unused.
        let _ = &self.prime_network;
        Ok(())
    }

    /// Soft-invalidates `work_key` on-chain (no penalty is sent).
    ///
    /// The contract payload is 64 bytes: the 32-byte work key followed by the
    /// work-unit count as a 32-byte big-endian integer.
    ///
    /// # Errors
    /// Fails if the work info is missing from Redis, the key is not valid
    /// hex, or the contract call fails.
    #[cfg(not(test))]
    pub async fn soft_invalidate_work(&self, work_key: &str) -> Result<(), Error> {
        info!("Soft invalidating work: {work_key}");
        if self.disable_chain_invalidation {
            info!("Chain invalidation is disabled, skipping work soft invalidation");
            return Ok(());
        }
        let work_info = self
            .get_work_info_from_redis(work_key)
            .await?
            .ok_or_else(|| Error::msg("Work info not found for soft invalidation"))?;
        let work_key_bytes = hex::decode(work_key)
            .map_err(|e| Error::msg(format!("Failed to decode hex work key: {e}")))?;
        // Create 64-byte payload: work_key (32 bytes) + work_units (32 bytes)
        let mut data = Vec::with_capacity(64);
        data.extend_from_slice(&work_key_bytes);
        // Convert work_units to 32-byte representation
        let work_units_bytes = work_info.work_units.to_be_bytes::<32>();
        data.extend_from_slice(&work_units_bytes);
        match self
            .prime_network
            .soft_invalidate_work(self.pool_id, data)
            .await
        {
            Ok(_) => Ok(()),
            Err(e) => {
                error!("Failed to soft invalidate work {work_key}: {e}");
                Err(Error::msg(format!("Failed to soft invalidate work: {e}")))
            }
        }
    }

    /// Hard-invalidates `work_key` (test build: records the metric, then
    /// skips the chain call).
    #[cfg(test)]
    pub fn invalidate_work(&self, work_key: &str) -> Result<(), Error> {
        info!("Invalidating work: {}", work_key);
        if let Some(metrics) = &self.metrics {
            metrics.record_work_key_invalidation();
        }
        if self.disable_chain_invalidation {
            info!("Chain invalidation is disabled, skipping work invalidation");
            return Ok(());
        }
        info!("Test mode: skipping actual work invalidation");
        // Touch the fields so the test build doesn't flag them as unused.
        let _ = &self.prime_network;
        let _ = &self.penalty;
        Ok(())
    }

    /// Hard-invalidates `work_key` on-chain, applying the configured penalty.
    ///
    /// # Errors
    /// Fails if the key is not valid hex or the contract call fails.
    #[cfg(not(test))]
    pub async fn invalidate_work(&self, work_key: &str) -> Result<(), Error> {
        info!("Invalidating work: {work_key}");
        if let Some(metrics) = &self.metrics {
            metrics.record_work_key_invalidation();
        }
        if self.disable_chain_invalidation {
            info!("Chain invalidation is disabled, skipping work invalidation");
            return Ok(());
        }
        let data = hex::decode(work_key)
            .map_err(|e| Error::msg(format!("Failed to decode hex work key: {e}")))?;
        match self
            .prime_network
            .invalidate_work(self.pool_id, self.penalty, data)
            .await
        {
            Ok(_) => Ok(()),
            Err(e) => {
                error!("Failed to invalidate work {work_key}: {e}");
                Err(Error::msg(format!("Failed to invalidate work: {e}")))
            }
        }
    }

    /// Dispatches to soft or hard invalidation. The cfg split mirrors the
    /// sync (test) vs async (non-test) variants of the two methods above.
    pub async fn invalidate_according_to_invalidation_type(
        &self,
        work_key: &str,
        invalidation_type: InvalidationType,
    ) -> Result<(), Error> {
        match invalidation_type {
            #[cfg(test)]
            InvalidationType::Soft => self.soft_invalidate_work(work_key),
            #[cfg(not(test))]
            InvalidationType::Soft => self.soft_invalidate_work(work_key).await,
            #[cfg(test)]
            InvalidationType::Hard => self.invalidate_work(work_key),
            #[cfg(not(test))]
            InvalidationType::Hard => self.invalidate_work(work_key).await,
        }
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/synthetic_data/mod.rs | crates/validator/src/validators/synthetic_data/mod.rs | use crate::metrics::MetricsContext;
use crate::store::redis::RedisStore;
use crate::validators::synthetic_data::types::{InvalidationType, RejectionInfo};
use alloy::primitives::U256;
use anyhow::{Context as _, Error, Result};
use futures::future;
use log::{debug, warn};
use log::{error, info};
use redis::AsyncCommands;
use shared::utils::StorageProvider;
use shared::web3::contracts::implementations::prime_network_contract::PrimeNetworkContract;
use shared::web3::contracts::implementations::work_validators::synthetic_data_validator::{
SyntheticDataWorkValidator, WorkInfo,
};
use shared::web3::wallet::WalletProvider;
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use tokio_util::sync::CancellationToken;
pub(crate) mod chain_operations;
#[cfg(test)]
mod tests;
pub(crate) mod toploc;
pub(crate) mod types;
use toploc::{GroupValidationResult, Toploc, ToplocConfig};
use types::{
GroupInformation, ProcessWorkKeyError, ToplocGroup, ValidationPlan, ValidationResult,
WorkValidationInfo,
};
/// Validates synthetic-data work submissions for one compute pool and
/// (soft-)invalidates failing work on-chain.
#[derive(Clone)]
pub struct SyntheticDataValidator<P: alloy::providers::Provider + Clone> {
    /// Compute pool this validator is responsible for.
    pool_id: U256,
    /// On-chain synthetic-data work validator contract binding.
    validator: SyntheticDataWorkValidator<P>,
    /// Prime network contract used for invalidation calls.
    prime_network: PrimeNetworkContract<P>,
    /// One toploc runner per configured toploc config.
    toploc: Vec<Toploc>,
    /// Penalty applied on hard invalidation.
    penalty: U256,
    /// Storage backend handle (presumably for work artifacts; used by the
    /// toploc/validation code outside this section — confirm there).
    storage_provider: Arc<dyn StorageProvider>,
    /// Redis used for validation state, work info and group tracking.
    redis_store: RedisStore,
    /// Cooperative shutdown signal.
    cancellation_token: CancellationToken,
    /// Seconds between validation loop iterations.
    work_validation_interval: u64,
    /// TTL applied to `Unknown` status records so stuck triggers are retried.
    unknown_status_expiry_seconds: u64,
    /// Grace interval used by the validation scheduling logic.
    grace_interval: u64,
    /// Max number of validations triggered per batch.
    batch_trigger_size: usize,
    /// Whether work is validated in per-node groups.
    with_node_grouping: bool,
    /// When true, all chain invalidation calls are skipped (dry-run).
    disable_chain_invalidation: bool,
    /// **Storage**: Uses Redis sorted set "incomplete_groups" with deadline as score
    incomplete_group_grace_period_minutes: u64,
    /// Invalidation type for toploc validation failures
    toploc_invalidation_type: InvalidationType,
    /// Invalidation type for work unit mismatch failures
    work_unit_invalidation_type: InvalidationType,
    /// Optional metrics sink; `None` disables metric recording.
    metrics: Option<MetricsContext>,
}
impl<P: alloy::providers::Provider + Clone + 'static> SyntheticDataValidator<P> {
    /// Builds a validator; one `Toploc` runner is created per config.
    ///
    /// # Panics
    /// Panics if `pool_id_str` is not a valid `U256`.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        pool_id_str: String,
        validator: SyntheticDataWorkValidator<P>,
        prime_network: PrimeNetworkContract<P>,
        toploc_configs: Vec<ToplocConfig>,
        penalty: U256,
        storage_provider: Arc<dyn StorageProvider>,
        redis_store: RedisStore,
        cancellation_token: CancellationToken,
        work_validation_interval: u64,
        unknown_status_expiry_seconds: u64,
        grace_interval: u64,
        batch_trigger_size: usize,
        with_node_grouping: bool,
        disable_chain_invalidation: bool,
        incomplete_group_grace_period_minutes: u64,
        toploc_invalidation_type: InvalidationType,
        work_unit_invalidation_type: InvalidationType,
        metrics: Option<MetricsContext>,
    ) -> Self {
        let pool_id = pool_id_str.parse::<U256>().expect("Invalid pool ID");
        let mut toploc = Vec::new();
        for config in toploc_configs {
            toploc.push(Toploc::new(config, metrics.clone()));
        }
        info!("Toploc invalidation type: {toploc_invalidation_type:?}");
        info!("Work unit invalidation type: {work_unit_invalidation_type:?}");
        Self {
            pool_id,
            validator,
            prime_network,
            toploc,
            penalty,
            storage_provider,
            redis_store,
            cancellation_token,
            work_validation_interval,
            unknown_status_expiry_seconds,
            grace_interval,
            batch_trigger_size,
            with_node_grouping,
            disable_chain_invalidation,
            incomplete_group_grace_period_minutes,
            toploc_invalidation_type,
            work_unit_invalidation_type,
            metrics,
        }
    }
    /// Redis key under which a work key's validation status is stored.
    fn get_key_for_work_key(&self, work_key: &str) -> String {
        format!("work_validation_status:{work_key}")
    }
    /// Starts tracking an incomplete group with a grace period for recovery.
    ///
    /// **Purpose**: When a group doesn't have all expected submissions yet, we give it
    /// a configurable grace period (e.g. 5 minutes) to allow remaining nodes to submit
    /// their work before we soft invalidate the incomplete submissions.
    ///
    /// **How it works**:
    /// - Adds the group to a Redis sorted set called "incomplete_groups"
    /// - Uses the "grace period deadline" as the sort score (not Redis TTL!)
    /// - Only tracks if the group exists and isn't already being tracked
    /// - The deadline is set ONLY ONCE when first tracking begins
    ///
    async fn track_incomplete_group(&self, group_key: &str) -> Result<(), Error> {
        if self.incomplete_group_grace_period_minutes == 0 {
            return Ok(()); // Feature disabled
        }
        let mut con = self
            .redis_store
            .client
            .get_multiplexed_async_connection()
            .await?;
        // Only track groups that actually exist in Redis (have at least one submission)
        let group_exists: bool = con
            .exists(group_key)
            .await
            .map_err(|e| Error::msg(format!("Failed to check group existence: {e}")))?;
        if !group_exists {
            // No point tracking a group that doesn't exist yet
            return Ok(());
        }
        // Calculate when the grace period ends - this will only be used if not already tracked
        let grace_period_deadline = chrono::Utc::now().timestamp()
            + (self.incomplete_group_grace_period_minutes as i64 * 60);
        // Add to sorted set ONLY if not already present
        // NX flag ensures the deadline is set exactly once when tracking begins
        let mut cmd = redis::cmd("ZADD");
        cmd.arg("incomplete_groups")
            .arg("NX")
            .arg(grace_period_deadline)
            .arg(group_key);
        let added: i32 = cmd
            .query_async(&mut con)
            .await
            .map_err(|e| Error::msg(format!("Failed to track incomplete group: {e}")))?;
        if added > 0 {
            debug!(
                "Started tracking incomplete group: {} with {}min grace period (deadline: {})",
                group_key, self.incomplete_group_grace_period_minutes, grace_period_deadline
            );
        } else {
            debug!("Group {group_key} already being tracked, preserving original deadline");
        }
        Ok(())
    }
}
impl SyntheticDataValidator<WalletProvider> {
/// Finds groups whose grace period has ended and stops tracking them.
async fn get_groups_past_grace_period(&self) -> Result<Vec<String>, Error> {
    if self.incomplete_group_grace_period_minutes == 0 {
        return Ok(Vec::new()); // Feature disabled
    }
    let mut conn = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let now = chrono::Utc::now().timestamp();
    // Members of the sorted set whose score (grace-period deadline) is in the past.
    let expired: Vec<String> = conn
        .zrangebyscore("incomplete_groups", "-inf", now)
        .await
        .map_err(|e| Error::msg(format!("Failed to get groups past grace period: {e}")))?;
    if expired.is_empty() {
        return Ok(expired);
    }
    // Their grace period is over, so remove them from tracking.
    let _: () = conn
        .zrem("incomplete_groups", &expired)
        .await
        .map_err(|e| Error::msg(format!("Failed to remove groups from tracking: {e}")))?;
    debug!("Found {} groups past their grace period", expired.len());
    Ok(expired)
}
/// Checks if a group is currently being tracked for incomplete recovery.
/// Used in tests and debugging to verify incomplete-group tracking.
#[cfg(test)]
async fn is_group_being_tracked_as_incomplete(&self, group_key: &str) -> Result<bool, Error> {
    if self.incomplete_group_grace_period_minutes == 0 {
        return Ok(false); // Feature disabled
    }
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    // Check if group exists in the incomplete groups sorted set
    // ZSCORE returns the score (deadline) if present, or None if not tracked
    // Fixed: inline format capture `{e}` for consistency with the rest of the file.
    let score: Option<f64> = con
        .zscore("incomplete_groups", group_key)
        .await
        .map_err(|e| Error::msg(format!("Failed to check incomplete tracking: {e}")))?;
    Ok(score.is_some())
}
/// Updates the grace period deadline for a tracked incomplete group.
/// Used in tests to simulate time passing by setting deadline relative to current time.
///
/// # Arguments
/// * `group_key` - The group to update
/// * `minutes_from_now` - Minutes from current timestamp (negative for past, positive for future)
#[cfg(test)]
async fn update_incomplete_group_deadline_relative(
    &self,
    group_key: &str,
    minutes_from_now: i64,
) -> Result<(), Error> {
    if self.incomplete_group_grace_period_minutes == 0 {
        return Ok(()); // Feature disabled
    }
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let current_timestamp = chrono::Utc::now().timestamp();
    let new_deadline = current_timestamp + (minutes_from_now * 60);
    // Update the score (deadline) for the group in the sorted set.
    // Fixed: inline format captures for consistency with the rest of the file.
    let _: () = con
        .zadd("incomplete_groups", group_key, new_deadline)
        .await
        .map_err(|e| Error::msg(format!("Failed to update incomplete group deadline: {e}")))?;
    debug!(
        "Updated deadline for incomplete group {group_key} to {new_deadline} ({minutes_from_now} minutes from now)"
    );
    Ok(())
}
/// Stops tracking a group (called when group becomes complete or is invalidated).
async fn remove_incomplete_group_tracking(&self, group_key: &str) -> Result<(), Error> {
    if self.incomplete_group_grace_period_minutes == 0 {
        return Ok(()); // Feature disabled
    }
    let mut conn = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    // Drop the group from the "incomplete_groups" sorted set, if present.
    let removed: i32 = conn
        .zrem("incomplete_groups", group_key)
        .await
        .map_err(|e| Error::msg(format!("Failed to remove incomplete group tracking: {e}")))?;
    if removed > 0 {
        debug!("Stopped tracking incomplete group: {group_key}");
    }
    Ok(())
}
/// Persists just the status for `work_key`, with no rejection reason.
async fn update_work_validation_status(
    &self,
    work_key: &str,
    status: &ValidationResult,
) -> Result<(), Error> {
    let info = WorkValidationInfo {
        status: status.clone(),
        reason: None,
    };
    self.update_work_validation_info(work_key, &info).await
}
/// Persists `validation_info` for `work_key` in Redis and maintains the
/// rejection-tracking structures.
///
/// `Unknown` statuses are written with a TTL so the trigger is retried once
/// the key expires; every other status is stored without expiry.
async fn update_work_validation_info(
    &self,
    work_key: &str,
    validation_info: &WorkValidationInfo,
) -> Result<(), Error> {
    let expiry = match validation_info.status {
        // Must switch to pending within 60 seconds otherwise we resubmit it
        ValidationResult::Unknown => self.unknown_status_expiry_seconds,
        _ => 0,
    };
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let key = self.get_key_for_work_key(work_key);
    let validation_data = serde_json::to_string(&validation_info)?;
    if expiry > 0 {
        // SET with EX: the record disappears after `expiry` seconds.
        let _: () = con
            .set_options(
                &key,
                &validation_data,
                redis::SetOptions::default().with_expiration(redis::SetExpiry::EX(expiry)),
            )
            .await
            .map_err(|e| Error::msg(format!("Failed to set work validation status: {e}")))?;
    } else {
        let _: () = con
            .set(&key, &validation_data)
            .await
            .map_err(|e| Error::msg(format!("Failed to set work validation status: {e}")))?;
    }
    // Manage rejection tracking for efficient querying
    self.update_rejection_tracking(&mut con, work_key, validation_info)
        .await?;
    Ok(())
}
/// Records a rejection in the "work_rejections" sorted set (member = work
/// key, score = rejection timestamp) and, when a reason is present, stores a
/// per-key JSON detail blob.
///
/// NOTE(review): entries are only ever added here; a work key that later
/// leaves the `Reject` state is not removed from the set — confirm whether
/// that is intended.
async fn update_rejection_tracking(
    &self,
    con: &mut redis::aio::MultiplexedConnection,
    work_key: &str,
    validation_info: &WorkValidationInfo,
) -> Result<(), Error> {
    let rejection_set_key = "work_rejections";
    let rejection_data_key = format!("work_rejection_data:{work_key}");
    let is_rejected = validation_info.status == ValidationResult::Reject;
    if is_rejected {
        // Add to rejections set with current timestamp
        let timestamp = chrono::Utc::now().timestamp();
        let _: () = con
            .zadd(rejection_set_key, work_key, timestamp)
            .await
            .map_err(|e| Error::msg(format!("Failed to add to rejections set: {e}")))?;
        // Store rejection details if reason exists
        if let Some(reason) = &validation_info.reason {
            let rejection_detail = serde_json::json!({
                "reason": reason,
                "timestamp": timestamp
            });
            let _: () = con
                .set(&rejection_data_key, rejection_detail.to_string())
                .await
                .map_err(|e| Error::msg(format!("Failed to set rejection data: {e}")))?;
        }
    }
    Ok(())
}
/// Test helper: fetches only the status portion of a work key's validation info.
#[cfg(test)]
async fn get_work_validation_status_from_redis(
    &self,
    work_key: &str,
) -> Result<Option<ValidationResult>, Error> {
    Ok(self
        .get_work_validation_info_from_redis(work_key)
        .await?
        .map(|info| info.status))
}
/// Test helper: reads a work key's validation record from Redis,
/// understanding both the current `WorkValidationInfo` JSON layout and the
/// legacy bare-`ValidationResult` layout.
#[cfg(test)]
async fn get_work_validation_info_from_redis(
    &self,
    work_key: &str,
) -> Result<Option<WorkValidationInfo>, Error> {
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let key = self.get_key_for_work_key(work_key);
    // Fixed: inline format captures `{e}` for consistency with the rest of the file.
    let data: Option<String> = con
        .get(key)
        .await
        .map_err(|e| Error::msg(format!("Failed to get work validation status: {e}")))?;
    match data {
        Some(data) => {
            // Try to parse as WorkValidationInfo first (new format)
            if let Ok(validation_info) = serde_json::from_str::<WorkValidationInfo>(&data) {
                Ok(Some(validation_info))
            } else {
                // Fall back to old format (just ValidationResult)
                match serde_json::from_str::<ValidationResult>(&data) {
                    Ok(status) => Ok(Some(WorkValidationInfo {
                        status,
                        reason: None,
                    })),
                    Err(e) => Err(Error::msg(format!(
                        "Failed to parse work validation data: {e}"
                    ))),
                }
            }
        }
        None => Ok(None),
    }
}
/// Cache a serialized `WorkInfo` under `work_info:<work_key>` (no expiry).
async fn update_work_info_in_redis(
    &self,
    work_key: &str,
    work_info: &WorkInfo,
) -> Result<(), Error> {
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let payload = serde_json::to_string(work_info)?;
    let _: () = con
        .set(format!("work_info:{work_key}"), payload)
        .await
        .map_err(|e| Error::msg(format!("Failed to set work info: {e}")))?;
    Ok(())
}
/// Load cached `WorkInfo` for a work key.
///
/// Returns `Ok(None)` when the key is missing, and also when the cached
/// record fails its own validity check (`WorkInfo::is_valid`), so invalid
/// cache entries behave like cache misses.
async fn get_work_info_from_redis(&self, work_key: &str) -> Result<Option<WorkInfo>, Error> {
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let cached: Option<String> = con
        .get(format!("work_info:{work_key}"))
        .await
        .map_err(|e| Error::msg(format!("Failed to get work info: {e}")))?;
    match cached {
        None => Ok(None),
        Some(raw) => {
            let info: WorkInfo = serde_json::from_str(&raw)
                .map_err(|e| Error::msg(format!("Failed to parse work info: {e}")))?;
            // Treat records with invalid values as absent rather than erroring.
            Ok(if info.is_valid() { Some(info) } else { None })
        }
    }
}
/// Resolve the original file name for a work key (content SHA), with caching
/// and a bounded number of resolution attempts.
///
/// The resolved name is cached under `file_name:<work_key>`; failed attempts
/// are counted under `file_name_attempts:<work_key>` (24h TTL). After
/// `MAX_ATTEMPTS` failures the work is soft-invalidated and its status set to
/// `FileNameResolutionFailed` so it is never retried.
async fn get_file_name_for_work_key(
    &self,
    work_key: &str,
) -> Result<String, ProcessWorkKeyError> {
    let redis_key = format!("file_name:{work_key}");
    let attempts_key = format!("file_name_attempts:{work_key}");
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    // Fast path: name already resolved and cached.
    let file_name: Option<String> = con
        .get(&redis_key)
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    if let Some(cached_file_name) = file_name {
        return Ok(cached_file_name);
    }
    // Increment attempts counter
    let attempts: i64 = con
        .incr(&attempts_key, 1)
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    // Set expiry on attempts counter (24 hours)
    let _: () = con
        .expire(&attempts_key, 24 * 60 * 60)
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    const MAX_ATTEMPTS: i64 = 60;
    if attempts >= MAX_ATTEMPTS {
        // If we've tried too many times, soft invalidate the work and update its status
        // NOTE(review): in test builds soft_invalidate_work appears to be
        // synchronous (no .await) — confirm the test-only signature.
        #[cfg(test)]
        if let Err(_e) = self.soft_invalidate_work(work_key) {
            // Test mode: error handling skipped
        }
        #[cfg(not(test))]
        if let Err(e) = self.soft_invalidate_work(work_key).await {
            error!(
                "Failed to soft invalidate work after max filename resolution attempts: {e}"
            );
        }
        // Set the validation status to FileNameResolutionFailed to prevent future processing
        if let Err(e) = self
            .update_work_validation_status(
                work_key,
                &ValidationResult::FileNameResolutionFailed,
            )
            .await
        {
            error!("Failed to update validation status after max attempts: {e}");
        }
        return Err(ProcessWorkKeyError::MaxAttemptsReached(format!(
            "Failed to resolve filename after {MAX_ATTEMPTS} attempts for work key: {work_key}"
        )));
    }
    // Ask the storage provider to map the SHA back to its original file name.
    let original_file_name = self
        .storage_provider
        .resolve_mapping_for_sha(work_key)
        .await
        .map_err(|e| ProcessWorkKeyError::FileNameResolutionError(e.to_string()))?;
    if original_file_name.is_empty() {
        error!("Failed to resolve original file name for work key: {work_key}");
        if let Some(metrics) = &self.metrics {
            metrics.record_work_key_error("file_name_resolution_failed");
        }
        return Err(ProcessWorkKeyError::FileNameResolutionError(format!(
            "Failed to resolve original file name for work key: {work_key}"
        )));
    }
    // Normalize: drop a single leading slash before caching.
    let cleaned_file_name = original_file_name
        .strip_prefix('/')
        .unwrap_or(&original_file_name);
    let _: () = con
        .set(&redis_key, cleaned_file_name)
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    // Reset attempts counter on success
    let _: () = con
        .del(&attempts_key)
        .await
        .map_err(|e| ProcessWorkKeyError::GenericError(e.into()))?;
    Ok(cleaned_file_name.to_string())
}
/// Derive the Redis group key for a work key and register the work key in
/// that group's hash.
///
/// Group keys have the shape `group:<group_id>:<group_size>:<file_number>`,
/// derived from the resolved file name.
async fn build_group_for_key(&self, work_key: &str) -> Result<String, Error> {
    let file = match self.get_file_name_for_work_key(work_key).await {
        Ok(name) => name,
        Err(ProcessWorkKeyError::MaxAttemptsReached(_)) => {
            // Status is already set in get_file_name_for_work_key
            return Err(Error::msg(format!(
                "Failed to resolve filename after max attempts for work key: {work_key}"
            )));
        }
        Err(e) => {
            error!("Failed to get file name for work key: {e}");
            return Err(Error::msg(format!(
                "Failed to get file name for work key: {e}"
            )));
        }
    };
    let info = GroupInformation::from_str(&file)?;
    let group_key = format!(
        "group:{}:{}:{}",
        info.group_id, info.group_size, info.file_number
    );
    // Register this work key as a member of the group hash.
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    con.hset::<_, _, _, ()>(&group_key, work_key, info.to_redis()?)
        .await
        .map_err(|e| Error::msg(format!("Failed to set group info in Redis: {e}")))?;
    Ok(group_key)
}
/// A group is ready once its Redis hash holds exactly as many members as the
/// expected size encoded in the key (`group:<id>:<size>:<file_no>`).
async fn is_group_ready_for_validation(&self, group_key: &str) -> Result<bool, Error> {
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let current_size: u32 = con
        .hlen::<_, u32>(group_key)
        .await
        .map_err(|e| Error::msg(format!("Failed to get group size from Redis: {e}")))?;
    // The third `:`-separated segment of the key is the expected group size.
    let expected: u32 = group_key
        .split(':')
        .nth(2)
        .ok_or_else(|| Error::msg("Failed to get group size from group key"))?
        .parse()
        .map_err(|e| Error::msg(format!("Failed to parse group size: {e}")))?;
    Ok(current_size == expected)
}
/// Assemble the complete `ToplocGroup` for a work key, if its group is ready.
///
/// Builds/updates the group hash for the key, then:
/// - if all members are present, returns the group with its work keys sorted
///   by their in-group index (and clears any "incomplete group" tracking);
/// - otherwise records the group as incomplete and returns `Ok(None)`.
///
/// Failures to build the group key are logged and reported as `Ok(None)`
/// rather than an error, so one bad key does not abort plan building.
async fn get_group(&self, work_key: &str) -> Result<Option<ToplocGroup>> {
    let group_key = match self.build_group_for_key(work_key).await {
        Ok(key) => key,
        Err(e) => {
            error!("Failed to build group key for work key {work_key}: {e}");
            return Ok(None);
        }
    };
    let ready_for_validation = self.is_group_ready_for_validation(&group_key).await?;
    if ready_for_validation {
        // Remove from incomplete group tracking since it's now complete
        if let Err(e) = self.remove_incomplete_group_tracking(&group_key).await {
            error!("Failed to remove incomplete group tracking: {e}");
        }
        let mut redis = self
            .redis_store
            .client
            .get_multiplexed_async_connection()
            .await?;
        let group_entries: HashMap<String, String> = redis.hgetall(&group_key).await?;
        let mut entries: Vec<(String, GroupInformation)> = Vec::new();
        for (key, value) in group_entries {
            let info = GroupInformation::from_redis(&value)
                .map_err(|e| Error::msg(format!("Failed to parse group info: {e}")))?;
            entries.push((key, info));
        }
        // Order members by their numeric index within the group; entries with
        // an unparsable index sort first (treated as 0).
        entries.sort_by_key(|(_, info)| info.idx.parse::<usize>().unwrap_or(0));
        // Seed the group metadata from the first member, then attach the
        // ordered list of work keys.
        let mut toploc_group: ToplocGroup = entries
            .first()
            .ok_or_else(|| Error::msg("No group info found"))?
            .1
            .clone()
            .into();
        toploc_group.sorted_work_keys = entries.into_iter().map(|(key, _)| key).collect();
        return Ok(Some(toploc_group));
    } else {
        // Track incomplete group for potential soft invalidation
        if let Err(e) = self.track_incomplete_group(&group_key).await {
            error!("Failed to track incomplete group: {e}");
        }
    }
    Ok(None)
}
pub async fn build_validation_plan(
&self,
work_keys: Vec<String>,
) -> Result<ValidationPlan, Error> {
self.build_validation_plan_batched(work_keys).await
}
/// Build a validation plan by batching Redis and blockchain lookups.
///
/// Steps:
/// 1. Pipeline-fetch cached work info + validation status for all keys.
/// 2. Collect keys whose work info is not cached.
/// 3. Batch-fetch the missing work info from the blockchain (caching it
///    asynchronously, fire-and-forget).
/// 4. Route every key into one of four task lists: single/group trigger
///    tasks (validation needs starting) or single/group status checks
///    (validation already in flight). Keys in a terminal status are skipped.
pub async fn build_validation_plan_batched(
    &self,
    work_keys: Vec<String>,
) -> Result<ValidationPlan, Error> {
    let mut single_trigger_tasks: Vec<(String, WorkInfo)> = Vec::new();
    let mut group_trigger_tasks: Vec<ToplocGroup> = Vec::new();
    let mut status_check_tasks: Vec<String> = Vec::new();
    let mut group_status_check_tasks: Vec<ToplocGroup> = Vec::new();
    let mut keys_to_process = 0;
    // Step 1: Batch fetch work info and validation status from Redis
    let (work_info_map, status_map) = self.batch_fetch_redis_data(&work_keys).await?;
    // Step 2: Collect work keys that need blockchain lookup
    let mut missing_work_keys = Vec::new();
    for work_key in &work_keys {
        if !work_info_map.contains_key(work_key) {
            missing_work_keys.push(work_key.clone());
        }
    }
    // Step 3: Batch fetch missing work info from blockchain
    let blockchain_work_info = self
        .batch_fetch_blockchain_work_info(missing_work_keys)
        .await?;
    // Step 4: Process all work keys with cached + fetched data
    for work_key in work_keys {
        // Get work info - either from cache or blockchain fetch
        let work_info = if let Some(info) = work_info_map.get(&work_key) {
            info
        } else if let Some(info) = blockchain_work_info.get(&work_key) {
            // Cache the fetched work info asynchronously; a cache failure
            // only costs a re-fetch on the next round.
            let self_clone = self.clone();
            let work_key_clone = work_key.clone();
            let info_copy = *info;
            tokio::spawn(async move {
                if let Err(e) = self_clone
                    .update_work_info_in_redis(&work_key_clone, &info_copy)
                    .await
                {
                    error!("Failed to cache work info for {work_key_clone}: {e}");
                }
            });
            info
        } else {
            error!("Failed to get work info for {work_key}");
            continue;
        };
        let cache_status = status_map.get(&work_key);
        match cache_status {
            // Terminal states: nothing more to do for this key.
            Some(
                ValidationResult::Accept
                | ValidationResult::Reject
                | ValidationResult::Crashed
                | ValidationResult::IncompleteGroup
                | ValidationResult::FileNameResolutionFailed,
            ) => {
                continue; // Already processed
            }
            Some(ValidationResult::Unknown) => {
                keys_to_process += 1;
                if self.with_node_grouping {
                    let check_group = self.get_group(&work_key).await?;
                    if let Some(group) = check_group {
                        // De-duplicate: one status check per (group, file) pair.
                        if !group_status_check_tasks.iter().any(|g| {
                            g.group_id == group.group_id && g.file_number == group.file_number
                        }) {
                            group_status_check_tasks.push(group);
                        }
                    }
                } else {
                    status_check_tasks.push(work_key); // Check status
                }
            }
            Some(_) | None => {
                keys_to_process += 1;
                // Needs triggering (covers Pending, Invalidated, and None cases)
                if self.with_node_grouping {
                    let check_group = self.get_group(&work_key).await?;
                    if let Some(group) = check_group {
                        group_trigger_tasks.push(group);
                    }
                } else {
                    single_trigger_tasks.push((work_key.clone(), *work_info));
                }
            }
        }
    }
    info!(
        "keys_to_process: {keys_to_process} (including keys with no status or pending status)"
    );
    if let Some(metrics) = &self.metrics {
        metrics.record_work_keys_to_process(keys_to_process as f64);
    }
    Ok(ValidationPlan {
        single_trigger_tasks,
        group_trigger_tasks,
        status_check_tasks,
        group_status_check_tasks,
    })
}
/// Batch fetch work info and validation status from Redis using pipelining
///
/// Issues a single pipeline of `GET`s: first one `work_info:<key>` per work
/// key, then one validation-status key per work key. Results are positional:
/// index `i` holds the work info for `work_keys[i]`, and index
/// `i + work_keys.len()` its validation status. Missing or unparsable
/// entries are logged at debug level and omitted from the returned maps.
async fn batch_fetch_redis_data(
    &self,
    work_keys: &[String],
) -> Result<(HashMap<String, WorkInfo>, HashMap<String, ValidationResult>), Error> {
    let mut con = self
        .redis_store
        .client
        .get_multiplexed_async_connection()
        .await?;
    let mut pipe = redis::pipe();
    for work_key in work_keys {
        let work_info_key = format!("work_info:{work_key}");
        pipe.get(&work_info_key);
    }
    for work_key in work_keys {
        let status_key = self.get_key_for_work_key(work_key);
        pipe.get(status_key);
    }
    let results: Vec<Option<String>> = pipe
        .query_async(&mut con)
        .await
        .map_err(|e| Error::msg(format!("Failed to execute Redis pipeline: {e}")))?;
    let mut work_info_map = HashMap::new();
    let mut status_map = HashMap::new();
    let work_keys_len = work_keys.len();
    // First half of the pipeline results: work info blobs.
    for (i, work_key) in work_keys.iter().enumerate() {
        if let Some(Some(work_info_str)) = results.get(i) {
            match serde_json::from_str::<WorkInfo>(work_info_str) {
                Ok(work_info) => {
                    work_info_map.insert(work_key.clone(), work_info);
                }
                Err(e) => {
                    debug!("Failed to parse work info for {work_key}: {e}");
                }
            }
        }
    }
    // Second half: validation statuses, offset by `work_keys_len`.
    for (i, work_key) in work_keys.iter().enumerate() {
        if let Some(Some(status_str)) = results.get(i + work_keys_len) {
            if let Ok(validation_info) = serde_json::from_str::<WorkValidationInfo>(status_str)
            {
                status_map.insert(work_key.clone(), validation_info.status);
            } else {
                // Fall back to old format (just ValidationResult)
                match serde_json::from_str::<ValidationResult>(status_str) {
                    Ok(status) => {
                        status_map.insert(work_key.clone(), status);
                    }
                    Err(e) => {
                        debug!("Failed to parse validation status for {work_key}: {e}");
                    }
                }
            }
        }
    }
    Ok((work_info_map, status_map))
}
/// Batch fetch work info from blockchain for multiple work keys
async fn batch_fetch_blockchain_work_info(
&self,
work_keys: Vec<String>,
) -> Result<HashMap<String, WorkInfo>, Error> {
if work_keys.is_empty() {
return Ok(HashMap::new());
}
let futures: Vec<_> = work_keys
.into_iter()
.map(|work_key| {
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/synthetic_data/toploc.rs | crates/validator/src/validators/synthetic_data/toploc.rs | use crate::metrics::MetricsContext;
use super::ValidationResult;
use anyhow::Error;
use log::{debug, warn};
use log::{error, info};
use serde::{Deserialize, Serialize};
/// Configuration for a single remote Toploc validation server.
#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
pub struct ToplocConfig {
    // Base URL of the remote Toploc validation server.
    pub server_url: String,
    // Optional bearer token sent as an `Authorization` header on every request.
    pub auth_token: Option<String>,
    // Optional model-name prefix; only files under this prefix are handled
    // by the instance (see `Toploc::matches_file_name`).
    pub file_prefix_filter: Option<String>,
}
/// HTTP client for a remote Toploc validation server.
#[derive(Clone, Debug)]
pub(crate) struct Toploc {
    // Server location, auth, and file-prefix routing configuration.
    config: ToplocConfig,
    // Reused reqwest client with default auth header and timeouts baked in.
    client: reqwest::Client,
    // Optional metrics sink for API latency/status counters.
    metrics: Option<MetricsContext>,
}
/// Outcome of a group validation poll against the Toploc server.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub(crate) struct GroupValidationResult {
    // Overall verdict for the group (accept/reject/crashed/pending/unknown).
    pub status: ValidationResult,
    // FLOP counts reported by the server (0.0 when absent from the response).
    pub input_flops: f64,
    pub output_flops: f64,
    // This tells us which node(s) in a group actually failed the toploc validation
    pub failing_indices: Vec<i64>,
    // Optional human-readable explanation for a rejection.
    pub reason: Option<String>,
}
impl Toploc {
/// Build a `Toploc` client with a 30s request timeout and a 10s connect
/// timeout.
///
/// When an auth token is configured, it is installed as a default
/// `Authorization: Bearer …` header on every request.
///
/// Panics if the token is not a valid header value or the HTTP client
/// cannot be constructed.
pub(crate) fn new(config: ToplocConfig, metrics: Option<MetricsContext>) -> Self {
    let mut headers = reqwest::header::HeaderMap::new();
    if let Some(token) = &config.auth_token {
        let value = reqwest::header::HeaderValue::from_str(&format!("Bearer {token}"))
            .expect("Invalid token");
        headers.insert(reqwest::header::AUTHORIZATION, value);
    }
    let client = reqwest::Client::builder()
        .default_headers(headers)
        .timeout(std::time::Duration::from_secs(30))
        .connect_timeout(std::time::Duration::from_secs(10))
        .build()
        .expect("Failed to build HTTP client");
    Self {
        config,
        client,
        metrics,
    }
}
/// Human-readable identifier for this validator instance: the configured
/// file prefix (e.g. `Qwen/Qwen3-14B`), or `"n/a"` when no filter is set.
pub(crate) fn name(&self) -> String {
    // `unwrap_or_else` already yields an owned String; the previous
    // trailing `.to_string()` was a redundant extra allocation.
    self.config
        .file_prefix_filter
        .clone()
        .unwrap_or_else(|| "n/a".to_string())
}
/// Strip all leading slashes, then collapse doubled slashes to single ones.
fn normalize_path(&self, path: &str) -> String {
    let without_leading = path.trim_start_matches('/');
    without_leading.replace("//", "/")
}
/// Normalize `file_name` and, when a prefix filter is configured and the
/// name starts with it, drop the prefix (re-normalizing the remainder so a
/// leading separator is also removed).
fn remove_prefix_if_present(&self, file_name: &str) -> String {
    let normalized = self.normalize_path(file_name);
    if let Some(prefix) = &self.config.file_prefix_filter {
        if let Some(rest) = normalized.strip_prefix(prefix.as_str()) {
            return self.normalize_path(rest);
        }
    }
    normalized
}
/// Whether this instance should handle `file_name`.
///
/// True when no prefix filter is configured, when the normalized name
/// equals the prefix exactly, or when it continues past the prefix with a
/// path separator (so `Foo/Bar-extra` does not match prefix `Foo/Bar`).
pub(crate) fn matches_file_name(&self, file_name: &str) -> bool {
    let normalized = self.normalize_path(file_name);
    match &self.config.file_prefix_filter {
        None => true,
        Some(prefix) => {
            if normalized == *prefix {
                true
            } else {
                normalized
                    .strip_prefix(prefix.as_str())
                    .map_or(false, |rest| rest.starts_with('/'))
            }
        }
    }
}
/// Kick off remote Toploc validation of a single file.
///
/// POSTs `{file_sha, address}` to `<server_url>/validate/<file_name>`
/// (with the configured prefix stripped from the name). Records API
/// latency/status metrics when a metrics context is configured. Returns an
/// error on a non-success HTTP status or on transport failure.
pub(crate) async fn trigger_single_file_validation(
    &self,
    file_sha: &str,
    key_address: &str,
    file_name: &str,
) -> Result<(), Error> {
    let processed_file_name = self.remove_prefix_if_present(file_name);
    let validate_url = format!(
        "{}/validate/{}",
        self.config.server_url, processed_file_name
    );
    debug!("Triggering remote toploc validation for {file_name} {validate_url}");
    let body = serde_json::json!({
        "file_sha": file_sha,
        "address": key_address
    });
    let start_time = std::time::Instant::now();
    match &self.client.post(&validate_url).json(&body).send().await {
        Ok(response) => {
            let status = response.status();
            if !status.is_success() {
                error!("Server returned error status {status} for {file_name}");
                if let Some(metrics) = &self.metrics {
                    metrics.record_api_request(
                        "toploc_single_file_validation",
                        &status.to_string(),
                    );
                }
                return Err(Error::msg(format!(
                    "Server returned error status: {status}"
                )));
            }
            let trigger_duration = start_time.elapsed();
            if let Some(metrics) = &self.metrics {
                metrics.record_api_duration(
                    "toploc_single_file_validation",
                    trigger_duration.as_secs_f64(),
                );
                metrics
                    .record_api_request("toploc_single_file_validation", &status.to_string());
            }
            info!("Remote toploc validation triggered for {file_name} in {trigger_duration:?}");
            Ok(())
        }
        Err(e) => {
            // Classify transport failures for more actionable logs.
            let error_msg = if e.is_timeout() {
                format!("Toploc request timed out for {file_name}: {e}")
            } else if e.is_connect() {
                format!("Failed to connect to toploc server for {file_name}: {e}")
            } else {
                format!("Failed to trigger remote toploc validation for {file_name}: {e}")
            };
            error!("{error_msg}");
            if let Some(metrics) = &self.metrics {
                // "0" marks a request that never produced an HTTP status.
                metrics.record_api_request("toploc_single_file_validation", "0");
            }
            Err(Error::msg(error_msg))
        }
    }
}
/// Kick off remote Toploc validation of a file group.
///
/// POSTs `{file_shas, group_id, file_number, group_size}` to
/// `<server_url>/validategroup/<file_name>` (with the configured prefix
/// stripped from the name). Records API latency/status metrics when a
/// metrics context is configured. Returns an error on a non-success HTTP
/// status or on transport failure.
pub(crate) async fn trigger_group_file_validation(
    &self,
    file_name: &str,
    file_shas: Vec<String>,
    group_id: &str,
    file_number: u32,
    group_size: u32,
) -> Result<(), Error> {
    let processed_file_name = self.remove_prefix_if_present(file_name);
    let validate_url = format!(
        "{}/validategroup/{}",
        self.config.server_url, processed_file_name
    );
    info!("Triggering remote toploc group validation for {file_name} {validate_url}");
    let body = serde_json::json!({
        "file_shas": file_shas,
        "group_id": group_id,
        "file_number": file_number,
        "group_size": group_size
    });
    let start_time = std::time::Instant::now();
    match &self.client.post(&validate_url).json(&body).send().await {
        Ok(response) => {
            let status = response.status();
            if !status.is_success() {
                error!("Server returned error status {status} for {file_name}");
                if let Some(metrics) = &self.metrics {
                    metrics.record_api_request(
                        "toploc_group_file_validation",
                        &status.to_string(),
                    );
                }
                return Err(Error::msg(format!(
                    "Server returned error status: {status}"
                )));
            }
            let trigger_duration = start_time.elapsed();
            if let Some(metrics) = &self.metrics {
                metrics.record_api_duration(
                    "toploc_group_file_validation",
                    trigger_duration.as_secs_f64(),
                );
                metrics.record_api_request("toploc_group_file_validation", &status.to_string());
            }
            info!(
                "Remote toploc group validation triggered for {file_name} in {trigger_duration:?}"
            );
            Ok(())
        }
        Err(e) => {
            // Classify transport failures for more actionable logs.
            let error_msg = if e.is_timeout() {
                format!("Toploc group request timed out for {file_name}: {e}")
            } else if e.is_connect() {
                format!("Failed to connect to toploc server for group {file_name}: {e}")
            } else {
                format!("Failed to trigger remote toploc group validation for {file_name}: {e}")
            };
            error!("{error_msg}");
            if let Some(metrics) = &self.metrics {
                // "0" marks a request that never produced an HTTP status.
                metrics.record_api_request("toploc_group_file_validation", "0");
            }
            Err(Error::msg(error_msg))
        }
    }
}
/// Poll the remote server for the validation status of a group file.
///
/// GETs `<server_url>/statusgroup/<file_name>` (prefix-stripped) and parses
/// the JSON body into a [`GroupValidationResult`], including FLOP counts,
/// the indices of failing group members, and an optional rejection reason.
/// A non-200 response, transport failure, malformed JSON body, or missing
/// `status` field is reported as an error.
pub(crate) async fn get_group_file_validation_status(
    &self,
    file_name: &str,
) -> Result<GroupValidationResult, Error> {
    let processed_file_name = self.remove_prefix_if_present(file_name);
    debug!("Processed file name: {processed_file_name}");
    let url = format!(
        "{}/statusgroup/{}",
        self.config.server_url, processed_file_name
    );
    debug!("Processing URL: {url}");
    let start_time = std::time::Instant::now();
    match self.client.get(&url).send().await {
        Ok(response) => {
            let status = response.status();
            if status != reqwest::StatusCode::OK {
                error!("Unexpected status code {status} for {file_name}");
                if let Some(metrics) = &self.metrics {
                    metrics.record_api_request("toploc_get_group_status", &status.to_string());
                }
                return Err(Error::msg(format!("Unexpected status code: {status}")));
            }
            let status_json: serde_json::Value = response.json().await.map_err(|e| {
                error!("Failed to parse JSON response for {file_name}: {e}");
                Error::msg(format!("Failed to parse JSON response: {e}"))
            })?;
            let duration = start_time.elapsed();
            if let Some(metrics) = &self.metrics {
                metrics.record_api_duration("toploc_get_group_status", duration.as_secs_f64());
                metrics.record_api_request("toploc_get_group_status", &status.to_string());
            }
            // A single lookup handles both a missing `status` field and a
            // non-string value; the previous extra `is_none()` pre-check was
            // redundant (both paths produced the identical error).
            match status_json.get("status").and_then(|s| s.as_str()) {
                Some(status) => {
                    debug!("Validation status for {file_name}: {status}");
                    let validation_result = match status {
                        "accept" => ValidationResult::Accept,
                        "reject" => ValidationResult::Reject,
                        "crashed" => ValidationResult::Crashed,
                        "pending" => ValidationResult::Pending,
                        _ => ValidationResult::Unknown,
                    };
                    // Numeric/array/string fields all default gracefully when
                    // absent or of the wrong JSON type.
                    let input_flops = status_json
                        .get("input_flops")
                        .and_then(|f| f.as_f64())
                        .unwrap_or(0.0);
                    let output_flops = status_json
                        .get("output_flops")
                        .and_then(|f| f.as_f64())
                        .unwrap_or(0.0);
                    let failing_indices = status_json
                        .get("failing_indices")
                        .and_then(|f| f.as_array())
                        .map(|arr| {
                            arr.iter().filter_map(|v| v.as_i64()).collect::<Vec<i64>>()
                        })
                        .unwrap_or_default();
                    let reason = status_json
                        .get("reason")
                        .and_then(|r| r.as_str())
                        .map(|s| s.to_string());
                    Ok(GroupValidationResult {
                        status: validation_result,
                        input_flops,
                        output_flops,
                        failing_indices,
                        reason,
                    })
                }
                None => {
                    error!("No status found for {file_name}");
                    Err(Error::msg("No status found"))
                }
            }
        }
        Err(e) => {
            // Classify transport failures for more actionable logs.
            let error_msg = if e.is_timeout() {
                format!("Toploc status check timed out for {file_name}: {e}")
            } else if e.is_connect() {
                format!("Failed to connect to toploc server for status check {file_name}: {e}")
            } else {
                format!("Failed to poll remote toploc group validation for {file_name}: {e}")
            };
            error!("{error_msg}");
            Err(Error::msg(error_msg))
        }
    }
}
/// Poll the remote server for the validation status of a single file.
///
/// GETs `<server_url>/status/<file_name>` (prefix-stripped) and maps the
/// JSON `status` field to a [`ValidationResult`]. Unrecognized status
/// strings map to `Unknown` (with a warning); a missing or non-string
/// `status` field, a non-200 response, or a transport failure is an error.
pub(crate) async fn get_single_file_validation_status(
    &self,
    file_name: &str,
) -> Result<ValidationResult, Error> {
    let processed_file_name = self.remove_prefix_if_present(file_name);
    let url = format!("{}/status/{}", self.config.server_url, processed_file_name);
    match self.client.get(&url).send().await {
        Ok(response) => {
            if response.status() != reqwest::StatusCode::OK {
                error!(
                    "Unexpected status code {} for {}",
                    response.status(),
                    file_name
                );
                return Err(Error::msg(format!(
                    "Unexpected status code: {}",
                    response.status()
                )));
            }
            let status_json: serde_json::Value = response.json().await.map_err(|e| {
                error!("Failed to parse JSON response for {file_name}: {e}");
                Error::msg(format!("Failed to parse JSON response: {e}"))
            })?;
            // A single lookup handles both a missing `status` field and a
            // non-string value; the previous extra `is_none()` pre-check was
            // redundant (both paths produced the identical error).
            match status_json.get("status").and_then(|s| s.as_str()) {
                Some(status) => {
                    debug!("Validation status for {file_name}: {status}");
                    let validation_result = match status {
                        "accept" => ValidationResult::Accept,
                        "reject" => ValidationResult::Reject,
                        "crashed" => ValidationResult::Crashed,
                        "pending" => ValidationResult::Pending,
                        _ => {
                            warn!("Unknown status found for {file_name}: {status}");
                            ValidationResult::Unknown
                        }
                    };
                    Ok(validation_result)
                }
                None => {
                    error!("No status found for {file_name}");
                    Err(Error::msg("No status found"))
                }
            }
        }
        Err(e) => {
            error!("Failed to poll remote toploc validation for {file_name}: {e}");
            Err(Error::msg(format!(
                "Failed to poll remote toploc validation: {e}"
            )))
        }
    }
}
}
#[cfg(test)]
mod tests {
use super::*;
use mockito::Server;
#[tokio::test]
async fn test_single_file_validation_success() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _trigger_mock = server
.mock("POST", "/validate/test-file.parquet")
.with_status(200)
.match_body(mockito::Matcher::Json(serde_json::json!({
"file_sha": "abc123",
"address": "0x456"
})))
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.trigger_single_file_validation("abc123", "0x456", "test-file.parquet")
.await;
assert!(result.is_ok());
Ok(())
}
#[tokio::test]
async fn test_group_file_validation_success() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _group_mock = server
.mock("POST", "/validategroup/test-group.parquet")
.with_status(200)
.match_body(mockito::Matcher::Json(serde_json::json!({
"file_shas": ["sha1", "sha2"],
"group_id": "group123",
"file_number": 1,
"group_size": 2
})))
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.trigger_group_file_validation(
"test-group.parquet",
vec!["sha1".to_string(), "sha2".to_string()],
"group123",
1,
2,
)
.await;
assert!(result.is_ok());
Ok(())
}
#[tokio::test]
async fn test_group_file_validation_error() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _group_mock = server
.mock("POST", "/validategroup/test-group.parquet")
.with_status(400)
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.trigger_group_file_validation(
"test-group.parquet",
vec!["sha1".to_string(), "sha2".to_string()],
"group123",
1,
2,
)
.await;
assert!(result.is_err());
assert!(result
.unwrap_err()
.to_string()
.contains("Server returned error status: 400"));
Ok(())
}
#[tokio::test]
async fn test_single_file_status_accept() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _status_mock = server
.mock("GET", "/status/test-file.parquet")
.with_status(200)
.with_body(r#"{"status": "accept"}"#)
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.get_single_file_validation_status("test-file.parquet")
.await;
assert!(result.is_ok());
assert_eq!(result.unwrap(), ValidationResult::Accept);
Ok(())
}
#[tokio::test]
async fn test_single_file_status_reject() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _status_mock = server
.mock("GET", "/status/test-file.parquet")
.with_status(200)
.with_body(r#"{"status": "reject"}"#)
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.get_single_file_validation_status("test-file.parquet")
.await;
assert!(result.is_ok());
assert_eq!(result.unwrap(), ValidationResult::Reject);
Ok(())
}
#[tokio::test]
async fn test_group_file_status_success_with_flops() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _status_mock = server
.mock("GET", "/statusgroup/test-group.parquet")
.with_status(200)
.with_body(r#"{"status": "accept", "input_flops": 12345.67, "output_flops": 12345.67, "failing_indices": [], "reason": null}"#)
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.get_group_file_validation_status("test-group.parquet")
.await;
assert!(result.is_ok());
let group_result = result.unwrap();
assert_eq!(group_result.status, ValidationResult::Accept);
assert_eq!(group_result.input_flops, 12345.67);
assert_eq!(group_result.output_flops, 12345.67);
assert!(group_result.failing_indices.is_empty());
assert_eq!(group_result.reason, None);
Ok(())
}
#[tokio::test]
async fn test_group_file_status_reject_with_failing_indices() -> Result<(), Error> {
let mut server = Server::new_async().await;
let _status_mock = server
.mock("GET", "/statusgroup/test-group.parquet")
.with_status(200)
.with_body(r#"{"status": "reject", "input_flops": 0.0, "output_flops": 0.0, "failing_indices": [1, 3, 5], "reason": "Validation failed due to mismatched outputs"}"#)
.create();
let config = ToplocConfig {
server_url: server.url(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
let result = toploc
.get_group_file_validation_status("test-group.parquet")
.await;
assert!(result.is_ok());
let group_result = result.unwrap();
assert_eq!(group_result.status, ValidationResult::Reject);
assert_eq!(group_result.input_flops, 0.0);
assert_eq!(group_result.output_flops, 0.0);
assert_eq!(group_result.failing_indices, vec![1, 3, 5]);
assert_eq!(
group_result.reason,
Some("Validation failed due to mismatched outputs".to_string())
);
Ok(())
}
#[tokio::test]
async fn test_file_prefix_filter_matching() {
let configs = vec![
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("Qwen/Qwen3-235B-A22B".to_string()),
},
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("Qwen/Qwen3-32B".to_string()),
},
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("Qwen/Qwen3-30B-A3B".to_string()),
},
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("Qwen/Qwen3-14B".to_string()),
},
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("deepseek-ai/DeepSeek-R1-0528".to_string()),
},
ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("deepseek-ai/DeepSeek-R1-0528-Qwen3-8B".to_string()),
},
];
let test_cases = vec![
// Test Qwen 235B model
("Qwen/Qwen3-235B-A22B/data.parquet", Some(0)),
("Qwen/Qwen3-235B-A22B", Some(0)),
("Qwen/Qwen3-235B-A22B-extra/data.parquet", None),
("qwen/qwen3-235b-a22b/data.parquet", None), // Case sensitive
// Test Qwen 32B model
("Qwen/Qwen3-32B/data.parquet", Some(1)),
("Qwen/Qwen3-32B", Some(1)),
("Qwen/Qwen3-32B-extra/data.parquet", None),
// Test Qwen 30B model
("Qwen/Qwen3-30B-A3B/data.parquet", Some(2)),
("Qwen/Qwen3-30B-A3B", Some(2)),
("Qwen/Qwen3-30B-A3B-extra/data.parquet", None),
// Test Qwen 14B model
("Qwen/Qwen3-14B/data.parquet", Some(3)),
("Qwen/Qwen3-14B", Some(3)),
("Qwen/Qwen3-14B-extra/data.parquet", None),
// Test DeepSeek base model
("deepseek-ai/DeepSeek-R1-0528/data.parquet", Some(4)),
("deepseek-ai/DeepSeek-R1-0528", Some(4)),
(
"deepseek-ai/DeepSeek-R1-0528-Qwen3-8B/data.parquet",
Some(5),
),
("deepseek-ai/deepseek-r1-0528/data.parquet", None), // Case sensitive
];
for (test_file, expected_match) in test_cases {
let mut matched = false;
let mut matched_idx = None;
for (idx, config) in configs.iter().enumerate() {
let toploc = Toploc::new(config.clone(), None);
if toploc.matches_file_name(test_file) {
matched = true;
matched_idx = Some(idx);
break;
}
}
match expected_match {
Some(expected_idx) => {
assert!(
matched,
"Expected file {} to match config {}",
test_file, expected_idx
);
assert_eq!(
matched_idx,
Some(expected_idx),
"File {} matched config {} but expected {}",
test_file,
matched_idx.unwrap(),
expected_idx
);
}
None => assert!(!matched, "File {} should not match any config", test_file),
}
}
}
#[tokio::test]
async fn test_nested_filter() {
let config = ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: Some("Qwen/Qwen0.6".to_string()),
};
let toploc = Toploc::new(config, None);
assert!(toploc.matches_file_name("/Qwen/Qwen0.6/-model-data.parquet"));
assert!(toploc.matches_file_name("Qwen/Qwen0.6"));
assert!(!toploc.matches_file_name("Qwen/Qwen0.7-model-data.parquet"));
assert!(!toploc.matches_file_name("qwen3-lowercase.parquet")); // Case sensitive
}
#[tokio::test]
async fn test_file_prefix_filter_none() {
let config = ToplocConfig {
server_url: "http://test".to_string(),
auth_token: None,
file_prefix_filter: None,
};
let toploc = Toploc::new(config, None);
// Should match everything when no filter is set
assert!(toploc.matches_file_name("Qwen3-model-data.parquet"));
assert!(toploc.matches_file_name("GPT4-model-data.parquet"));
assert!(toploc.matches_file_name("any-file-name.txt"));
assert!(toploc.matches_file_name(""));
}
#[tokio::test]
async fn test_auth_token_header() -> Result<(), Error> {
    // The configured auth token must be sent as a bearer `Authorization`
    // header on single-file validation requests; the mock only replies 200
    // when that header is present.
    let mut mock_server = Server::new_async().await;
    let _validate_mock = mock_server
        .mock("POST", "/validate/test.parquet")
        .match_header("Authorization", "Bearer secret-token")
        .with_status(200)
        .create();
    let toploc = Toploc::new(
        ToplocConfig {
            server_url: mock_server.url(),
            auth_token: Some("secret-token".to_string()),
            file_prefix_filter: None,
        },
        None,
    );
    let outcome = toploc
        .trigger_single_file_validation("abc123", "0x456", "test.parquet")
        .await;
    assert!(outcome.is_ok());
    Ok(())
}
#[tokio::test]
async fn test_group_validation_with_auth_token() -> Result<(), Error> {
    // Group validation requests must carry the bearer token as well.
    let mut mock_server = Server::new_async().await;
    let _group_mock = mock_server
        .mock("POST", "/validategroup/test-group.parquet")
        .match_header("Authorization", "Bearer group-token")
        .with_status(200)
        .create();
    let toploc = Toploc::new(
        ToplocConfig {
            server_url: mock_server.url(),
            auth_token: Some("group-token".to_string()),
            file_prefix_filter: None,
        },
        None,
    );
    let shas = vec!["sha1".to_string(), "sha2".to_string()];
    let outcome = toploc
        .trigger_group_file_validation("test-group.parquet", shas, "group123", 1, 2)
        .await;
    assert!(outcome.is_ok());
    Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/validators/synthetic_data/tests/mod.rs | crates/validator/src/validators/synthetic_data/tests/mod.rs | use crate::metrics::export_metrics;
use super::*;
use alloy::primitives::Address;
use anyhow::Ok;
use mockito::Server;
use shared::utils::MockStorageProvider;
use shared::web3::contracts::core::builder::{ContractBuilder, Contracts};
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
/// Builds a fresh Redis store for tests: verifies the server is reachable
/// and responsive, then wipes any data left over from earlier runs.
fn test_store() -> RedisStore {
    let store = RedisStore::new_test();
    let mut conn = store
        .client
        .get_connection()
        .expect("Should connect to test Redis instance");
    redis::cmd("PING")
        .query::<String>(&mut conn)
        .expect("Redis should be responsive");
    redis::cmd("FLUSHALL")
        .query::<String>(&mut conn)
        .expect("Redis should be flushed");
    store
}
/// Shared fixture for validator tests: a flushed test Redis store plus a
/// contract bundle wired to a throwaway local wallet / RPC endpoint.
fn setup_test_env() -> Result<(RedisStore, Contracts<WalletProvider>), Error> {
    let store = test_store();
    let rpc_url = Url::parse("http://localhost:8545").unwrap();
    let demo_wallet = Wallet::new(
        "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97",
        rpc_url,
    )
    .map_err(|e| Error::msg(format!("Failed to create demo wallet: {}", e)))?;
    let contracts = ContractBuilder::new(demo_wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .with_domain_registry()
        .with_stake_manager()
        .with_synthetic_data_validator(Some(Address::ZERO))
        .build()
        .map_err(|e| Error::msg(format!("Failed to build contracts: {}", e)))?;
    Ok((store, contracts))
}
/// `build_validation_plan` must bucket work keys correctly: complete groups
/// that were never triggered become group trigger tasks, while keys already
/// marked `Unknown` become group status checks.
#[tokio::test]
async fn test_build_validation_plan() -> Result<(), Error> {
    let (store, contracts) = setup_test_env()?;
    let metrics_context = MetricsContext::new("0".to_string(), Some("0".to_string()));
    let mock_storage = MockStorageProvider::new();
    // Size-1 group with its only shard present -> expected in trigger bucket.
    let single_group_file_name = "Qwen3/dataset/samplingn-9999999-1-9-0.parquet";
    mock_storage.add_file(single_group_file_name, "file1").await;
    mock_storage
        .add_mapping_file(
            "9999999999999999999999999999999999999999999999999999999999999999",
            single_group_file_name,
        )
        .await;
    // Second size-1 group; its key is later marked Unknown, so it should be
    // status-checked instead of re-triggered.
    let single_unknown_file_name = "Qwen3/dataset/samplingn-8888888-1-9-0.parquet";
    mock_storage
        .add_file(single_unknown_file_name, "file1")
        .await;
    mock_storage
        .add_mapping_file(
            "8888888888888888888888888888888888888888888888888888888888888888",
            single_unknown_file_name,
        )
        .await;
    // Size-2 group with both shards (indices 0 and 1) present.
    mock_storage
        .add_file(
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-0.parquet",
            "file1",
        )
        .await;
    mock_storage
        .add_file(
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-1.parquet",
            "file2",
        )
        .await;
    mock_storage
        .add_mapping_file(
            "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641",
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-0.parquet",
        )
        .await;
    mock_storage
        .add_mapping_file(
            "88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf",
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-1.parquet",
        )
        .await;
    let storage_provider = Arc::new(mock_storage);
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![ToplocConfig {
            server_url: "http://localhost:8080".to_string(),
            ..Default::default()
        }],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        false,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        Some(metrics_context),
    );
    let work_keys = vec![
        "9999999999999999999999999999999999999999999999999999999999999999".to_string(),
        "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641".to_string(),
        "88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf".to_string(),
        "8888888888888888888888888888888888888888888888888888888888888888".to_string(),
    ];
    let work_info = WorkInfo {
        node_id: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        work_units: U256::from(1000),
        ..Default::default()
    };
    for work_key in work_keys.clone() {
        validator
            .update_work_info_in_redis(&work_key, &work_info)
            .await?;
    }
    // Mark the last key Unknown so its group lands in the status-check bucket.
    validator
        .update_work_validation_status(&work_keys[3], &ValidationResult::Unknown)
        .await?;
    let validation_plan = validator.build_validation_plan(work_keys.clone()).await?;
    // Two fresh groups to trigger, one Unknown group to status-check, and
    // nothing in the single-file buckets.
    assert_eq!(validation_plan.single_trigger_tasks.len(), 0);
    assert_eq!(validation_plan.group_trigger_tasks.len(), 2);
    assert_eq!(validation_plan.status_check_tasks.len(), 0);
    assert_eq!(validation_plan.group_status_check_tasks.len(), 1);
    // All four keys should be reported as pending in the exported metrics.
    let metrics = export_metrics().unwrap();
    assert!(metrics.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 4"));
    Ok(())
}
/// A validation status written to Redis must be readable back unchanged.
#[tokio::test]
async fn test_status_update() -> Result<(), Error> {
    let (store, contracts) = setup_test_env()?;
    let mock_storage = MockStorageProvider::new();
    let storage_provider = Arc::new(mock_storage);
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![ToplocConfig {
            server_url: "http://localhost:8080".to_string(),
            ..Default::default()
        }],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        false,
        false,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        None,
    );
    validator
        .update_work_validation_status(
            "0x0000000000000000000000000000000000000000",
            &ValidationResult::Accept,
        )
        .await
        .map_err(|e| {
            error!("Failed to update work validation status: {}", e);
            Error::msg(format!("Failed to update work validation status: {}", e))
        })?;
    // Give the write a moment to land before reading it back.
    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
    let status = validator
        .get_work_validation_status_from_redis("0x0000000000000000000000000000000000000000")
        .await
        .map_err(|e| {
            error!("Failed to get work validation status: {}", e);
            Error::msg(format!("Failed to get work validation status: {}", e))
        })?;
    assert_eq!(status, Some(ValidationResult::Accept));
    Ok(())
}
/// `GroupInformation::from_str` parses group metadata from shard file names
/// of the form `<prefix>-<group_id>-<group_size>-<file_number>-<idx>.parquet`.
///
/// This test is fully synchronous (no `.await` anywhere), so it runs under
/// the plain `#[test]` harness instead of spinning up a Tokio runtime with
/// `#[tokio::test]`.
#[test]
fn test_group_filename_parsing() -> Result<(), Error> {
    // Test case 1: a well-formed shard name yields all components, and the
    // group file name is the shard name with the trailing index dropped.
    let name = "Qwen3/dataset/samplingn-3450756714426841564-2-9-1.parquet";
    let group_info = GroupInformation::from_str(name)?;
    assert_eq!(
        group_info.group_file_name,
        "Qwen3/dataset/samplingn-3450756714426841564-2-9.parquet"
    );
    assert_eq!(group_info.prefix, "Qwen3/dataset/samplingn");
    assert_eq!(group_info.group_id, "3450756714426841564");
    assert_eq!(group_info.group_size, 2);
    assert_eq!(group_info.file_number, 9);
    assert_eq!(group_info.idx, "1");
    // Test case 2: a name without the expected dash-separated fields is rejected.
    let invalid_name = "invalid-filename.parquet";
    assert!(GroupInformation::from_str(invalid_name).is_err());
    // Test case 3: a different prefix and different numbers parse just as well.
    let name2 = "test/dataset/data-123-5-10-3.parquet";
    let group_info2 = GroupInformation::from_str(name2)?;
    assert_eq!(
        group_info2.group_file_name,
        "test/dataset/data-123-5-10.parquet"
    );
    assert_eq!(group_info2.prefix, "test/dataset/data");
    assert_eq!(group_info2.group_id, "123");
    assert_eq!(group_info2.group_size, 5);
    assert_eq!(group_info2.file_number, 10);
    assert_eq!(group_info2.idx, "3");
    Ok(())
}
/// Exercises `get_group` resolution from individual shard work keys.
///
/// NOTE(review): with both shards present in storage, querying from the
/// idx-1 shard's key (c257…) yields `None` while the idx-0 shard's key
/// (88e4…) yields the full group — presumably only certain shards anchor
/// group resolution; confirm against `get_group`'s implementation.
#[tokio::test]
async fn test_group_build() -> Result<(), Error> {
    let (store, contracts) = setup_test_env()?;
    let config = ToplocConfig {
        server_url: "http://localhost:8080".to_string(),
        ..Default::default()
    };
    let mock_storage = MockStorageProvider::new();
    // Shard index 1 of the size-2 group, mapped from work key c257…
    mock_storage
        .add_file(
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-1.parquet",
            "file1",
        )
        .await;
    mock_storage
        .add_mapping_file(
            "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641",
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-1.parquet",
        )
        .await;
    // Shard index 0 of the same group, mapped from work key 88e4…
    mock_storage
        .add_file(
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-0.parquet",
            "file2",
        )
        .await;
    mock_storage
        .add_mapping_file(
            "88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf",
            "Qwen3/dataset/samplingn-3450756714426841564-2-9-0.parquet",
        )
        .await;
    let storage_provider = Arc::new(mock_storage);
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![config],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        false,
        false,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        None,
    );
    // Resolving from the idx-1 shard's work key yields no group.
    let group = validator
        .get_group("c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641")
        .await?;
    assert!(group.is_none());
    // Resolving from the idx-0 shard's work key yields the complete group.
    let group = validator
        .get_group("88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf")
        .await?;
    assert!(group.is_some());
    let group = group.unwrap();
    assert_eq!(&group.sorted_work_keys.len(), &2);
    // Work keys are ordered by shard index: 88e4… maps to shard 0.
    assert_eq!(
        &group.sorted_work_keys[0],
        "88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf"
    );
    Ok(())
}
/// End-to-end happy path for a size-1 group: trigger -> Unknown ->
/// status-check -> Accept, with metrics reflecting each stage.
#[tokio::test]
async fn test_group_e2e_accept() -> Result<(), Error> {
    let mut server = Server::new_async().await;
    let (store, contracts) = setup_test_env()?;
    let config = ToplocConfig {
        server_url: server.url(),
        file_prefix_filter: Some("Qwen/Qwen0.6".to_string()),
        ..Default::default()
    };
    const FILE_SHA: &str = "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641";
    const GROUP_ID: &str = "3450756714426841564";
    const NODE_ADDRESS: &str = "0xA1DDe6E4d2F127960e7C61f90a8b354Bc306bd2a";
    let mock_storage = MockStorageProvider::new();
    // One shard (group size 1, file number 0, index 0) plus its sha mapping.
    mock_storage
        .add_file(
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-1-0-0.parquet", GROUP_ID),
            "file1",
        )
        .await;
    mock_storage
        .add_mapping_file(
            FILE_SHA,
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-1-0-0.parquet", GROUP_ID),
        )
        .await;
    // Toploc server mock: the trigger must POST exactly this JSON payload.
    server
        .mock(
            "POST",
            format!("/validategroup/dataset/samplingn-{}-1-0.parquet", GROUP_ID).as_str(),
        )
        .match_body(mockito::Matcher::Json(serde_json::json!({
            "file_shas": [FILE_SHA],
            "group_id": GROUP_ID,
            "file_number": 0,
            "group_size": 1
        })))
        .with_status(200)
        .with_body(r#"ok"#)
        .create();
    // Status endpoint reports acceptance with 1000 output flops, matching
    // the 1000 work units claimed below.
    server
        .mock(
            "GET",
            format!("/statusgroup/dataset/samplingn-{}-1-0.parquet", GROUP_ID).as_str(),
        )
        .with_status(200)
        .with_body(r#"{"status": "accept", "input_flops": 1, "output_flops": 1000}"#)
        .create();
    let storage_provider = Arc::new(mock_storage);
    let metrics_context = MetricsContext::new("0".to_string(), Some("0".to_string()));
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![config],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        false,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        Some(metrics_context),
    );
    let work_keys: Vec<String> = vec![FILE_SHA.to_string()];
    let work_info = WorkInfo {
        node_id: Address::from_str(NODE_ADDRESS).unwrap(),
        work_units: U256::from(1000),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        ..Default::default()
    };
    for work_key in work_keys.clone() {
        validator
            .update_work_info_in_redis(&work_key, &work_info)
            .await?;
    }
    // Stage 1: fresh key -> one group trigger task, counted in metrics.
    let plan = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan.group_trigger_tasks.len(), 1);
    assert_eq!(plan.group_trigger_tasks[0].group_id, GROUP_ID);
    let metrics_0 = export_metrics().unwrap();
    assert!(
        metrics_0.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 1")
    );
    let group = validator.get_group(FILE_SHA).await?;
    assert!(group.is_some());
    let group = group.unwrap();
    assert_eq!(group.group_id, GROUP_ID);
    assert_eq!(group.group_size, 1);
    assert_eq!(group.file_number, 0);
    // Stage 2: triggering moves the key's cached status to Unknown.
    let result = validator.process_group_task(group).await;
    assert!(result.is_ok());
    let cache_status = validator
        .get_work_validation_status_from_redis(FILE_SHA)
        .await?;
    assert_eq!(cache_status, Some(ValidationResult::Unknown));
    // Stage 3: the next plan holds a status-check task instead of a trigger.
    let plan_2 = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan_2.group_trigger_tasks.len(), 0);
    assert_eq!(plan_2.group_status_check_tasks.len(), 1);
    let metrics = export_metrics().unwrap();
    assert!(metrics.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 1"));
    let result = validator
        .process_group_status_check(plan_2.group_status_check_tasks[0].clone())
        .await;
    assert!(result.is_ok());
    // Stage 4: the status check turns the cached result into Accept and
    // leaves nothing further to do.
    let cache_status = validator
        .get_work_validation_status_from_redis(FILE_SHA)
        .await?;
    assert_eq!(cache_status, Some(ValidationResult::Accept));
    let plan_3 = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan_3.group_trigger_tasks.len(), 0);
    assert_eq!(plan_3.group_status_check_tasks.len(), 0);
    let metrics_2 = export_metrics().unwrap();
    assert!(
        metrics_2.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 0")
    );
    assert!(metrics_2.contains("toploc_config_name=\"Qwen/Qwen0.6\""));
    // Claimed work units (1000) equal reported output flops -> "match".
    assert!(metrics_2.contains(&format!("validator_group_work_units_check_total{{group_id=\"{}\",pool_id=\"0\",result=\"match\",toploc_config_name=\"Qwen/Qwen0.6\",validator_id=\"0\"}} 1", GROUP_ID)));
    Ok(())
}
/// End-to-end flow for a size-2 group where the two nodes together claim
/// more work units (1000 + 1500) than the Toploc server reports
/// (output_flops = 2000): the over-claiming node is rejected, the honest
/// one accepted, and the work-units check records a "mismatch".
#[tokio::test]
async fn test_group_e2e_work_unit_mismatch() -> Result<(), Error> {
    let mut server = Server::new_async().await;
    let (store, contracts) = setup_test_env()?;
    let config = ToplocConfig {
        server_url: server.url(),
        file_prefix_filter: Some("Qwen/Qwen0.6".to_string()),
        ..Default::default()
    };
    const HONEST_NODE_ADDRESS: &str = "0x182555b0Ab39EE313f22c07dbe88D950385b1f69";
    const HONEST_FILE_SHA: &str =
        "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641";
    const EXCESSIVE_FILE_SHA: &str =
        "88e4672c19e5a10bff2e23d223f8bfc38ae1425feaa18db9480e631a4fd98edf";
    const EXCESSIVE_NODE_ADDRESS: &str = "0x182555b0Ab39EE313f22c07dbe88D950385b1f68";
    const GROUP_ID: &str = "3456714426841564";
    let mock_storage = MockStorageProvider::new();
    // Both shards of the size-2 group plus their sha mappings
    // (honest -> index 0, excessive -> index 1).
    mock_storage
        .add_file(
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-2-0-0.parquet", GROUP_ID),
            "file1",
        )
        .await;
    mock_storage
        .add_file(
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-2-0-1.parquet", GROUP_ID),
            "file2",
        )
        .await;
    mock_storage
        .add_mapping_file(
            HONEST_FILE_SHA,
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-2-0-0.parquet", GROUP_ID),
        )
        .await;
    mock_storage
        .add_mapping_file(
            EXCESSIVE_FILE_SHA,
            &format!("Qwen/Qwen0.6/dataset/samplingn-{}-2-0-1.parquet", GROUP_ID),
        )
        .await;
    // Trigger endpoint expects both shas in shard order.
    server
        .mock(
            "POST",
            format!("/validategroup/dataset/samplingn-{}-2-0.parquet", GROUP_ID).as_str(),
        )
        .match_body(mockito::Matcher::Json(serde_json::json!({
            "file_shas": [HONEST_FILE_SHA, EXCESSIVE_FILE_SHA],
            "group_id": GROUP_ID,
            "file_number": 0,
            "group_size": 2
        })))
        .with_status(200)
        .with_body(r#"ok"#)
        .create();
    // Server accepts the group but reports only 2000 output flops total.
    server
        .mock(
            "GET",
            format!("/statusgroup/dataset/samplingn-{}-2-0.parquet", GROUP_ID).as_str(),
        )
        .with_status(200)
        .with_body(r#"{"status": "accept", "input_flops": 1, "output_flops": 2000}"#)
        .create();
    let storage_provider = Arc::new(mock_storage);
    let metrics_context = MetricsContext::new("0".to_string(), Some("0".to_string()));
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![config],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        false,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        Some(metrics_context),
    );
    let work_keys: Vec<String> = vec![HONEST_FILE_SHA.to_string(), EXCESSIVE_FILE_SHA.to_string()];
    // The honest node claims the expected amount; the other claims 50% more.
    const EXPECTED_WORK_UNITS: u64 = 1000;
    const EXCESSIVE_WORK_UNITS: u64 = 1500;
    let work_info_1 = WorkInfo {
        node_id: Address::from_str(HONEST_NODE_ADDRESS).unwrap(),
        work_units: U256::from(EXPECTED_WORK_UNITS),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        ..Default::default()
    };
    let work_info_2 = WorkInfo {
        node_id: Address::from_str(EXCESSIVE_NODE_ADDRESS).unwrap(),
        work_units: U256::from(EXCESSIVE_WORK_UNITS),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f67").unwrap(),
        ..Default::default()
    };
    validator
        .update_work_info_in_redis(HONEST_FILE_SHA, &work_info_1)
        .await?;
    validator
        .update_work_info_in_redis(EXCESSIVE_FILE_SHA, &work_info_2)
        .await?;
    // Stage 1: both keys fresh -> one trigger task for the shared group.
    let plan = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan.group_trigger_tasks.len(), 1);
    assert_eq!(plan.group_trigger_tasks[0].group_id, GROUP_ID);
    let metrics_0 = export_metrics().unwrap();
    assert!(
        metrics_0.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 2")
    );
    let group = validator.get_group(HONEST_FILE_SHA).await?;
    assert!(group.is_some());
    let group = group.unwrap();
    assert_eq!(group.group_id, GROUP_ID);
    assert_eq!(group.group_size, 2);
    assert_eq!(group.file_number, 0);
    // Stage 2: triggering marks both keys Unknown.
    let result = validator.process_group_task(group).await;
    assert!(result.is_ok());
    let cache_status_1 = validator
        .get_work_validation_status_from_redis(HONEST_FILE_SHA)
        .await?;
    assert_eq!(cache_status_1, Some(ValidationResult::Unknown));
    let cache_status_2 = validator
        .get_work_validation_status_from_redis(EXCESSIVE_FILE_SHA)
        .await?;
    assert_eq!(cache_status_2, Some(ValidationResult::Unknown));
    let plan_2 = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan_2.group_trigger_tasks.len(), 0);
    assert_eq!(plan_2.group_status_check_tasks.len(), 1);
    let metrics = export_metrics().unwrap();
    assert!(metrics.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 2"));
    // Stage 3: status check accepts the honest claim, rejects the excessive one.
    let result = validator
        .process_group_status_check(plan_2.group_status_check_tasks[0].clone())
        .await;
    assert!(result.is_ok());
    let cache_status_1 = validator
        .get_work_validation_status_from_redis(HONEST_FILE_SHA)
        .await?;
    assert_eq!(cache_status_1, Some(ValidationResult::Accept));
    let cache_status_2 = validator
        .get_work_validation_status_from_redis(EXCESSIVE_FILE_SHA)
        .await?;
    assert_eq!(cache_status_2, Some(ValidationResult::Reject));
    let plan_3 = validator.build_validation_plan(work_keys.clone()).await?;
    assert_eq!(plan_3.group_trigger_tasks.len(), 0);
    assert_eq!(plan_3.group_status_check_tasks.len(), 0);
    // Metrics: group itself accepted, but the work-units check is "mismatch".
    let metrics_2 = export_metrics().unwrap();
    assert!(metrics_2.contains(&format!("validator_group_validations_total{{group_id=\"{}\",pool_id=\"0\",result=\"accept\",toploc_config_name=\"Qwen/Qwen0.6\",validator_id=\"0\"}} 1", GROUP_ID)));
    assert!(
        metrics_2.contains("validator_work_keys_to_process{pool_id=\"0\",validator_id=\"0\"} 0")
    );
    assert!(metrics_2.contains("toploc_config_name=\"Qwen/Qwen0.6\""));
    assert!(metrics_2.contains(&format!("validator_group_work_units_check_total{{group_id=\"{}\",pool_id=\"0\",result=\"mismatch\",toploc_config_name=\"Qwen/Qwen0.6\",validator_id=\"0\"}} 1", GROUP_ID)));
    Ok(())
}
/// When the Toploc server rejects a group, the status check must invalidate
/// the corresponding work (its work info is removed from Redis).
#[tokio::test]
async fn test_process_group_status_check_reject() -> Result<(), Error> {
    let mut server = Server::new_async().await;
    // Status endpoint reports a rejection naming shard index 0 as failing.
    let _status_mock = server
        .mock(
            "GET",
            "/statusgroup/dataset/samplingn-3450756714426841564-1-9.parquet",
        )
        .with_status(200)
        .with_body(r#"{"status": "reject", "flops": 0.0, "failing_indices": [0]}"#)
        .create();
    let (store, contracts) = setup_test_env()?;
    let config = ToplocConfig {
        server_url: server.url(),
        file_prefix_filter: Some("Qwen3".to_string()),
        ..Default::default()
    };
    let mock_storage = MockStorageProvider::new();
    // Single shard of a size-1 group plus its sha mapping.
    mock_storage
        .add_file(
            "Qwen3/dataset/samplingn-3450756714426841564-1-9-0.parquet",
            "file1",
        )
        .await;
    mock_storage
        .add_mapping_file(
            "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641",
            "Qwen3/dataset/samplingn-3450756714426841564-1-9-0.parquet",
        )
        .await;
    let storage_provider = Arc::new(mock_storage);
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![config],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        true,
        0, // incomplete_group_grace_period_minutes (disabled)
        InvalidationType::Hard,
        InvalidationType::Hard,
        None,
    );
    let group = validator
        .get_group("c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641")
        .await?;
    let group = group.unwrap();
    let result = validator.process_group_status_check(group).await;
    assert!(result.is_ok());
    // The rejected shard's work info must have been removed.
    let work_info = validator
        .get_work_info_from_redis(
            "c257e3d3fe866a00df1285f8bbbe601fed6b85229d983bbbb75e19a068346641",
        )
        .await?;
    assert!(work_info.is_none(), "Work should be invalidated");
    Ok(())
}
/// An incomplete group (work info present for only 1 of 2 shards) is tracked
/// as incomplete; once the missing shard's work info arrives, the group
/// resolves from either shard and the tracking entry is dropped.
#[tokio::test]
async fn test_incomplete_group_recovery() -> Result<(), Error> {
    let (store, contracts) = setup_test_env()?;
    let mock_storage = MockStorageProvider::new();
    // Create an incomplete group with only 1 of 2 expected files
    const GROUP_ID: &str = "1234567890123456";
    const FILE_SHA_1: &str = "a1b2c3d4e5f6789012345678901234567890123456789012345678901234abcd";
    // NOTE(review): FILE_SHA_2 is 62 characters, two short of a 64-char
    // hex digest — the mock storage does not care, but confirm intent.
    const FILE_SHA_2: &str = "b2c3d4e5f6789012345678901234567890123456789012345678901234bcde";
    mock_storage
        .add_file(
            &format!("TestModel/dataset/test-{}-2-0-0.parquet", GROUP_ID),
            "file1",
        )
        .await;
    mock_storage
        .add_file(
            &format!("TestModel/dataset/test-{}-2-0-1.parquet", GROUP_ID),
            "file2",
        )
        .await;
    mock_storage
        .add_mapping_file(
            FILE_SHA_1,
            &format!("TestModel/dataset/test-{}-2-0-0.parquet", GROUP_ID),
        )
        .await;
    mock_storage
        .add_mapping_file(
            FILE_SHA_2,
            &format!("TestModel/dataset/test-{}-2-0-1.parquet", GROUP_ID),
        )
        .await;
    let storage_provider = Arc::new(mock_storage);
    // Create validator with 1 minute grace period
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![ToplocConfig {
            server_url: "http://localhost:8080".to_string(),
            ..Default::default()
        }],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        true, // disable_chain_invalidation for testing
        1,    // 1 minute grace period
        InvalidationType::Hard,
        InvalidationType::Hard,
        None,
    );
    // Add work info for only the first file (making the group incomplete)
    let work_info = WorkInfo {
        node_id: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
        work_units: U256::from(1000),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        ..Default::default()
    };
    validator
        .update_work_info_in_redis(FILE_SHA_1, &work_info)
        .await?;
    // Try to get the group - should be None (incomplete) and should start tracking
    let group = validator.get_group(FILE_SHA_1).await?;
    assert!(group.is_none(), "Group should be incomplete");
    // Check that the incomplete group is being tracked
    let group_key = format!("group:{}:2:0", GROUP_ID);
    let is_tracked = validator
        .is_group_being_tracked_as_incomplete(&group_key)
        .await?;
    assert!(is_tracked, "Group should be tracked as incomplete");
    // Simulate the group becoming complete by adding the second file
    validator
        .update_work_info_in_redis(FILE_SHA_2, &work_info)
        .await?;
    // Now the group should be complete (check from either file)
    let group = validator.get_group(FILE_SHA_2).await?;
    assert!(group.is_some(), "Group should now be complete");
    let group = group.unwrap();
    assert_eq!(group.sorted_work_keys.len(), 2);
    // Should also work when checking from the first file
    let group_from_first = validator.get_group(FILE_SHA_1).await?;
    assert!(
        group_from_first.is_some(),
        "Group should also be complete when checked from first file"
    );
    // The incomplete group tracking should be removed
    let is_still_tracked = validator
        .is_group_being_tracked_as_incomplete(&group_key)
        .await?;
    assert!(
        !is_still_tracked,
        "Group should no longer be tracked as incomplete"
    );
    Ok(())
}
/// A group that stays incomplete past its grace period is soft-invalidated:
/// tracking is removed, the present key is marked `IncompleteGroup`, and a
/// soft-invalidation metric is emitted.
#[tokio::test]
async fn test_expired_incomplete_group_soft_invalidation() -> Result<(), Error> {
    let (store, contracts) = setup_test_env()?;
    let mock_storage = MockStorageProvider::new();
    // Create an incomplete group
    const GROUP_ID: &str = "9876543210987654";
    // NOTE(review): this sha is shorter than 64 hex chars — harmless for
    // the mock storage, but confirm intent.
    const FILE_SHA_1: &str = "c1d2e3f4567890123456789012345678901234567890123456789012345cdef";
    mock_storage
        .add_file(
            &format!("TestModel/dataset/test-{}-2-0-0.parquet", GROUP_ID),
            "file1",
        )
        .await;
    mock_storage
        .add_mapping_file(
            FILE_SHA_1,
            &format!("TestModel/dataset/test-{}-2-0-0.parquet", GROUP_ID),
        )
        .await;
    let storage_provider = Arc::new(mock_storage);
    // Create validator with very short grace period for testing
    let validator = SyntheticDataValidator::new(
        "0".to_string(),
        contracts.synthetic_data_validator.clone().unwrap(),
        contracts.prime_network.clone(),
        vec![ToplocConfig {
            server_url: "http://localhost:8080".to_string(),
            ..Default::default()
        }],
        U256::from(0),
        storage_provider,
        store,
        CancellationToken::new(),
        10,
        60,
        1,
        10,
        true,
        true, // disable_chain_invalidation for testing
        1,    // 1 minute grace period (but we'll simulate expiry)
        InvalidationType::Hard,
        InvalidationType::Hard,
        Some(MetricsContext::new("0".to_string(), Some("0".to_string()))),
    );
    // Add work info for only the first file (making the group incomplete)
    let work_info = WorkInfo {
        node_id: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(),
        work_units: U256::from(1000),
        provider: Address::from_str("0x182555b0Ab39EE313f22c07dbe88D950385b1f68").unwrap(),
        ..Default::default()
    };
    validator
        .update_work_info_in_redis(FILE_SHA_1, &work_info)
        .await?;
    // Try to get the group - should be None (incomplete) and should start tracking
    let group = validator.get_group(FILE_SHA_1).await?;
    assert!(group.is_none(), "Group should be incomplete");
    // Manually expire the incomplete group tracking by removing it and simulating expiry
    // In a real test, you would wait for the actual expiry, but for testing we simulate it
    let group_key = format!("group:{}:2:0", GROUP_ID);
    validator.track_incomplete_group(&group_key).await?;
    // Process groups past grace period (this would normally find groups past deadline)
    // Since we can't easily simulate time passage in tests, we'll test the method exists
    let tracking = validator
        .is_group_being_tracked_as_incomplete(&group_key)
        .await?;
    assert!(tracking, "Group should still be tracked as incomplete");
    // Update the deadline to simulate time passage
    validator
        .update_incomplete_group_deadline_relative(&group_key, -2)
        .await?;
    let result = validator.process_groups_past_grace_period().await;
    assert!(
        result.is_ok(),
        "Should process groups past grace period without error"
    );
    let new_tracking = validator
        .is_group_being_tracked_as_incomplete(&group_key)
        .await?;
    assert!(
        !new_tracking,
        "Group should no longer be tracked as incomplete"
    );
    // The stranded key is soft-invalidated rather than rejected outright.
    let key_status = validator
        .get_work_validation_status_from_redis(FILE_SHA_1)
        .await?;
    assert_eq!(key_status, Some(ValidationResult::IncompleteGroup));
    let metrics = export_metrics().unwrap();
    assert!(metrics.contains(&format!("validator_work_keys_soft_invalidated_total{{group_key=\"group:{}:2:0\",pool_id=\"0\",validator_id=\"0\"}} 1", GROUP_ID)));
    Ok(())
}
#[tokio::test]
async fn test_incomplete_group_status_tracking() -> Result<(), Error> {
let (store, contracts) = setup_test_env()?;
let mock_storage = MockStorageProvider::new();
// Create an incomplete group scenario
const GROUP_ID: &str = "1111111111111111";
const FILE_SHA_1: &str = "1111111111111111111111111111111111111111111111111111111111111111";
mock_storage
.add_file(
&format!("TestModel/dataset/test-{}-3-0-0.parquet", GROUP_ID),
"file1",
)
.await;
mock_storage
.add_mapping_file(
FILE_SHA_1,
&format!("TestModel/dataset/test-{}-3-0-0.parquet", GROUP_ID),
)
.await;
let storage_provider = Arc::new(mock_storage);
let validator = SyntheticDataValidator::new(
"0".to_string(),
contracts.synthetic_data_validator.clone().unwrap(),
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | true |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/validator/src/p2p/mod.rs | crates/validator/src/p2p/mod.rs | use anyhow::{bail, Context as _, Result};
use futures::stream::FuturesUnordered;
use p2p::{Keypair, Protocols};
use shared::p2p::OutgoingRequest;
use shared::p2p::Service as P2PService;
use shared::web3::wallet::Wallet;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio_util::sync::CancellationToken;
/// Validator-side P2P service: bridges hardware-challenge requests from the
/// validator onto the shared P2P layer and routes responses back.
pub struct Service {
    // underlying shared P2P service that owns the network connection
    inner: P2PService,
    // converts this validator's hardware challenges to outgoing requests to workers
    outgoing_message_tx: Sender<OutgoingRequest>,
    // challenges submitted by validator logic, consumed by `run`
    hardware_challenge_rx: Receiver<HardwareChallengeRequest>,
}
impl Service {
    /// Creates the validator P2P service plus the sender used to submit
    /// hardware-challenge requests to it.
    ///
    /// # Errors
    /// Fails when the underlying P2P service cannot be constructed.
    pub fn new(
        keypair: Keypair,
        port: u16,
        cancellation_token: CancellationToken,
        wallet: Wallet,
    ) -> Result<(Self, Sender<HardwareChallengeRequest>)> {
        let (hardware_challenge_tx, hardware_challenge_rx) = tokio::sync::mpsc::channel(100);
        let (inner, outgoing_message_tx) = P2PService::new(
            keypair,
            port,
            cancellation_token.clone(),
            wallet,
            Protocols::new()
                .with_hardware_challenge()
                .with_authentication(),
        )
        .context("failed to create p2p service")?;
        Ok((
            Self {
                inner,
                outgoing_message_tx,
                hardware_challenge_rx,
            },
            hardware_challenge_tx,
        ))
    }

    /// Runs the service loop: spawns the inner P2P service, converts each
    /// incoming hardware challenge into an outgoing P2P request, and routes
    /// the worker's response back to the caller's oneshot channel.
    ///
    /// Returns `Ok(())` once all challenge senders are dropped and no
    /// response conversions remain in flight.
    pub async fn run(self) -> Result<()> {
        use futures::StreamExt as _;
        let Self {
            inner,
            outgoing_message_tx,
            mut hardware_challenge_rx,
        } = self;
        tokio::task::spawn(inner.run());
        // In-flight response conversions, polled alongside new challenges.
        let mut futures = FuturesUnordered::new();
        loop {
            tokio::select! {
                Some(request) = hardware_challenge_rx.recv() => {
                    // Use the logging facade (this module already logs via
                    // `log::error!` below); the previous `println!` bypassed it.
                    log::debug!("p2p: got hardware challenge");
                    let (incoming_resp_tx, incoming_resp_rx) = tokio::sync::oneshot::channel();
                    // Future that awaits the worker's P2P response and forwards
                    // it to the original caller.
                    let fut = async move {
                        let resp = match incoming_resp_rx.await.context("outgoing request tx channel was dropped")? {
                            p2p::Response::HardwareChallenge(resp) => resp.response,
                            _ => bail!("unexpected response type for hardware challenge request"),
                        };
                        request.response_tx.send(resp).map_err(|_|anyhow::anyhow!("caller dropped response channel"))?;
                        Ok(())
                    };
                    futures.push(fut);
                    let outgoing_request = OutgoingRequest {
                        peer_wallet_address: request.worker_wallet_address,
                        peer_id: request.worker_p2p_id,
                        multiaddrs: request.worker_addresses,
                        request: p2p::HardwareChallengeRequest {
                            challenge: request.challenge,
                            timestamp: std::time::SystemTime::now(),
                        }.into(),
                        response_tx: incoming_resp_tx,
                    };
                    outgoing_message_tx.send(outgoing_request).await
                        .context("failed to send outgoing hardware challenge request")?;
                }
                Some(res) = futures.next() => {
                    if let Err(e) = res {
                        log::error!("failed to handle response conversion: {e}");
                    }
                }
                // Once the challenge channel is closed and no conversions are
                // pending, every branch above is disabled; without this arm
                // `tokio::select!` would panic instead of shutting down cleanly.
                else => return Ok(()),
            }
        }
    }
}
/// A request, issued by validator logic, to hardware-challenge a specific
/// worker over P2P.
pub struct HardwareChallengeRequest {
    // wallet address identifying the target worker
    pub(crate) worker_wallet_address: alloy::primitives::Address,
    // presumably the worker's libp2p peer id in string form — confirm
    pub(crate) worker_p2p_id: String,
    // addresses the worker can be dialed at
    pub(crate) worker_addresses: Vec<String>,
    // the challenge payload forwarded to the worker
    pub(crate) challenge: p2p::ChallengeRequest,
    // one-shot channel on which the worker's response is delivered back
    pub(crate) response_tx: tokio::sync::oneshot::Sender<p2p::ChallengeResponse>,
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/p2p/src/lib.rs | crates/p2p/src/lib.rs | use anyhow::Context;
use anyhow::Result;
use libp2p::noise;
use libp2p::swarm::SwarmEvent;
use libp2p::tcp;
use libp2p::yamux;
use libp2p::Swarm;
use libp2p::SwarmBuilder;
use libp2p::{identity, Transport};
use log::debug;
use std::time::Duration;
mod behaviour;
mod message;
mod protocol;
use behaviour::Behaviour;
pub use message::*;
pub use protocol::*;
/// Request/response message as delivered by libp2p's request-response behaviour.
pub type Libp2pIncomingMessage = libp2p::request_response::Message<Request, Response>;
/// Channel used to send a [`Response`] back for a received [`Request`].
pub type ResponseChannel = libp2p::request_response::ResponseChannel<Response>;
// Re-exported libp2p primitives so library consumers can avoid a direct
// libp2p dependency for these common types.
pub type PeerId = libp2p::PeerId;
pub type Multiaddr = libp2p::Multiaddr;
pub type Keypair = libp2p::identity::Keypair;
/// Protocol name advertised via the identify protocol.
pub const PRIME_STREAM_PROTOCOL: libp2p::StreamProtocol =
    libp2p::StreamProtocol::new("/prime/1.0.0");
// TODO: force this to be passed by the user
/// Default agent version string reported through identify when none is configured.
pub const DEFAULT_AGENT_VERSION: &str = "prime-node/0.1.0";
/// A p2p node: wraps the libp2p swarm together with the channels that connect
/// it to the consumer of this library. Constructed via [`NodeBuilder`]; call
/// [`Node::run`] to drive its event loop.
pub struct Node {
    peer_id: PeerId,
    // Addresses to listen on; populated with a default by the builder if empty.
    listen_addrs: Vec<libp2p::Multiaddr>,
    swarm: Swarm<Behaviour>,
    // Peers dialed once at startup of `run`.
    bootnodes: Vec<Multiaddr>,
    // Signals `run` to shut down.
    cancellation_token: tokio_util::sync::CancellationToken,
    // channel for sending incoming messages to the consumer of this library
    incoming_message_tx: tokio::sync::mpsc::Sender<IncomingMessage>,
    // channel for receiving outgoing messages from the consumer of this library
    outgoing_message_rx: tokio::sync::mpsc::Receiver<OutgoingMessage>,
}
impl Node {
    /// The peer ID derived from this node's keypair.
    pub fn peer_id(&self) -> PeerId {
        self.peer_id
    }
    /// The multiaddresses this node is configured to listen on (without peer ID).
    pub fn listen_addrs(&self) -> &[libp2p::Multiaddr] {
        &self.listen_addrs
    }
    /// Returns the multiaddresses that this node is listening on, with the peer ID included.
    pub fn multiaddrs(&self) -> Vec<libp2p::Multiaddr> {
        self.listen_addrs
            .iter()
            .map(|addr| {
                addr.clone()
                    .with_p2p(self.peer_id)
                    .expect("can add peer ID to multiaddr")
            })
            .collect()
    }
    /// Drives the node's event loop until the cancellation token fires.
    ///
    /// Starts listening on the configured addresses, dials all bootnodes
    /// (dial failures are only logged), then repeatedly:
    /// - forwards outgoing requests/responses from the consumer into the swarm, and
    /// - surfaces swarm events, delivering behaviour events (request-response
    ///   messages) to the consumer via the incoming-message channel.
    pub async fn run(self) -> Result<()> {
        use libp2p::futures::StreamExt as _;
        // Destructure so the borrow checker lets the loop below use the parts
        // independently.
        let Node {
            peer_id: _,
            listen_addrs,
            mut swarm,
            bootnodes,
            cancellation_token,
            incoming_message_tx,
            mut outgoing_message_rx,
        } = self;
        for addr in listen_addrs {
            swarm
                .listen_on(addr)
                .context("swarm failed to listen on multiaddr")?;
        }
        // Bootnode dialing is best-effort: failures are logged and ignored.
        for bootnode in bootnodes {
            match swarm.dial(bootnode.clone()) {
                Ok(_) => {}
                Err(e) => {
                    debug!("failed to dial bootnode {bootnode}: {e:?}");
                }
            }
        }
        loop {
            tokio::select! {
                // `biased` so cancellation is always checked before other branches.
                biased;
                _ = cancellation_token.cancelled() => {
                    debug!("cancellation token triggered, shutting down node");
                    break Ok(());
                }
                Some(message) = outgoing_message_rx.recv() => {
                    match message {
                        OutgoingMessage::Request((peer, addrs, request)) => {
                            // TODO: if we're not connected to the peer, we should dial it
                            // Make the peer's addresses known to the swarm before sending.
                            for addr in addrs {
                                swarm.add_peer_address(peer, addr);
                            }
                            swarm.behaviour_mut().request_response().send_request(&peer, request);
                        }
                        OutgoingMessage::Response((channel, response)) => {
                            if let Err(e) = swarm.behaviour_mut().request_response().send_response(channel, response) {
                                debug!("failed to send response: {e:?}");
                            }
                        }
                    }
                }
                event = swarm.select_next_some() => {
                    match event {
                        SwarmEvent::NewListenAddr {
                            address,
                            ..
                        } => {
                            debug!("new listen address: {address}");
                        }
                        SwarmEvent::ExternalAddrConfirmed { address } => {
                            debug!("external address confirmed: {address}");
                        }
                        SwarmEvent::ConnectionEstablished {
                            peer_id,
                            ..
                        } => {
                            debug!("connection established with peer {peer_id}");
                        }
                        SwarmEvent::ConnectionClosed {
                            peer_id,
                            cause,
                            ..
                        } => {
                            debug!("connection closed with peer {peer_id}: {cause:?}");
                        }
                        // Behaviour events carry the request-response traffic;
                        // `handle` forwards them to the consumer.
                        SwarmEvent::Behaviour(event) => event.handle(incoming_message_tx.clone()).await,
                        _ => continue,
                    }
                },
            }
        }
    }
}
/// Builder for [`Node`]. All fields are optional; `try_build` fills in
/// defaults (generated keypair, default agent version, wildcard TCP listen
/// address) for anything left unset.
pub struct NodeBuilder {
    // TCP port for the default listen address (0 = OS-assigned) when no
    // explicit listen address is provided.
    port: Option<u16>,
    listen_addrs: Vec<libp2p::Multiaddr>,
    keypair: Option<identity::Keypair>,
    agent_version: Option<String>,
    // Which request-response protocols this node supports.
    protocols: Protocols,
    bootnodes: Vec<Multiaddr>,
    cancellation_token: Option<tokio_util::sync::CancellationToken>,
}
impl Default for NodeBuilder {
    /// Equivalent to [`NodeBuilder::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl NodeBuilder {
    /// Creates an empty builder: no listen addresses, no protocols, no
    /// bootnodes, and defaults applied at `try_build` time.
    pub fn new() -> Self {
        Self {
            port: None,
            listen_addrs: Vec::new(),
            keypair: None,
            agent_version: None,
            protocols: Protocols::new(),
            bootnodes: Vec::new(),
            cancellation_token: None,
        }
    }
    /// TCP port for the default listen address (0 = OS-assigned). Only used
    /// when no explicit listen address was added.
    pub fn with_port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }
    /// Adds an explicit multiaddress to listen on. If any address is added,
    /// the default `/ip4/0.0.0.0/tcp/<port>` address is not used.
    pub fn with_listen_addr(mut self, addr: libp2p::Multiaddr) -> Self {
        self.listen_addrs.push(addr);
        self
    }
    /// Identity keypair for the node; a fresh ed25519 keypair is generated if unset.
    pub fn with_keypair(mut self, keypair: identity::Keypair) -> Self {
        self.keypair = Some(keypair);
        self
    }
    /// Agent version string advertised via identify (defaults to [`DEFAULT_AGENT_VERSION`]).
    pub fn with_agent_version(mut self, agent_version: String) -> Self {
        self.agent_version = Some(agent_version);
        self
    }
    /// Enables the authentication protocol.
    pub fn with_authentication(mut self) -> Self {
        self.protocols = self.protocols.with_authentication();
        self
    }
    /// Enables the hardware-challenge protocol.
    pub fn with_hardware_challenge(mut self) -> Self {
        self.protocols = self.protocols.with_hardware_challenge();
        self
    }
    /// Enables the invite protocol.
    pub fn with_invite(mut self) -> Self {
        self.protocols = self.protocols.with_invite();
        self
    }
    /// Enables the get-task-logs protocol.
    pub fn with_get_task_logs(mut self) -> Self {
        self.protocols = self.protocols.with_get_task_logs();
        self
    }
    /// Enables the restart protocol.
    pub fn with_restart(mut self) -> Self {
        self.protocols = self.protocols.with_restart();
        self
    }
    /// Enables the general protocol.
    pub fn with_general(mut self) -> Self {
        self.protocols = self.protocols.with_general();
        self
    }
    /// Merges an existing protocol set into this builder's protocols.
    pub fn with_protocols(mut self, protocols: Protocols) -> Self {
        self.protocols.join(protocols);
        self
    }
    /// Adds a single bootnode to dial at startup.
    pub fn with_bootnode(mut self, bootnode: Multiaddr) -> Self {
        self.bootnodes.push(bootnode);
        self
    }
    /// Adds multiple bootnodes to dial at startup.
    pub fn with_bootnodes<I, T>(mut self, bootnodes: I) -> Self
    where
        I: IntoIterator<Item = T>,
        T: Into<Multiaddr>,
    {
        for bootnode in bootnodes {
            self.bootnodes.push(bootnode.into());
        }
        self
    }
    /// Token the caller can cancel to shut down the node's `run` loop.
    pub fn with_cancellation_token(
        mut self,
        cancellation_token: tokio_util::sync::CancellationToken,
    ) -> Self {
        self.cancellation_token = Some(cancellation_token);
        self
    }
    /// Builds the [`Node`] along with the channel endpoints the consumer uses
    /// to receive incoming messages and submit outgoing ones.
    ///
    /// # Errors
    /// Returns an error if the transport, behaviour, or swarm cannot be
    /// constructed.
    pub fn try_build(
        self,
    ) -> Result<(
        Node,
        tokio::sync::mpsc::Receiver<IncomingMessage>,
        tokio::sync::mpsc::Sender<OutgoingMessage>,
    )> {
        let Self {
            port,
            mut listen_addrs,
            keypair,
            agent_version,
            protocols,
            bootnodes,
            cancellation_token,
        } = self;
        // `unwrap_or_else` so a fresh keypair is only generated when none was
        // supplied (the eager `unwrap_or` form would generate one every time).
        let keypair = keypair.unwrap_or_else(identity::Keypair::generate_ed25519);
        let peer_id = keypair.public().to_peer_id();
        let transport = create_transport(&keypair)?;
        let behaviour = Behaviour::new(
            &keypair,
            protocols,
            // Lazily allocate the default agent-version string only when needed.
            agent_version.unwrap_or_else(|| DEFAULT_AGENT_VERSION.to_string()),
        )
        .context("failed to create behaviour")?;
        let swarm = SwarmBuilder::with_existing_identity(keypair)
            .with_tokio()
            .with_other_transport(|_| transport)?
            .with_behaviour(|_| behaviour)?
            .with_swarm_config(|cfg| {
                cfg.with_idle_connection_timeout(Duration::from_secs(u64::MAX)) // don't disconnect from idle peers
            })
            .build();
        // Fall back to a wildcard TCP listen address when none was configured.
        if listen_addrs.is_empty() {
            let port = port.unwrap_or(0);
            let listen_addr = format!("/ip4/0.0.0.0/tcp/{port}")
                .parse()
                .expect("can parse valid multiaddr");
            listen_addrs.push(listen_addr);
        }
        let (incoming_message_tx, incoming_message_rx) = tokio::sync::mpsc::channel(100);
        let (outgoing_message_tx, outgoing_message_rx) = tokio::sync::mpsc::channel(100);
        Ok((
            Node {
                peer_id,
                swarm,
                listen_addrs,
                bootnodes,
                incoming_message_tx,
                outgoing_message_rx,
                cancellation_token: cancellation_token.unwrap_or_default(),
            },
            incoming_message_rx,
            outgoing_message_tx,
        ))
    }
}
/// Builds the boxed transport stack for a node: plain TCP, upgraded with
/// Noise authentication (keyed by `keypair`) and Yamux multiplexing, with a
/// 20-second timeout on connection setup.
fn create_transport(
    keypair: &identity::Keypair,
) -> Result<libp2p::core::transport::Boxed<(PeerId, libp2p::core::muxing::StreamMuxerBox)>> {
    let noise_config = noise::Config::new(keypair)?;
    let tcp_transport = tcp::tokio::Transport::new(tcp::Config::default());
    Ok(tcp_transport
        .upgrade(libp2p::core::upgrade::Version::V1)
        .authenticate(noise_config)
        .multiplex(yamux::Config::default())
        .timeout(Duration::from_secs(20))
        .boxed())
}
#[cfg(test)]
mod test {
    use super::NodeBuilder;
    use crate::message;
    /// End-to-end smoke test: two nodes discover each other via the bootnode
    /// mechanism, then exchange a GetTaskLogs request and its response in
    /// both directions of the channel API.
    #[tokio::test]
    async fn two_nodes_can_connect_and_do_request_response() {
        let (node1, mut incoming_message_rx1, outgoing_message_tx1) =
            NodeBuilder::new().with_get_task_logs().try_build().unwrap();
        let node1_peer_id = node1.peer_id();
        // node2 uses node1's multiaddrs as bootnodes so it dials node1 on startup.
        let (node2, mut incoming_message_rx2, outgoing_message_tx2) = NodeBuilder::new()
            .with_get_task_logs()
            .with_bootnodes(node1.multiaddrs())
            .try_build()
            .unwrap();
        let node2_peer_id = node2.peer_id();
        tokio::spawn(async move { node1.run().await });
        tokio::spawn(async move { node2.run().await });
        // TODO: implement a way to get peer count
        // Fixed sleep to give the dial time to complete before sending.
        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
        // send request from node1->node2
        let request = message::Request::GetTaskLogs;
        outgoing_message_tx1
            .send(request.into_outgoing_message(node2_peer_id, vec![]))
            .await
            .unwrap();
        let message = incoming_message_rx2.recv().await.unwrap();
        assert_eq!(message.peer, node1_peer_id);
        let libp2p::request_response::Message::Request {
            request_id: _,
            request: message::Request::GetTaskLogs,
            channel,
        } = message.message
        else {
            panic!("expected a GetTaskLogs request message");
        };
        // send response from node2->node1
        let response =
            message::Response::GetTaskLogs(message::GetTaskLogsResponse::Ok("logs".to_string()));
        outgoing_message_tx2
            .send(response.into_outgoing_message(channel))
            .await
            .unwrap();
        let message = incoming_message_rx1.recv().await.unwrap();
        assert_eq!(message.peer, node2_peer_id);
        let libp2p::request_response::Message::Response {
            request_id: _,
            response: message::Response::GetTaskLogs(response),
        } = message.message
        else {
            panic!("expected a GetTaskLogs response message");
        };
        let message::GetTaskLogsResponse::Ok(logs) = response else {
            panic!("expected a successful GetTaskLogs response");
        };
        assert_eq!(logs, "logs");
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/p2p/src/behaviour.rs | crates/p2p/src/behaviour.rs | use anyhow::Context as _;
use anyhow::Result;
use libp2p::autonat;
use libp2p::connection_limits;
use libp2p::connection_limits::ConnectionLimits;
use libp2p::identify;
use libp2p::identity;
use libp2p::kad;
// use libp2p::kad::store::MemoryStore;
use libp2p::mdns;
use libp2p::ping;
use libp2p::request_response;
use libp2p::swarm::NetworkBehaviour;
use log::debug;
use std::time::Duration;
use crate::message::IncomingMessage;
use crate::message::{Request, Response};
use crate::Protocols;
use crate::PRIME_STREAM_PROTOCOL;
const DEFAULT_MAX_PEER_COUNT: u32 = 100;
// Composed network behaviour for a node; the derive aggregates each
// sub-behaviour's events into `BehaviourEvent`.
#[derive(NetworkBehaviour)]
#[behaviour(to_swarm = "BehaviourEvent")]
pub(crate) struct Behaviour {
    // connection gating
    connection_limits: connection_limits::Behaviour,
    // discovery
    mdns: mdns::tokio::Behaviour,
    // comment out kademlia for now as it requires bootnodes to be provided
    // kademlia: kad::Behaviour<MemoryStore>,
    // protocols
    identify: identify::Behaviour,
    ping: ping::Behaviour,
    // application request-response traffic, CBOR-encoded
    request_response: request_response::cbor::Behaviour<Request, Response>,
    // nat traversal
    autonat: autonat::Behaviour,
}
// Aggregate event type produced by `Behaviour`: one variant per sub-behaviour.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub(crate) enum BehaviourEvent {
    Autonat(autonat::Event),
    Identify(identify::Event),
    Kademlia(kad::Event),
    Mdns(mdns::Event),
    Ping(ping::Event),
    RequestResponse(request_response::Event<Request, Response>),
}
// Conversions from each sub-behaviour's event type into the aggregate
// `BehaviourEvent`, as required by the `NetworkBehaviour` derive.
impl From<void::Void> for BehaviourEvent {
    fn from(_: void::Void) -> Self {
        unreachable!("void::Void cannot be converted to BehaviourEvent")
    }
}
impl From<autonat::Event> for BehaviourEvent {
    fn from(event: autonat::Event) -> Self {
        BehaviourEvent::Autonat(event)
    }
}
impl From<kad::Event> for BehaviourEvent {
    fn from(event: kad::Event) -> Self {
        BehaviourEvent::Kademlia(event)
    }
}
impl From<libp2p::mdns::Event> for BehaviourEvent {
    fn from(event: libp2p::mdns::Event) -> Self {
        BehaviourEvent::Mdns(event)
    }
}
impl From<ping::Event> for BehaviourEvent {
    fn from(event: ping::Event) -> Self {
        BehaviourEvent::Ping(event)
    }
}
impl From<identify::Event> for BehaviourEvent {
    fn from(event: identify::Event) -> Self {
        BehaviourEvent::Identify(event)
    }
}
impl From<request_response::Event<Request, Response>> for BehaviourEvent {
    fn from(event: request_response::Event<Request, Response>) -> Self {
        BehaviourEvent::RequestResponse(event)
    }
}
impl Behaviour {
    /// Assembles the composed network behaviour for a node.
    ///
    /// `protocols` selects which request-response protocols are enabled and
    /// `agent_version` is the string advertised through identify.
    ///
    /// # Errors
    /// Returns an error if the mDNS behaviour cannot be created.
    pub(crate) fn new(
        keypair: &identity::Keypair,
        protocols: Protocols,
        agent_version: String,
    ) -> Result<Self> {
        let peer_id = keypair.public().to_peer_id();

        // Every enabled protocol is registered with full support.
        let protocol_support = protocols.into_iter().map(|protocol| {
            (
                protocol.as_stream_protocol(),
                request_response::ProtocolSupport::Full, // TODO: configure inbound/outbound based on node role and protocol
            )
        });

        let autonat = autonat::Behaviour::new(peer_id, autonat::Config::default());
        let connection_limits = connection_limits::Behaviour::new(
            ConnectionLimits::default().with_max_established(Some(DEFAULT_MAX_PEER_COUNT)),
        );
        let mdns = mdns::tokio::Behaviour::new(mdns::Config::default(), peer_id)
            .context("failed to create mDNS behaviour")?;
        // let kademlia = kad::Behaviour::new(peer_id, MemoryStore::new(peer_id));
        let identify = identify::Behaviour::new(
            identify::Config::new(PRIME_STREAM_PROTOCOL.to_string(), keypair.public())
                .with_agent_version(agent_version),
        );
        let ping = ping::Behaviour::new(ping::Config::new().with_interval(Duration::from_secs(10)));
        let request_response = request_response::cbor::Behaviour::new(
            protocol_support,
            request_response::Config::default(),
        );

        Ok(Self {
            autonat,
            connection_limits,
            // kademlia,
            mdns,
            identify,
            ping,
            request_response,
        })
    }

    /// Mutable access to the request-response behaviour, used by the node's
    /// event loop to send requests and responses.
    pub(crate) fn request_response(
        &mut self,
    ) -> &mut request_response::cbor::Behaviour<Request, Response> {
        &mut self.request_response
    }
}
impl BehaviourEvent {
pub(crate) async fn handle(self, message_tx: tokio::sync::mpsc::Sender<IncomingMessage>) {
match self {
BehaviourEvent::Autonat(_event) => {}
BehaviourEvent::Identify(_event) => {}
BehaviourEvent::Kademlia(_event) => { // TODO: potentially on outbound queries
}
BehaviourEvent::Mdns(_event) => {}
BehaviourEvent::Ping(_event) => {}
BehaviourEvent::RequestResponse(event) => match event {
request_response::Event::Message { peer, message } => {
debug!("received message from peer {peer:?}: {message:?}");
// if this errors, user dropped their incoming message channel
let _ = message_tx.send(IncomingMessage { peer, message }).await;
}
request_response::Event::ResponseSent { peer, request_id } => {
debug!("response sent to peer {peer:?} for request ID {request_id:?}");
}
request_response::Event::InboundFailure {
peer,
request_id,
error,
} => {
debug!(
"inbound failure from peer {peer:?} for request ID {request_id:?}: {error}"
);
}
request_response::Event::OutboundFailure {
peer,
request_id,
error,
} => {
debug!(
"outbound failure to peer {peer:?} for request ID {request_id:?}: {error}"
);
}
},
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.