| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use serde::{Deserialize, Serialize}; |
| use serde_json::{json, Value}; |
| use std::collections::HashMap; |
| use std::sync::{Arc, Mutex}; |
| use std::time::Instant; |
|
|
| |
| |
| |
|
|
| |
/// Unique identifier of a single pipeline task.
pub type TaskId = String;

/// Identifier of a stream — one submitted batch of related tasks.
pub type StreamId = String;
|
|
| |
/// A single unit of work to run through the pipeline.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineTask {
    /// Unique id of this task.
    pub task_id: TaskId,
    /// Stream (submitted batch) this task belongs to.
    pub stream_id: StreamId,
    /// Name of the tool to invoke for this task.
    pub tool: String,
    /// JSON arguments passed to the tool.
    pub args: Value,
    /// Next task in a chain; executed after this one succeeds.
    #[serde(default)]
    pub chain_next: Option<Box<PipelineTask>>,
    /// Relative scheduling priority used by priority-mode batches; defaults to 0.
    #[serde(default)]
    pub priority: u8,
    /// Per-task timeout in milliseconds; 0 means unset.
    /// NOTE(review): not enforced anywhere visible in this module — confirm
    /// where (or whether) it is applied.
    #[serde(default)]
    pub timeout_ms: u64,
    /// When chaining: name of a field in this task's result whose value is
    /// piped into the next task's args under the key `"input"`.
    #[serde(default)]
    pub pipe_field: Option<String>,
}
|
|
| |
/// Outcome of executing one [`PipelineTask`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineResult {
    /// Id of the task this result belongs to.
    pub task_id: TaskId,
    /// Stream the task belonged to.
    pub stream_id: StreamId,
    /// Final status of the task.
    pub status: PipelineStatus,
    /// Tool output as JSON (may be `Value::Null` on failure).
    pub result: Value,
    /// Human-readable error description; `None` when the task succeeded.
    #[serde(default)]
    pub error: Option<String>,
    /// Wall-clock execution time in milliseconds.
    pub duration_ms: u64,
    /// Identifier of the node/worker that executed the task
    /// (e.g. `"self"` for local execution).
    pub executed_by: String,
}
|
|
| |
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] |
| #[serde(rename_all = "lowercase")] |
| pub enum PipelineStatus { |
| |
| Ok, |
| |
| Error, |
| |
| Blocked, |
| |
| Timeout, |
| |
| Queued, |
| |
| Running, |
| } |
|
|
| |
/// A batch of tasks submitted together under one stream id.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineBatch {
    /// Stream id shared by every task in the batch.
    pub stream_id: StreamId,
    /// The tasks to enqueue; interpretation depends on `mode`.
    pub tasks: Vec<PipelineTask>,
    /// How the tasks should be scheduled (parallel / chained / by priority).
    pub mode: BatchMode,
    /// Identifier of the submitter (e.g. `"local"`).
    pub submitted_by: String,
    /// Submission timestamp as an RFC 3339 string.
    pub submitted_at: String,
}
|
|
| |
| #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] |
| #[serde(rename_all = "lowercase")] |
| pub enum BatchMode { |
| |
| Parallel, |
| |
| Chain, |
| |
| Priority, |
| } |
|
|
| |
| |
| |
|
|
| |
/// Tracks per-stream task queues, results, capacity limits, and lifetime
/// counters for the pipeline.
///
/// Not internally synchronized — callers wrap it in a `Mutex` (see the
/// handler functions in this module).
pub struct PipelineState {
    /// Per-stream queues, keyed by stream id.
    queues: HashMap<StreamId, TaskQueue>,
    /// Finished task results, keyed by task id.
    results: HashMap<TaskId, PipelineResult>,
    /// Maximum tasks allowed in flight per stream at once.
    pub max_concurrent_per_stream: usize,
    /// Global cap on pending + running tasks across all streams.
    pub max_total_queued: usize,
    /// Lifetime count of admitted tasks (a chain counts as one).
    pub total_submitted: u64,
    /// Lifetime count of tasks that finished with status `Ok`.
    pub total_completed: u64,
    /// Lifetime count of tasks that finished with any non-`Ok` status.
    pub total_failed: u64,
}
|
|
| |
/// Per-stream bookkeeping: waiting tasks, in-flight ids, finished ids.
struct TaskQueue {
    /// Tasks waiting to be dequeued. `next_task` pops from the BACK (LIFO).
    pending: Vec<PipelineTask>,
    /// Ids of tasks currently executing.
    running: Vec<TaskId>,
    /// Ids of tasks that have finished (any status).
    completed: Vec<TaskId>,
    /// When this queue was created; `cleanup_completed` measures age from
    /// here (creation time, not last activity).
    created_at: Instant,
}
|
|
impl PipelineState {
    /// Creates an empty pipeline state with default limits:
    /// 8 concurrent tasks per stream and 256 total queued tasks.
    pub fn new() -> Self {
        Self {
            queues: HashMap::new(),
            results: HashMap::new(),
            max_concurrent_per_stream: 8,
            max_total_queued: 256,
            total_submitted: 0,
            total_completed: 0,
            total_failed: 0,
        }
    }

    /// Admits a batch into its stream's queue, enforcing the global
    /// backpressure cap (`max_total_queued`, counted over pending + running
    /// tasks across ALL streams).
    ///
    /// Returns the stream id on success, or
    /// [`PipelineError::BackpressureExceeded`] if the whole batch does not fit.
    pub fn submit_batch(&mut self, batch: PipelineBatch) -> Result<StreamId, PipelineError> {
        // Current global load: tasks waiting plus tasks in flight.
        let total_queued: usize = self.queues.values()
            .map(|q| q.pending.len() + q.running.len())
            .sum();

        // All-or-nothing admission: reject the batch if it would exceed the cap.
        if total_queued + batch.tasks.len() > self.max_total_queued {
            return Err(PipelineError::BackpressureExceeded {
                current: total_queued,
                limit: self.max_total_queued,
                attempted: batch.tasks.len(),
            });
        }

        let stream_id = batch.stream_id.clone();

        // Lazily create the per-stream queue on first submission.
        let queue = self.queues.entry(stream_id.clone()).or_insert_with(|| {
            TaskQueue {
                pending: Vec::new(),
                running: Vec::new(),
                completed: Vec::new(),
                created_at: Instant::now(),
            }
        });

        match batch.mode {
            BatchMode::Parallel => {
                // Enqueue every task independently.
                for task in batch.tasks {
                    self.total_submitted += 1;
                    queue.pending.push(task);
                }
            }
            BatchMode::Chain => {
                // Link the tasks into one chained head task; only the head is
                // enqueued, so a whole chain counts as ONE submission in the
                // stats (intentional per test_submit_chain_batch).
                if let Some(first) = Self::build_chain(batch.tasks) {
                    self.total_submitted += 1;
                    queue.pending.push(first);
                }
            }
            BatchMode::Priority => {
                // Sort descending by priority before enqueueing.
                // NOTE(review): `next_task` pops from the BACK of `pending`,
                // so this descending sort makes the LOWEST priority dequeue
                // first. The unit tests pin that order
                // (test_priority_batch_ordering) — confirm it is intended.
                let mut tasks = batch.tasks;
                tasks.sort_by(|a, b| b.priority.cmp(&a.priority));
                for task in tasks {
                    self.total_submitted += 1;
                    queue.pending.push(task);
                }
            }
        }

        Ok(stream_id)
    }

    /// Links `tasks` into a `chain_next` list preserving input order:
    /// the first element becomes the head, each link pointing at the next.
    fn build_chain(tasks: Vec<PipelineTask>) -> Option<PipelineTask> {
        if tasks.is_empty() {
            return None;
        }

        // Walk the list back-to-front, threading each task onto the front
        // of the chain built so far.
        let mut chain = tasks;
        chain.reverse();

        let mut current = chain.remove(0);
        for mut task in chain {
            task.chain_next = Some(Box::new(current));
            current = task;
        }
        Some(current)
    }

    /// Dequeues the next task for `stream_id` and marks it running.
    ///
    /// Returns `None` when the stream is unknown, has nothing pending, or is
    /// already at `max_concurrent_per_stream` in-flight tasks.
    ///
    /// NOTE(review): this pops from the back of `pending` (LIFO, not FIFO);
    /// the tests pin this order (test_next_task) — confirm intent.
    pub fn next_task(&mut self, stream_id: &str) -> Option<PipelineTask> {
        let queue = self.queues.get_mut(stream_id)?;

        // Per-stream concurrency cap.
        if queue.running.len() >= self.max_concurrent_per_stream {
            return None;
        }

        let task = queue.pending.pop()?;
        queue.running.push(task.task_id.clone());
        Some(task)
    }

    /// Dequeues up to `limit` tasks, at most ONE per stream per call — a
    /// simple fairness pass across streams. Stream visit order follows
    /// `HashMap` iteration and is therefore unspecified.
    pub fn next_tasks(&mut self, limit: usize) -> Vec<PipelineTask> {
        let mut tasks = Vec::new();
        let stream_ids: Vec<String> = self.queues.keys().cloned().collect();

        for stream_id in &stream_ids {
            if tasks.len() >= limit {
                break;
            }
            if let Some(task) = self.next_task(stream_id) {
                tasks.push(task);
            }
        }
        tasks
    }

    /// Records a finished task's result: updates the success/failure
    /// counters, moves the task id from `running` to `completed`, and stores
    /// the result for later lookup.
    ///
    /// NOTE(review): currently ALWAYS returns `None`; the
    /// `Option<PipelineTask>` return (and the dead `if !succeeded` branch)
    /// looks reserved for handing back a chained follow-up task — confirm
    /// whether chain continuation was meant to live here.
    pub fn record_result(&mut self, result: PipelineResult) -> Option<PipelineTask> {
        let task_id = result.task_id.clone();
        let stream_id = result.stream_id.clone();
        let succeeded = result.status == PipelineStatus::Ok;

        // Any non-Ok status (Error/Blocked/Timeout/...) counts as failed.
        if succeeded {
            self.total_completed += 1;
        } else {
            self.total_failed += 1;
        }

        // Unknown streams are tolerated: counters/results still update.
        if let Some(queue) = self.queues.get_mut(&stream_id) {
            queue.running.retain(|id| id != &task_id);
            queue.completed.push(task_id.clone());
        }

        // NOTE(review): `result.clone()` here looks redundant — `result` is
        // not used afterwards; left as-is to keep behavior byte-identical.
        self.results.insert(task_id, result.clone());

        if !succeeded {
            return None;
        }

        None
    }

    /// Looks up a stored result by task id.
    pub fn get_result(&self, task_id: &str) -> Option<&PipelineResult> {
        self.results.get(task_id)
    }

    /// Returns all stored results belonging to `stream_id`
    /// (linear scan over every stored result; order unspecified).
    pub fn stream_results(&self, stream_id: &str) -> Vec<&PipelineResult> {
        self.results.values()
            .filter(|r| r.stream_id == stream_id)
            .collect()
    }

    /// True when the stream has no pending and no running tasks.
    /// Unknown streams are considered complete.
    pub fn is_stream_complete(&self, stream_id: &str) -> bool {
        match self.queues.get(stream_id) {
            None => true,
            Some(queue) => queue.pending.is_empty() && queue.running.is_empty(),
        }
    }

    /// Builds a JSON snapshot of queue depths, counters, and limits.
    pub fn status_summary(&self) -> Value {
        let active_streams = self.queues.len();
        let total_pending: usize = self.queues.values().map(|q| q.pending.len()).sum();
        let total_running: usize = self.queues.values().map(|q| q.running.len()).sum();

        json!({
            "active_streams": active_streams,
            "total_pending": total_pending,
            "total_running": total_running,
            "total_submitted": self.total_submitted,
            "total_completed": self.total_completed,
            "total_failed": self.total_failed,
            "max_concurrent_per_stream": self.max_concurrent_per_stream,
            "max_total_queued": self.max_total_queued,
        })
    }

    /// Drops idle queues older than `max_age_secs` along with their stored
    /// results.
    ///
    /// NOTE(review): age is measured from queue CREATION time
    /// (`created_at`), not from last activity — a long-lived busy stream that
    /// just drained can be reaped immediately; confirm this is acceptable.
    pub fn cleanup_completed(&mut self, max_age_secs: u64) {
        let now = Instant::now();
        // Collect ids first to avoid mutating `queues` while iterating it.
        let to_remove: Vec<String> = self.queues.iter()
            .filter(|(_, q)| {
                q.pending.is_empty()
                    && q.running.is_empty()
                    && now.duration_since(q.created_at).as_secs() > max_age_secs
            })
            .map(|(id, _)| id.clone())
            .collect();

        for id in &to_remove {
            self.queues.remove(id);
            // Also drop the stream's stored results to bound memory.
            self.results.retain(|_, r| &r.stream_id != id);
        }
    }
}
|
|
| |
| |
| |
|
|
| |
/// Errors produced by pipeline queue management.
#[derive(Debug, Clone)]
pub enum PipelineError {
    /// The global queued-task cap would be exceeded by a submission.
    BackpressureExceeded {
        /// Tasks currently pending + running across all streams.
        current: usize,
        /// Configured `max_total_queued` limit.
        limit: usize,
        /// Number of tasks in the rejected batch.
        attempted: usize,
    },
    /// No queue exists for the given stream id.
    StreamNotFound(String),
    /// No entry exists for the given task id.
    TaskNotFound(String),
    /// The task payload was malformed.
    InvalidTask(String),
}

impl std::fmt::Display for PipelineError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            PipelineError::BackpressureExceeded { current, limit, attempted } => {
                write!(f, "Backpressure: {}/{} queued, cannot add {} more", current, limit, attempted)
            }
            PipelineError::StreamNotFound(id) => write!(f, "Stream not found: {}", id),
            PipelineError::TaskNotFound(id) => write!(f, "Task not found: {}", id),
            PipelineError::InvalidTask(msg) => write!(f, "Invalid task: {}", msg),
        }
    }
}

// A `Display + Debug` error type should also implement `std::error::Error`
// so callers can box it as `Box<dyn Error>` or use `?` conversions.
impl std::error::Error for PipelineError {}
|
|
| |
| |
| |
|
|
| |
| pub fn task_to_frame(task: &PipelineTask) -> crate::framing::Frame { |
| let payload = serde_json::to_vec(task).unwrap_or_default(); |
| crate::framing::Frame::pipeline_task(payload) |
| } |
|
|
| |
| pub fn task_from_frame(frame: &crate::framing::Frame) -> Result<PipelineTask, String> { |
| serde_json::from_slice(&frame.payload) |
| .map_err(|e| format!("Failed to decode PipelineTask: {}", e)) |
| } |
|
|
| |
| pub fn result_to_frame(result: &PipelineResult) -> crate::framing::Frame { |
| let payload = serde_json::to_vec(result).unwrap_or_default(); |
| crate::framing::Frame::pipeline_result(payload) |
| } |
|
|
| |
| pub fn result_from_frame(frame: &crate::framing::Frame) -> Result<PipelineResult, String> { |
| serde_json::from_slice(&frame.payload) |
| .map_err(|e| format!("Failed to decode PipelineResult: {}", e)) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
/// Executes a pipeline-task frame locally and returns a result frame.
///
/// Decodes the task, dispatches the tool call, and — on success with a
/// `chain_next` link — pipes the result into the next task and executes it
/// recursively, returning only the FINAL link's result frame.
pub fn handle_pipeline_task(
    frame: &crate::framing::Frame,
    state: &Arc<crate::http::ServerState>,
    peer_key: &str,
) -> Option<crate::framing::Frame> {
    // A decode failure still produces a result frame so the sender gets
    // feedback, but task/stream ids are unknown at that point.
    let task = match task_from_frame(frame) {
        Ok(t) => t,
        Err(e) => {
            let err_result = PipelineResult {
                task_id: "unknown".to_string(),
                stream_id: "unknown".to_string(),
                status: PipelineStatus::Error,
                result: Value::Null,
                error: Some(format!("Decode error: {}", e)),
                duration_ms: 0,
                executed_by: "self".to_string(),
            };
            return Some(result_to_frame(&err_result));
        }
    };

    let start = Instant::now();

    // Attribute the dispatch call to the pipeline stream and originating peer.
    let source = crate::dispatch::Source::Pipeline {
        stream_id: task.stream_id.clone(),
        peer_key: peer_key.to_string(),
    };

    let response = crate::dispatch::call(state, source, &task.tool, &task.args);

    let duration_ms = start.elapsed().as_millis() as u64;

    // NOTE(review): every non-"ok" dispatch status is mapped to `Blocked`,
    // never `Error` or `Timeout` — confirm this is intended.
    let status = if response.status == "ok" {
        PipelineStatus::Ok
    } else {
        PipelineStatus::Blocked
    };

    let result = PipelineResult {
        task_id: task.task_id,
        stream_id: task.stream_id,
        status,
        result: response.result,
        // On failure the raw dispatch status string doubles as the error text.
        error: if response.status != "ok" {
            Some(response.status.clone())
        } else {
            None
        },
        duration_ms,
        executed_by: "self".to_string(),
    };

    // Chain continuation: on success, optionally copy `pipe_field` from this
    // result into the next task's args under the key "input", then execute
    // the next link recursively. Only the FINAL link's result frame is
    // returned; intermediate results are dropped and recursion depth equals
    // chain length — NOTE(review): confirm both are acceptable for long chains.
    if result.status == PipelineStatus::Ok {
        if let Some(mut next_task) = task.chain_next.map(|b| *b) {
            if let Some(pipe_field) = &task.pipe_field {
                if let Some(piped_value) = result.result.get(pipe_field) {
                    if let Some(args_obj) = next_task.args.as_object_mut() {
                        args_obj.insert("input".to_string(), piped_value.clone());
                    }
                }
            }

            // Re-frame the next link and recurse on it.
            let chain_frame = task_to_frame(&next_task);
            return handle_pipeline_task(&chain_frame, state, peer_key);
        }
    }

    Some(result_to_frame(&result))
}
|
|
| |
| |
| pub fn handle_pipeline_result( |
| frame: &crate::framing::Frame, |
| pipeline: &Arc<Mutex<PipelineState>>, |
| ) -> Option<crate::framing::Frame> { |
| let result = match result_from_frame(frame) { |
| Ok(r) => r, |
| Err(_) => return None, |
| }; |
|
|
| let mut state = pipeline.lock().unwrap(); |
| state.record_result(result); |
|
|
| None |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| pub fn handle_mesh_pipeline( |
| args: &Value, |
| pipeline: &Arc<Mutex<PipelineState>>, |
| ) -> Value { |
| let peer_key = match args.get("peer_key").and_then(|v| v.as_str()) { |
| Some(k) => k, |
| None => return json!({"type": "text", "text": "ERROR: 'peer_key' required"}), |
| }; |
|
|
| let tasks_val = match args.get("tasks").and_then(|v| v.as_array()) { |
| Some(t) => t, |
| None => return json!({"type": "text", "text": "ERROR: 'tasks' array required"}), |
| }; |
|
|
| let mode = match args.get("mode").and_then(|v| v.as_str()).unwrap_or("parallel") { |
| "chain" => BatchMode::Chain, |
| "priority" => BatchMode::Priority, |
| _ => BatchMode::Parallel, |
| }; |
|
|
| let stream_id = format!("pipe_{}_{}", &peer_key[..8.min(peer_key.len())], |
| chrono::Utc::now().timestamp_millis()); |
|
|
| |
| let mut tasks = Vec::new(); |
| for (i, task_val) in tasks_val.iter().enumerate() { |
| let tool = match task_val.get("tool").and_then(|v| v.as_str()) { |
| Some(t) => t.to_string(), |
| None => return json!({"type": "text", "text": format!("ERROR: task[{}] missing 'tool'", i)}), |
| }; |
| let task_args = task_val.get("args").cloned().unwrap_or(json!({})); |
| let task_id = format!("{}_{}", stream_id, i); |
|
|
| tasks.push(PipelineTask { |
| task_id, |
| stream_id: stream_id.clone(), |
| tool, |
| args: task_args, |
| chain_next: None, |
| priority: task_val.get("priority").and_then(|v| v.as_u64()).unwrap_or(0) as u8, |
| timeout_ms: task_val.get("timeout_ms").and_then(|v| v.as_u64()).unwrap_or(0), |
| pipe_field: task_val.get("pipe_field").and_then(|v| v.as_str()).map(String::from), |
| }); |
| } |
|
|
| let batch = PipelineBatch { |
| stream_id: stream_id.clone(), |
| tasks, |
| mode: mode.clone(), |
| submitted_by: "local".to_string(), |
| submitted_at: chrono::Utc::now().to_rfc3339(), |
| }; |
|
|
| let task_count = batch.tasks.len(); |
| let mut state = pipeline.lock().unwrap(); |
| match state.submit_batch(batch) { |
| Ok(sid) => { |
| json!({"type": "text", "text": format!( |
| "Pipeline submitted: {} tasks ({:?} mode)\nStream: {}\nTarget: {}", |
| task_count, mode, sid, peer_key |
| )}) |
| } |
| Err(e) => { |
| json!({"type": "text", "text": format!("Pipeline error: {}", e)}) |
| } |
| } |
| } |
|
|
| |
| pub fn handle_pipeline_status(pipeline: &Arc<Mutex<PipelineState>>) -> Value { |
| let state = pipeline.lock().unwrap(); |
| let summary = state.status_summary(); |
| json!({"type": "text", "text": format!("Pipeline Status:\n{}", |
| serde_json::to_string_pretty(&summary).unwrap_or_default())}) |
| } |
|
|
| |
| |
| |
|
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a minimal task in stream "test_stream" with the given id/tool.
    fn make_task(id: &str, tool: &str) -> PipelineTask {
        PipelineTask {
            task_id: id.to_string(),
            stream_id: "test_stream".to_string(),
            tool: tool.to_string(),
            args: json!({"test": true}),
            chain_next: None,
            priority: 0,
            timeout_ms: 0,
            pipe_field: None,
        }
    }

    /// Builds a minimal result in stream "test_stream" with the given status.
    fn make_result(id: &str, status: PipelineStatus) -> PipelineResult {
        PipelineResult {
            task_id: id.to_string(),
            stream_id: "test_stream".to_string(),
            status,
            result: json!({"output": "test"}),
            error: None,
            duration_ms: 42,
            executed_by: "test_worker".to_string(),
        }
    }

    // Fresh state starts with zeroed counters and the documented defaults.
    #[test]
    fn test_pipeline_state_new() {
        let state = PipelineState::new();
        assert_eq!(state.total_submitted, 0);
        assert_eq!(state.total_completed, 0);
        assert_eq!(state.max_concurrent_per_stream, 8);
        assert_eq!(state.max_total_queued, 256);
    }

    // Parallel mode counts every task individually.
    #[test]
    fn test_submit_parallel_batch() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
                make_task("t3", "spf_glob"),
            ],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };

        let result = state.submit_batch(batch);
        assert!(result.is_ok());
        assert_eq!(state.total_submitted, 3);
    }

    // Chain mode links the batch into one head task, so the whole chain
    // counts as a SINGLE submission.
    #[test]
    fn test_submit_chain_batch() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
            ],
            mode: BatchMode::Chain,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };

        let result = state.submit_batch(batch);
        assert!(result.is_ok());
        assert_eq!(state.total_submitted, 1);
    }

    // NOTE(review): this pins the LIFO dequeue order — the SECOND submitted
    // task ("spf_grep") comes out first because `next_task` pops from the
    // back of `pending`. Confirm LIFO is the intended semantics.
    #[test]
    fn test_next_task() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();

        let task = state.next_task("s1");
        assert!(task.is_some());
        assert_eq!(task.unwrap().tool, "spf_grep");
    }

    // A batch larger than the remaining global capacity is rejected whole.
    #[test]
    fn test_backpressure() {
        let mut state = PipelineState::new();
        state.max_total_queued = 2;

        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
                make_task("t3", "spf_glob"),
            ],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };

        let result = state.submit_batch(batch);
        assert!(result.is_err());
    }

    // An Ok result bumps total_completed and becomes retrievable by task id.
    #[test]
    fn test_record_result() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        state.next_task("s1");

        state.record_result(make_result("t1", PipelineStatus::Ok));
        assert_eq!(state.total_completed, 1);
        assert!(state.get_result("t1").is_some());
    }

    // A stream is complete only once nothing is pending OR running.
    #[test]
    fn test_stream_complete() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        assert!(!state.is_stream_complete("s1"));

        // Dequeued but still running -> not complete yet.
        let _task = state.next_task("s1");
        assert!(!state.is_stream_complete("s1"));

        state.record_result(make_result("t1", PipelineStatus::Ok));
        assert!(state.is_stream_complete("s1"));
    }

    // status_summary reflects queue depth and counters.
    #[test]
    fn test_status_summary() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();

        let summary = state.status_summary();
        assert_eq!(summary["active_streams"], 1);
        assert_eq!(summary["total_submitted"], 2);
        assert_eq!(summary["total_pending"], 2);
    }

    // Any non-Ok status counts as failed, not completed.
    #[test]
    fn test_failed_result_increments_counter() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        state.next_task("s1");

        state.record_result(make_result("t1", PipelineStatus::Error));
        assert_eq!(state.total_failed, 1);
        assert_eq!(state.total_completed, 0);
    }

    // Task survives a serialize -> frame -> deserialize round trip.
    #[test]
    fn test_task_frame_roundtrip() {
        let task = make_task("t1", "spf_read");
        let frame = task_to_frame(&task);
        let back = task_from_frame(&frame).unwrap();
        assert_eq!(back.task_id, "t1");
        assert_eq!(back.tool, "spf_read");
    }

    // Result survives a serialize -> frame -> deserialize round trip.
    #[test]
    fn test_result_frame_roundtrip() {
        let result = make_result("t1", PipelineStatus::Ok);
        let frame = result_to_frame(&result);
        let back = result_from_frame(&frame).unwrap();
        assert_eq!(back.task_id, "t1");
        assert_eq!(back.status, PipelineStatus::Ok);
        assert_eq!(back.executed_by, "test_worker");
    }

    // NOTE(review): this pins the LOWEST-priority-first order: submit_batch
    // sorts descending, then `next_task` pops from the back, so priority 0
    // is dequeued first. If highest-first was intended, both this assertion
    // and the sort/pop pairing need to change together.
    #[test]
    fn test_priority_batch_ordering() {
        let mut state = PipelineState::new();
        let mut t1 = make_task("t1", "low_priority");
        t1.priority = 0;
        let mut t2 = make_task("t2", "urgent");
        t2.priority = 2;
        let mut t3 = make_task("t3", "high");
        t3.priority = 1;

        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![t1, t2, t3],
            mode: BatchMode::Priority,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();

        let first = state.next_task("s1").unwrap();
        assert_eq!(first.tool, "low_priority");
    }

    // With the per-stream cap at 1, a second dequeue is refused while the
    // first task is still running.
    #[test]
    fn test_concurrent_limit() {
        let mut state = PipelineState::new();
        state.max_concurrent_per_stream = 1;

        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();

        let first = state.next_task("s1");
        assert!(first.is_some());

        let second = state.next_task("s1");
        assert!(second.is_none());
    }

    // build_chain preserves input order: t1 -> t2 -> t3, terminated by None.
    #[test]
    fn test_build_chain() {
        let tasks = vec![
            make_task("t1", "first"),
            make_task("t2", "second"),
            make_task("t3", "third"),
        ];
        let chain = PipelineState::build_chain(tasks).unwrap();
        assert_eq!(chain.task_id, "t1");
        assert_eq!(chain.tool, "first");
        let next = chain.chain_next.unwrap();
        assert_eq!(next.task_id, "t2");
        assert_eq!(next.tool, "second");
        let last = next.chain_next.unwrap();
        assert_eq!(last.task_id, "t3");
        assert_eq!(last.tool, "third");
        assert!(last.chain_next.is_none());
    }

    // Display output includes the numbers callers need for diagnostics.
    #[test]
    fn test_pipeline_error_display() {
        let err = PipelineError::BackpressureExceeded {
            current: 250,
            limit: 256,
            attempted: 10,
        };
        let msg = format!("{}", err);
        assert!(msg.contains("250"));
        assert!(msg.contains("256"));
    }
}
|
|