// SPFsmartGATE / src/pipeline.rs
// (hub listing residue preserved as a comment: JosephStoneCellAI — "Upload 45 files" — 1269259 verified)
// SPF Smart Gateway - Pipeline Protocol
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// BLOCK N — Pipeline orchestration for streaming task execution.
// Enables: batch submission, chain execution, worker dispatch, backpressure.
//
// Pipeline messages flow over mesh stream types 0x04 (PipelineTask) and 0x05 (PipelineResult).
// Orchestrator sends tasks → workers execute → results stream back.
//
// Depends on: Blocks F-H (framing + mesh streaming), Block I (Source::Pipeline),
// Block E (transformer for worker inference)
//
// Integrates with mesh.rs stream_router() — replaces PipelineTask/PipelineResult stubs.
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::Instant;
// ============================================================================
// PIPELINE MESSAGE TYPES
// ============================================================================
/// Unique identifier for a pipeline task
pub type TaskId = String;
/// Unique identifier for a pipeline stream/session
pub type StreamId = String;
/// A single task in a pipeline batch or chain.
///
/// Serialized as JSON and carried over mesh stream type 0x04 (see
/// `task_to_frame` / `task_from_frame`). Fields marked `#[serde(default)]`
/// may be omitted by the sender and fall back to their type's default.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineTask {
    /// Unique task identifier
    pub task_id: TaskId,
    /// Stream this task belongs to
    pub stream_id: StreamId,
    /// Tool to execute
    pub tool: String,
    /// Tool arguments (arbitrary JSON, passed to dispatch::call)
    pub args: Value,
    /// Optional: next task in chain (output of this feeds input of next).
    /// Boxed so the recursive type has a finite size.
    #[serde(default)]
    pub chain_next: Option<Box<PipelineTask>>,
    /// Priority: 0 = normal, 1 = high, 2 = urgent (defaults to 0)
    #[serde(default)]
    pub priority: u8,
    /// Timeout in milliseconds (0 = use default)
    #[serde(default)]
    pub timeout_ms: u64,
    /// Which output field to pipe to next task's input (for chaining);
    /// consumed by handle_pipeline_task when following chain_next.
    #[serde(default)]
    pub pipe_field: Option<String>,
}
/// Result of a pipeline task execution.
///
/// Serialized as JSON and carried over mesh stream type 0x05 (see
/// `result_to_frame` / `result_from_frame`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineResult {
    /// Matches the task_id from PipelineTask
    pub task_id: TaskId,
    /// Stream this result belongs to
    pub stream_id: StreamId,
    /// Execution status
    pub status: PipelineStatus,
    /// Tool result (if successful)
    pub result: Value,
    /// Error message (if failed); omitted/None on success
    #[serde(default)]
    pub error: Option<String>,
    /// Execution duration in milliseconds
    pub duration_ms: u64,
    /// Peer that executed this task ("self" for local execution)
    pub executed_by: String,
}
/// Pipeline task execution status.
///
/// Serialized in lowercase ("ok", "error", "blocked", ...) per the
/// `rename_all` attribute below.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum PipelineStatus {
    /// Task completed successfully
    Ok,
    /// Task failed with error
    Error,
    /// Task was blocked by gate
    Blocked,
    /// Task timed out
    Timeout,
    /// Task is queued, waiting for worker
    Queued,
    /// Task is currently executing
    Running,
}
// ============================================================================
/// A batch of tasks submitted together to one stream.
///
/// Consumed by PipelineState::submit_batch, which enqueues the tasks
/// according to `mode`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PipelineBatch {
    /// Stream/session identifier
    pub stream_id: StreamId,
    /// All tasks in this batch
    pub tasks: Vec<PipelineTask>,
    /// Execution mode
    pub mode: BatchMode,
    /// Peer key of the submitter
    pub submitted_by: String,
    /// Submission timestamp (RFC 3339 string)
    pub submitted_at: String,
}
/// How to execute a batch of tasks.
///
/// Serialized in lowercase ("parallel", "chain", "priority").
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum BatchMode {
    /// Execute all tasks in parallel, return all results
    Parallel,
    /// Execute sequentially, output N feeds input N+1 (linked via chain_next)
    Chain,
    /// Execute in parallel but respect priority ordering
    Priority,
}
// ============================================================================
// PIPELINE STATE — tracks active streams, queues, and backpressure
// ============================================================================
/// Pipeline orchestrator state. Manages task queues and worker dispatch.
///
/// Not internally synchronized — callers wrap it in `Arc<Mutex<...>>`
/// (see `handle_pipeline_result` / `handle_mesh_pipeline`).
pub struct PipelineState {
    /// Active task queues per stream
    queues: HashMap<StreamId, TaskQueue>,
    /// Results waiting to be collected, keyed by task id
    results: HashMap<TaskId, PipelineResult>,
    /// Backpressure: max concurrent tasks per stream
    pub max_concurrent_per_stream: usize,
    /// Backpressure: max total queued tasks (pending + running, all streams)
    pub max_total_queued: usize,
    /// Total tasks submitted (a Chain batch counts as one — only its head
    /// is enqueued; see submit_batch)
    pub total_submitted: u64,
    /// Total tasks completed with status Ok
    pub total_completed: u64,
    /// Total tasks that finished with any non-Ok status
    pub total_failed: u64,
}
/// Per-stream task queue with backpressure tracking.
struct TaskQueue {
    /// Tasks waiting to execute (dispatched LIFO via Vec::pop — see next_task)
    pending: Vec<PipelineTask>,
    /// Currently executing task IDs
    running: Vec<TaskId>,
    /// Completed task IDs for this stream
    completed: Vec<TaskId>,
    /// Stream creation time (used by cleanup_completed for age checks)
    created_at: Instant,
}
impl Default for PipelineState {
    /// Equivalent to [`PipelineState::new`].
    fn default() -> Self {
        Self::new()
    }
}

impl PipelineState {
    /// Create a new pipeline state with default limits
    /// (8 concurrent tasks per stream, 256 total queued).
    pub fn new() -> Self {
        Self {
            queues: HashMap::new(),
            results: HashMap::new(),
            max_concurrent_per_stream: 8,
            max_total_queued: 256,
            total_submitted: 0,
            total_completed: 0,
            total_failed: 0,
        }
    }

    /// Submit a batch of tasks to the pipeline.
    /// Returns Ok(stream_id) or Err if backpressure limits hit.
    pub fn submit_batch(&mut self, batch: PipelineBatch) -> Result<StreamId, PipelineError> {
        // Backpressure check: total queue depth (pending + running) across
        // ALL streams, counted against the incoming batch size.
        let total_queued: usize = self
            .queues
            .values()
            .map(|q| q.pending.len() + q.running.len())
            .sum();
        if total_queued + batch.tasks.len() > self.max_total_queued {
            return Err(PipelineError::BackpressureExceeded {
                current: total_queued,
                limit: self.max_total_queued,
                attempted: batch.tasks.len(),
            });
        }
        let stream_id = batch.stream_id.clone();
        // Create or get the queue for this stream.
        let queue = self.queues.entry(stream_id.clone()).or_insert_with(|| TaskQueue {
            pending: Vec::new(),
            running: Vec::new(),
            completed: Vec::new(),
            created_at: Instant::now(),
        });
        match batch.mode {
            BatchMode::Parallel => {
                // All tasks go to pending; dispatched as workers poll.
                for task in batch.tasks {
                    self.total_submitted += 1;
                    queue.pending.push(task);
                }
            }
            BatchMode::Chain => {
                // Only the head task is enqueued; the rest are linked via
                // chain_next, and the whole chain counts as ONE submission
                // (pinned by test_submit_chain_batch).
                if let Some(first) = Self::build_chain(batch.tasks) {
                    self.total_submitted += 1;
                    queue.pending.push(first);
                }
            }
            BatchMode::Priority => {
                // Sort descending by priority before enqueueing. NOTE: since
                // next_task() pops from the Vec tail (LIFO), the LOWEST
                // priority task is actually dispatched first — this ordering
                // is pinned by test_priority_batch_ordering and preserved.
                let mut tasks = batch.tasks;
                tasks.sort_by(|a, b| b.priority.cmp(&a.priority));
                for task in tasks {
                    self.total_submitted += 1;
                    queue.pending.push(task);
                }
            }
        }
        Ok(stream_id)
    }

    /// Build a chain from a list of tasks: task[0].chain_next = task[1], etc.
    /// Returns the head of the chain, or None for an empty list.
    fn build_chain(tasks: Vec<PipelineTask>) -> Option<PipelineTask> {
        // Walk the list back-to-front so each task links to its successor.
        // (The old version used Vec::remove(0) after a reverse — an O(n)
        // shift; the reversed iterator is equivalent and allocation-free.)
        let mut rev = tasks.into_iter().rev();
        let mut current = rev.next()?;
        for mut task in rev {
            task.chain_next = Some(Box::new(current));
            current = task;
        }
        Some(current)
    }

    /// Get the next task to dispatch for a given stream.
    /// Respects the per-stream concurrency limit. Pops from the TAIL of the
    /// pending list, so dispatch order is LIFO (pinned by test_next_task).
    pub fn next_task(&mut self, stream_id: &str) -> Option<PipelineTask> {
        let queue = self.queues.get_mut(stream_id)?;
        // Backpressure: check concurrent limit.
        if queue.running.len() >= self.max_concurrent_per_stream {
            return None;
        }
        let task = queue.pending.pop()?;
        queue.running.push(task.task_id.clone());
        Some(task)
    }

    /// Get next tasks across ALL streams (for worker polling).
    /// Returns up to `limit` tasks, cycling round-robin over streams so no
    /// single stream can starve the others.
    ///
    /// Fix: the previous implementation visited each stream only once, so it
    /// returned at most one task per stream regardless of `limit`,
    /// contradicting its own documentation.
    pub fn next_tasks(&mut self, limit: usize) -> Vec<PipelineTask> {
        let mut tasks = Vec::new();
        let stream_ids: Vec<String> = self.queues.keys().cloned().collect();
        loop {
            let mut progressed = false;
            for stream_id in &stream_ids {
                if tasks.len() >= limit {
                    return tasks;
                }
                if let Some(task) = self.next_task(stream_id) {
                    tasks.push(task);
                    progressed = true;
                }
            }
            // Stop once a full pass yields nothing: every queue is either
            // empty or at its concurrency limit.
            if !progressed {
                return tasks;
            }
        }
    }

    /// Record a task result: updates counters, clears the running slot, and
    /// stores the result for later collection.
    ///
    /// Always returns None — chain continuation is handled at dispatch level
    /// (see handle_pipeline_task), not here. The Option return is kept for
    /// interface compatibility.
    pub fn record_result(&mut self, result: PipelineResult) -> Option<PipelineTask> {
        let task_id = result.task_id.clone();
        let succeeded = result.status == PipelineStatus::Ok;
        // Update counters: any non-Ok status counts as a failure.
        if succeeded {
            self.total_completed += 1;
        } else {
            self.total_failed += 1;
        }
        // Remove from running, record completion on the stream's queue.
        if let Some(queue) = self.queues.get_mut(&result.stream_id) {
            queue.running.retain(|id| id != &task_id);
            queue.completed.push(task_id.clone());
        }
        // Store the result (moved, not cloned — the old version cloned it
        // and then fell through dead `return None; None` branches).
        self.results.insert(task_id, result);
        None
    }

    /// Get a result by task_id.
    pub fn get_result(&self, task_id: &str) -> Option<&PipelineResult> {
        self.results.get(task_id)
    }

    /// Get all results recorded for a stream (unordered).
    pub fn stream_results(&self, stream_id: &str) -> Vec<&PipelineResult> {
        self.results
            .values()
            .filter(|r| r.stream_id == stream_id)
            .collect()
    }

    /// Check if a stream is complete (no pending or running tasks).
    /// Unknown streams count as complete.
    pub fn is_stream_complete(&self, stream_id: &str) -> bool {
        self.queues
            .get(stream_id)
            .map_or(true, |q| q.pending.is_empty() && q.running.is_empty())
    }

    /// Get pipeline status summary as a JSON object (counters + limits).
    pub fn status_summary(&self) -> Value {
        let total_pending: usize = self.queues.values().map(|q| q.pending.len()).sum();
        let total_running: usize = self.queues.values().map(|q| q.running.len()).sum();
        json!({
            "active_streams": self.queues.len(),
            "total_pending": total_pending,
            "total_running": total_running,
            "total_submitted": self.total_submitted,
            "total_completed": self.total_completed,
            "total_failed": self.total_failed,
            "max_concurrent_per_stream": self.max_concurrent_per_stream,
            "max_total_queued": self.max_total_queued,
        })
    }

    /// Remove idle streams (no pending/running work) whose age — measured
    /// from stream CREATION, not from completion — exceeds `max_age_secs`,
    /// along with their stored results.
    pub fn cleanup_completed(&mut self, max_age_secs: u64) {
        let now = Instant::now();
        let to_remove: Vec<String> = self
            .queues
            .iter()
            .filter(|(_, q)| {
                q.pending.is_empty()
                    && q.running.is_empty()
                    && now.duration_since(q.created_at).as_secs() > max_age_secs
            })
            .map(|(id, _)| id.clone())
            .collect();
        for id in &to_remove {
            self.queues.remove(id);
            // Drop the results that belonged to the removed stream.
            self.results.retain(|_, r| &r.stream_id != id);
        }
    }
}
// ============================================================================
// PIPELINE ERRORS
// ============================================================================
/// Pipeline operation errors
#[derive(Debug, Clone)]
pub enum PipelineError {
    /// Queue depth limit would be exceeded by the attempted submission
    /// (returned by PipelineState::submit_batch).
    BackpressureExceeded {
        /// Tasks currently pending + running across all streams
        current: usize,
        /// Configured max_total_queued limit
        limit: usize,
        /// Number of tasks the rejected batch tried to add
        attempted: usize,
    },
    /// No queue exists for the given stream id
    StreamNotFound(String),
    /// No entry for the given task id
    TaskNotFound(String),
    /// Task failed validation (message describes why)
    InvalidTask(String),
}
impl std::fmt::Display for PipelineError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PipelineError::BackpressureExceeded { current, limit, attempted } => {
write!(f, "Backpressure: {}/{} queued, cannot add {} more", current, limit, attempted)
}
PipelineError::StreamNotFound(id) => write!(f, "Stream not found: {}", id),
PipelineError::TaskNotFound(id) => write!(f, "Task not found: {}", id),
PipelineError::InvalidTask(msg) => write!(f, "Invalid task: {}", msg),
}
}
}
// ============================================================================
// FRAME INTEGRATION — encode/decode pipeline messages to/from mesh frames
// ============================================================================
/// Serialize a PipelineTask into a mesh Frame for transmission.
/// A serialization failure degrades to an empty payload rather than panicking.
pub fn task_to_frame(task: &PipelineTask) -> crate::framing::Frame {
    let bytes = serde_json::to_vec(task).unwrap_or_default();
    crate::framing::Frame::pipeline_task(bytes)
}
/// Deserialize a PipelineTask from a Frame's JSON payload.
/// Returns a human-readable error string on malformed input.
pub fn task_from_frame(frame: &crate::framing::Frame) -> Result<PipelineTask, String> {
    match serde_json::from_slice(&frame.payload) {
        Ok(task) => Ok(task),
        Err(e) => Err(format!("Failed to decode PipelineTask: {}", e)),
    }
}
/// Serialize a PipelineResult into a mesh Frame for transmission.
/// A serialization failure degrades to an empty payload rather than panicking.
pub fn result_to_frame(result: &PipelineResult) -> crate::framing::Frame {
    let bytes = serde_json::to_vec(result).unwrap_or_default();
    crate::framing::Frame::pipeline_result(bytes)
}
/// Deserialize a PipelineResult from a Frame's JSON payload.
/// Returns a human-readable error string on malformed input.
pub fn result_from_frame(frame: &crate::framing::Frame) -> Result<PipelineResult, String> {
    match serde_json::from_slice(&frame.payload) {
        Ok(result) => Ok(result),
        Err(e) => Err(format!("Failed to decode PipelineResult: {}", e)),
    }
}
// ============================================================================
// MESH STREAM ROUTER HANDLER — replaces stubs in mesh.rs stream_router()
// ============================================================================
/// Handle an inbound PipelineTask frame from mesh.
/// Called from mesh.rs stream_router() when StreamType::PipelineTask arrives.
/// Executes the task via dispatch::call(). Chained tasks (chain_next) run
/// sequentially: each link's `pipe_field` output is copied into the next
/// link's "input" argument. Only the final (or first failing) result is
/// returned as a PipelineResult frame.
///
/// Fix: chain execution is now an iterative loop instead of recursion,
/// removing the stack-overflow risk on deep chains and the pointless
/// frame encode/decode round-trip per link the recursive version did.
pub fn handle_pipeline_task(
    frame: &crate::framing::Frame,
    state: &Arc<crate::http::ServerState>,
    peer_key: &str,
) -> Option<crate::framing::Frame> {
    let mut current = match task_from_frame(frame) {
        Ok(t) => t,
        Err(e) => {
            // Undecodable payload: report the decode failure to the sender.
            let err_result = PipelineResult {
                task_id: "unknown".to_string(),
                stream_id: "unknown".to_string(),
                status: PipelineStatus::Error,
                result: Value::Null,
                error: Some(format!("Decode error: {}", e)),
                duration_ms: 0,
                executed_by: "self".to_string(),
            };
            return Some(result_to_frame(&err_result));
        }
    };
    loop {
        let start = Instant::now();
        // Execute via unified dispatch with Source::Pipeline.
        let source = crate::dispatch::Source::Pipeline {
            stream_id: current.stream_id.clone(),
            peer_key: peer_key.to_string(),
        };
        let response = crate::dispatch::call(state, source, &current.tool, &current.args);
        let duration_ms = start.elapsed().as_millis() as u64;
        let ok = response.status == "ok";
        let result = PipelineResult {
            task_id: current.task_id.clone(),
            stream_id: current.stream_id.clone(),
            // Any non-ok dispatch status maps to Blocked, with the raw
            // status string carried in `error` (matches prior behavior).
            status: if ok { PipelineStatus::Ok } else { PipelineStatus::Blocked },
            result: response.result,
            error: if ok { None } else { Some(response.status.clone()) },
            duration_ms,
            executed_by: "self".to_string(),
        };
        // A failed link stops the chain; its result is what gets reported.
        if !ok {
            return Some(result_to_frame(&result));
        }
        match current.chain_next.take() {
            Some(next) => {
                let mut next_task = *next;
                // Pipe the selected output field of this result into the
                // next task's "input" argument (only when args is an object).
                if let Some(pipe_field) = &current.pipe_field {
                    if let Some(piped_value) = result.result.get(pipe_field) {
                        if let Some(args_obj) = next_task.args.as_object_mut() {
                            args_obj.insert("input".to_string(), piped_value.clone());
                        }
                    }
                }
                current = next_task;
            }
            // End of chain: return the final result.
            None => return Some(result_to_frame(&result)),
        }
    }
}
/// Handle an inbound PipelineResult frame from mesh.
/// Called when a remote worker sends back results. Undecodable frames are
/// silently dropped; stored results produce no response frame.
pub fn handle_pipeline_result(
    frame: &crate::framing::Frame,
    pipeline: &Arc<Mutex<PipelineState>>,
) -> Option<crate::framing::Frame> {
    // Drop frames that fail to decode.
    let result = result_from_frame(frame).ok()?;
    pipeline.lock().unwrap().record_result(result);
    // Results are stored for later collection; nothing to send back.
    None
}
// ============================================================================
// MCP TOOL HANDLERS — for spf_mesh_pipeline and spf_mesh_stream
// ============================================================================
/// Handle spf_mesh_pipeline tool — submit a batch of tasks to a peer.
/// Returns batch submission confirmation with stream_id.
pub fn handle_mesh_pipeline(
args: &Value,
pipeline: &Arc<Mutex<PipelineState>>,
) -> Value {
let peer_key = match args.get("peer_key").and_then(|v| v.as_str()) {
Some(k) => k,
None => return json!({"type": "text", "text": "ERROR: 'peer_key' required"}),
};
let tasks_val = match args.get("tasks").and_then(|v| v.as_array()) {
Some(t) => t,
None => return json!({"type": "text", "text": "ERROR: 'tasks' array required"}),
};
let mode = match args.get("mode").and_then(|v| v.as_str()).unwrap_or("parallel") {
"chain" => BatchMode::Chain,
"priority" => BatchMode::Priority,
_ => BatchMode::Parallel,
};
let stream_id = format!("pipe_{}_{}", &peer_key[..8.min(peer_key.len())],
chrono::Utc::now().timestamp_millis());
// Parse tasks
let mut tasks = Vec::new();
for (i, task_val) in tasks_val.iter().enumerate() {
let tool = match task_val.get("tool").and_then(|v| v.as_str()) {
Some(t) => t.to_string(),
None => return json!({"type": "text", "text": format!("ERROR: task[{}] missing 'tool'", i)}),
};
let task_args = task_val.get("args").cloned().unwrap_or(json!({}));
let task_id = format!("{}_{}", stream_id, i);
tasks.push(PipelineTask {
task_id,
stream_id: stream_id.clone(),
tool,
args: task_args,
chain_next: None,
priority: task_val.get("priority").and_then(|v| v.as_u64()).unwrap_or(0) as u8,
timeout_ms: task_val.get("timeout_ms").and_then(|v| v.as_u64()).unwrap_or(0),
pipe_field: task_val.get("pipe_field").and_then(|v| v.as_str()).map(String::from),
});
}
let batch = PipelineBatch {
stream_id: stream_id.clone(),
tasks,
mode: mode.clone(),
submitted_by: "local".to_string(),
submitted_at: chrono::Utc::now().to_rfc3339(),
};
let task_count = batch.tasks.len();
let mut state = pipeline.lock().unwrap();
match state.submit_batch(batch) {
Ok(sid) => {
json!({"type": "text", "text": format!(
"Pipeline submitted: {} tasks ({:?} mode)\nStream: {}\nTarget: {}",
task_count, mode, sid, peer_key
)})
}
Err(e) => {
json!({"type": "text", "text": format!("Pipeline error: {}", e)})
}
}
}
/// Handle pipeline status query
pub fn handle_pipeline_status(pipeline: &Arc<Mutex<PipelineState>>) -> Value {
let state = pipeline.lock().unwrap();
let summary = state.status_summary();
json!({"type": "text", "text": format!("Pipeline Status:\n{}",
serde_json::to_string_pretty(&summary).unwrap_or_default())})
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
    //! Unit tests for pipeline state management and frame round-trips.
    //! NOTE: several tests intentionally pin dispatch ORDER — `next_task`
    //! pops from the Vec tail, so dispatch is LIFO and priority-sorted
    //! batches pop lowest-priority first. Changing the queue data structure
    //! requires updating those assertions.
    use super::*;
    /// Build a minimal task on stream "test_stream" with default priority.
    fn make_task(id: &str, tool: &str) -> PipelineTask {
        PipelineTask {
            task_id: id.to_string(),
            stream_id: "test_stream".to_string(),
            tool: tool.to_string(),
            args: json!({"test": true}),
            chain_next: None,
            priority: 0,
            timeout_ms: 0,
            pipe_field: None,
        }
    }
    /// Build a result for stream "test_stream" with the given status.
    fn make_result(id: &str, status: PipelineStatus) -> PipelineResult {
        PipelineResult {
            task_id: id.to_string(),
            stream_id: "test_stream".to_string(),
            status,
            result: json!({"output": "test"}),
            error: None,
            duration_ms: 42,
            executed_by: "test_worker".to_string(),
        }
    }
    // Fresh state starts with zeroed counters and documented default limits.
    #[test]
    fn test_pipeline_state_new() {
        let state = PipelineState::new();
        assert_eq!(state.total_submitted, 0);
        assert_eq!(state.total_completed, 0);
        assert_eq!(state.max_concurrent_per_stream, 8);
        assert_eq!(state.max_total_queued, 256);
    }
    // Parallel mode enqueues every task and counts each one.
    #[test]
    fn test_submit_parallel_batch() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
                make_task("t3", "spf_glob"),
            ],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        let result = state.submit_batch(batch);
        assert!(result.is_ok());
        assert_eq!(state.total_submitted, 3);
    }
    // Chain mode links tasks via chain_next and counts the chain as ONE
    // submission (only the head is enqueued).
    #[test]
    fn test_submit_chain_batch() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
            ],
            mode: BatchMode::Chain,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        let result = state.submit_batch(batch);
        assert!(result.is_ok());
        // Chain builds one linked task
        assert_eq!(state.total_submitted, 1);
    }
    // Pins LIFO dispatch: the LAST enqueued task is popped first.
    #[test]
    fn test_next_task() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        let task = state.next_task("s1");
        assert!(task.is_some());
        assert_eq!(task.unwrap().tool, "spf_grep"); // LIFO from Vec::pop
    }
    // A batch larger than the remaining total-queue budget is rejected whole.
    #[test]
    fn test_backpressure() {
        let mut state = PipelineState::new();
        state.max_total_queued = 2;
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![
                make_task("t1", "spf_read"),
                make_task("t2", "spf_grep"),
                make_task("t3", "spf_glob"),
            ],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        let result = state.submit_batch(batch);
        assert!(result.is_err());
    }
    // Recording an Ok result bumps total_completed and stores the result.
    #[test]
    fn test_record_result() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        state.next_task("s1"); // Move to running
        state.record_result(make_result("t1", PipelineStatus::Ok));
        assert_eq!(state.total_completed, 1);
        assert!(state.get_result("t1").is_some());
    }
    // A stream is complete only when it has neither pending nor running tasks.
    #[test]
    fn test_stream_complete() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        assert!(!state.is_stream_complete("s1"));
        let _task = state.next_task("s1");
        assert!(!state.is_stream_complete("s1")); // Still running
        state.record_result(make_result("t1", PipelineStatus::Ok));
        assert!(state.is_stream_complete("s1"));
    }
    // status_summary reflects stream count and pending/submitted totals.
    #[test]
    fn test_status_summary() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        let summary = state.status_summary();
        assert_eq!(summary["active_streams"], 1);
        assert_eq!(summary["total_submitted"], 2);
        assert_eq!(summary["total_pending"], 2);
    }
    // Any non-Ok status counts toward total_failed, not total_completed.
    #[test]
    fn test_failed_result_increments_counter() {
        let mut state = PipelineState::new();
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        state.next_task("s1");
        state.record_result(make_result("t1", PipelineStatus::Error));
        assert_eq!(state.total_failed, 1);
        assert_eq!(state.total_completed, 0);
    }
    // Task survives encode -> Frame -> decode unchanged.
    #[test]
    fn test_task_frame_roundtrip() {
        let task = make_task("t1", "spf_read");
        let frame = task_to_frame(&task);
        let back = task_from_frame(&frame).unwrap();
        assert_eq!(back.task_id, "t1");
        assert_eq!(back.tool, "spf_read");
    }
    // Result survives encode -> Frame -> decode unchanged.
    #[test]
    fn test_result_frame_roundtrip() {
        let result = make_result("t1", PipelineStatus::Ok);
        let frame = result_to_frame(&result);
        let back = result_from_frame(&frame).unwrap();
        assert_eq!(back.task_id, "t1");
        assert_eq!(back.status, PipelineStatus::Ok);
        assert_eq!(back.executed_by, "test_worker");
    }
    // Pins the interaction of Priority sorting (descending) with LIFO pop:
    // the LOWEST priority task is dispatched first.
    #[test]
    fn test_priority_batch_ordering() {
        let mut state = PipelineState::new();
        let mut t1 = make_task("t1", "low_priority");
        t1.priority = 0;
        let mut t2 = make_task("t2", "urgent");
        t2.priority = 2;
        let mut t3 = make_task("t3", "high");
        t3.priority = 1;
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![t1, t2, t3],
            mode: BatchMode::Priority,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        // Pop order should be low_priority first (LIFO from sorted: urgent, high, low)
        let first = state.next_task("s1").unwrap();
        assert_eq!(first.tool, "low_priority"); // Last in sorted = first popped
    }
    // Per-stream concurrency limit blocks further dispatch until a slot frees.
    #[test]
    fn test_concurrent_limit() {
        let mut state = PipelineState::new();
        state.max_concurrent_per_stream = 1;
        let batch = PipelineBatch {
            stream_id: "s1".to_string(),
            tasks: vec![make_task("t1", "spf_read"), make_task("t2", "spf_grep")],
            mode: BatchMode::Parallel,
            submitted_by: "test".to_string(),
            submitted_at: "2026-02-28T00:00:00Z".to_string(),
        };
        state.submit_batch(batch).unwrap();
        let first = state.next_task("s1");
        assert!(first.is_some());
        // Second task blocked by concurrent limit
        let second = state.next_task("s1");
        assert!(second.is_none());
    }
    // build_chain links tasks head-to-tail in submission order.
    #[test]
    fn test_build_chain() {
        let tasks = vec![
            make_task("t1", "first"),
            make_task("t2", "second"),
            make_task("t3", "third"),
        ];
        let chain = PipelineState::build_chain(tasks).unwrap();
        assert_eq!(chain.task_id, "t1");
        assert_eq!(chain.tool, "first");
        let next = chain.chain_next.unwrap();
        assert_eq!(next.task_id, "t2");
        assert_eq!(next.tool, "second");
        let last = next.chain_next.unwrap();
        assert_eq!(last.task_id, "t3");
        assert_eq!(last.tool, "third");
        assert!(last.chain_next.is_none());
    }
    // Display for BackpressureExceeded includes the numeric limits.
    #[test]
    fn test_pipeline_error_display() {
        let err = PipelineError::BackpressureExceeded {
            current: 250,
            limit: 256,
            attempted: 10,
        };
        let msg = format!("{}", err);
        assert!(msg.contains("250"));
        assert!(msg.contains("256"));
    }
}