"""Pydantic schemas for execution API requests and responses."""
from enum import Enum
from typing import Any
from pydantic import BaseModel, Field
from .graph import GraphSaveRequest
class BudgetConfigSchema(BaseModel):
    """Budget configuration for execution.

    Limits declared as ``| None`` default to ``None``, which presumably
    means "unlimited" for that dimension — confirm against the runner.
    """

    # Run-wide caps (optional).
    total_token_limit: int | None = None
    total_request_limit: int | None = None
    # Per-request truncation lengths (characters or tokens — not
    # determinable from here; verify against the consumer).
    max_prompt_length: int = 4000
    max_completion_length: int = 2000
    # Wall-clock budget for the whole execution, in seconds.
    timeout_seconds: float | None = None
# ---------------------------------------------------------------------------
# Early-stop conditions (declarative schema sent from the UI)
# ---------------------------------------------------------------------------
class EarlyStopType(str, Enum):
    """Kinds of early-stop rules selectable from the UI.

    Inherits ``str`` so values serialize as their plain string form.
    """

    KEYWORD = "keyword"            # stop when a keyword appears
    TOKEN_LIMIT = "token_limit"    # stop once a token budget is reached
    AGENT_COUNT = "agent_count"    # stop after N agents have run
class EarlyStopConditionSchema(BaseModel):
    """One early-stop rule configured in the UI.

    Exactly one of the optional fields is expected to be set, matching
    ``type`` — this schema does not enforce that pairing; presumably the
    executor validates it. Confirm with the consumer.
    """

    type: EarlyStopType
    keyword: str | None = None     # for KEYWORD
    max_tokens: int | None = None  # for TOKEN_LIMIT
    max_agents: int | None = None  # for AGENT_COUNT
# ---------------------------------------------------------------------------
# Topology hooks (predefined hook presets selectable from the UI)
# ---------------------------------------------------------------------------
class TopologyHookType(str, Enum):
    """Predefined topology-hook presets selectable from the UI.

    Inherits ``str`` so values serialize as their plain string form.
    """

    STOP_ON_KEYWORD = "stop_on_keyword"
    SKIP_ON_TOKEN_BUDGET = "skip_on_token_budget"
    FORCE_REVIEWER_ON_ERROR = "force_reviewer_on_error"
    INSERT_CHAIN_ON_KEYWORD = "insert_chain_on_keyword"
    ADD_EDGE_ON_KEYWORD = "add_edge_on_keyword"
    REDIRECT_END_ON_KEYWORD = "redirect_end_on_keyword"
    SKIP_AGENT_ON_KEYWORD = "skip_agent_on_keyword"
class TopologyHookSchema(BaseModel):
    """A predefined topology-hook preset.

    Which optional fields are relevant depends on ``type`` (see the
    per-field comments); unused fields stay ``None``. The pairing is not
    enforced here — presumably validated downstream.
    """

    type: TopologyHookType
    keyword: str | None = None             # for keyword-based hooks
    token_threshold: int | None = None     # for SKIP_ON_TOKEN_BUDGET
    reviewer_agent_id: str | None = None   # for FORCE_REVIEWER_ON_ERROR
    source_agent: str | None = None        # for edge/chain hooks
    target_agent: str | None = None        # for edge/chain/redirect hooks
    weight: float = 1.0                    # for ADD_EDGE_ON_KEYWORD
class RunnerConfigSchema(BaseModel):
    """Runner configuration mirroring gMAS RunnerConfig.

    Field names and defaults are expected to track the upstream
    ``RunnerConfig`` — keep them in sync when that type changes.
    """

    timeout: float = 60.0                   # per-agent (or per-run?) timeout, seconds — confirm upstream
    adaptive: bool = False
    enable_parallel: bool = True
    max_parallel_size: int = 5              # upper bound on concurrently running agents
    max_retries: int = 2
    enable_memory: bool = False
    memory_context_limit: int = 5           # only meaningful when enable_memory is True
    broadcast_task_to_all: bool = True
    enable_dynamic_topology: bool = False
    max_tool_iterations: int = 3
    # Optional sub-configs; None leaves runner defaults in place.
    budget_config: BudgetConfigSchema | None = None
    # default_factory avoids a shared mutable default across instances.
    early_stop_conditions: list[EarlyStopConditionSchema] = Field(default_factory=list)
    topology_hooks: list[TopologyHookSchema] = Field(default_factory=list)
class LLMProviderConfig(BaseModel):
    """LLM provider configuration.

    NOTE(review): ``api_key`` may hold a literal secret or a ``$ENV_VAR``
    reference — the resolution (and any redaction in logs) happens
    elsewhere; confirm before persisting this model anywhere.
    """

    provider_id: str
    name: str               # human-readable provider name
    base_url: str           # API endpoint base URL
    api_key: str            # Can use $ENV_VAR format
    default_model: str | None = None  # model used when a request names none
class ExecutionRequest(BaseModel):
    """Request to start a workflow execution.

    The graph may be referenced by ``graph_id`` or supplied inline via
    ``graph``; both are optional here, so presumably the endpoint
    requires exactly one — confirm with the handler.
    """

    graph_id: str | None = None            # reference to a stored graph
    graph: GraphSaveRequest | None = None  # inline graph definition
    task_query: str                        # the user task to execute
    config: RunnerConfigSchema | None = None       # None -> runner defaults
    llm_provider: LLMProviderConfig | None = None  # None -> server default provider
class StreamEventResponse(BaseModel):
    """A streaming event sent over WebSocket."""

    event_type: str
    timestamp: str  # string-typed; presumably ISO-8601 — confirm with emitter
    run_id: str | None = None
    # Free-form event payload; shape varies by event_type.
    data: dict[str, Any] = Field(default_factory=dict)
class AgentStatusEntry(BaseModel):
    """Status of a single agent during execution."""

    agent_id: str
    status: str  # "pending", "running", "completed", "error"
    response: str | None = None   # agent output once completed
    tokens_used: int = 0
    duration_ms: float = 0.0
    error: str | None = None      # populated when status is "error"
class ExecutionStatusResponse(BaseModel):
    """Response for execution status query."""

    run_id: str
    status: str  # "running", "completed", "error", "cancelled"
    agent_statuses: list[AgentStatusEntry] = Field(default_factory=list)
    events: list[StreamEventResponse] = Field(default_factory=list)
    result: dict[str, Any] | None = None  # final payload; None while running
    # Timestamps are string-typed; presumably ISO-8601 — confirm with producer.
    started_at: str = ""
    completed_at: str | None = None       # None until the run finishes
class RunHistoryEntry(BaseModel):
    """Summary entry for run history."""

    run_id: str
    graph_id: str | None = None  # None for runs launched from an inline graph
    graph_name: str = ""
    task_query: str = ""
    status: str                  # same vocabulary as ExecutionStatusResponse.status
    total_tokens: int = 0
    total_time: float = 0.0      # units not stated here (seconds vs ms) — confirm with writer
    agent_count: int = 0
    # Timestamps string-typed; presumably ISO-8601 — confirm with producer.
    started_at: str = ""
    completed_at: str | None = None