Madras1 commited on
Commit
d6815ad
·
verified ·
1 Parent(s): ecebdd5

Upload 17 files

Browse files
.env.example ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Environment variables for Tritan
2
+
3
+ # LLM API Keys (add your keys here)
4
+ GROQ_API_KEY=
5
+ OPENROUTER_API_KEY=
6
+ CEREBRAS_API_KEY=
7
+ GEMINI_API_KEY=
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment
2
+ .env
3
+ .venv
4
+ venv/
5
+ __pycache__/
6
+ *.pyc
7
+
8
+ # IDE
9
+ .vscode/
10
+ .idea/
11
+
12
+ # OS
13
+ .DS_Store
14
+ Thumbs.db
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Slim Python base keeps the image small; 3.11 matches the codebase's syntax (PEP 604 unions etc.).
FROM python:3.11-slim

WORKDIR /app

# Install dependencies first so Docker layer caching skips the pip install
# when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application source into the image.
COPY . .

# HuggingFace Spaces routes external traffic to port 7860.
EXPOSE 7860

# Run the FastAPI app with uvicorn, bound to all interfaces on the Spaces port.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,11 +1,40 @@
1
- ---
2
- title: Tritan Api
3
- emoji: 🔥
4
- colorFrom: gray
5
- colorTo: gray
6
- sdk: docker
7
- pinned: false
8
- license: apache-2.0
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Tritan API
3
+ emoji: 🔱
4
+ colorFrom: purple
5
+ colorTo: indigo
6
+ sdk: docker
7
+ pinned: false
8
+ license: mit
9
+ app_port: 7860
10
+ ---
11
+
12
+ # Tritan API - AI-Native Workflow Engine
13
+
14
+ Backend API for the Tritan workflow engine.
15
+
16
+ ## Endpoints
17
+
18
+ - `GET /` - API info
19
+ - `GET /health` - Health check
20
+ - `GET /docs` - Swagger documentation
21
+ - `POST /api/planner/generate` - Generate workflow from task description
22
+ - `POST /api/workflows` - Create workflow
23
+ - `GET /api/workflows` - List workflows
24
+ - `POST /api/execute/{workflow_id}` - Execute workflow
25
+
26
+ ## Environment Variables
27
+
28
+ Set these as Secrets in your HuggingFace Space:
29
+
30
+ - `GROQ_API_KEY` - Your Groq API key for LLM calls
31
+ - `OPENROUTER_API_KEY` - (Optional) OpenRouter API key
32
+ - `GEMINI_API_KEY` - (Optional) Google Gemini API key
33
+
34
+ ## Usage
35
+
36
+ ```bash
37
+ curl -X POST https://YOUR-SPACE.hf.space/api/planner/generate \
38
+ -H "Content-Type: application/json" \
39
+ -d '{"task": "Monitor Bitcoin price and alert when it drops 5%"}'
40
+ ```
api/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # API package
api/execute.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Workflow execution endpoints
3
+ """
4
+ from fastapi import APIRouter, HTTPException
5
+ from fastapi.responses import StreamingResponse
6
+ from models.workflow import Workflow
7
+ from models.execution import ExecutionState, ExecutionResult, ExecutionStatus, NodeResult
8
+ from core.engine import WorkflowEngine
9
+ from typing import AsyncGenerator
10
+ import json
11
+
12
+ router = APIRouter()
13
+ engine = WorkflowEngine()
14
+
15
+
16
@router.post("/")
async def execute_workflow(workflow: Workflow):
    """Run the given workflow to completion and return the engine's result."""
    try:
        return await engine.execute(workflow)
    except Exception as exc:
        # Surface any engine failure to the client as a 500 with the error text.
        raise HTTPException(status_code=500, detail=str(exc))
24
+
25
+
26
@router.post("/stream")
async def execute_workflow_stream(workflow: Workflow):
    """Execute a workflow, streaming progress as Server-Sent Events."""

    async def event_source() -> AsyncGenerator[str, None]:
        # Each engine event becomes one SSE "data:" frame.
        async for event in engine.execute_stream(workflow):
            yield f"data: {json.dumps(event)}\n\n"

    return StreamingResponse(event_source(), media_type="text/event-stream")
38
+
39
+
40
@router.post("/node/{node_type}")
async def execute_single_node(node_type: str, config: dict):
    """Execute one node in isolation (testing helper endpoint)."""
    try:
        return await engine.execute_node(node_type, config)
    except Exception as exc:
        # Node errors become a 500 so the caller sees the failure reason.
        raise HTTPException(status_code=500, detail=str(exc))
api/planner.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Auto-Planner API - Generates workflows from natural language descriptions
3
+ """
4
+ import os
5
+ import json
6
+ import httpx
7
+ from fastapi import APIRouter, HTTPException
8
+ from pydantic import BaseModel
9
+ from typing import Optional
10
+ from dotenv import load_dotenv
11
+
12
+ load_dotenv()
13
+
14
+ router = APIRouter()
15
+
16
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
17
+ GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
18
+
19
+ SYSTEM_PROMPT = """You are a workflow planner for Tritan, an AI-native workflow engine.
20
+ Given a task description, create a workflow using these node types:
21
+
22
+ - trigger: Starting point of the workflow (ALWAYS required as first node)
23
+ - action: Execute a generic task
24
+ - condition: Branch based on a condition (has 'condition' field like "value > 10")
25
+ - loop: Iterate over items
26
+ - llm: AI language model call (has 'prompt', 'provider', 'model' fields)
27
+ - http: HTTP API request (has 'url', 'method' fields)
28
+ - code: Custom JavaScript code execution (has 'code' field)
29
+ - transform: Data transformation
30
+
31
+ Return ONLY valid JSON in this exact format, no markdown, no explanation:
32
+ {
33
+ "name": "Descriptive Workflow Name",
34
+ "nodes": [
35
+ {"id": "n1", "type": "trigger", "x": 100, "y": 150, "data": {}},
36
+ {"id": "n2", "type": "llm", "x": 350, "y": 150, "data": {"prompt": "Your prompt here", "provider": "groq", "model": "llama-3.3-70b-versatile"}},
37
+ {"id": "n3", "type": "http", "x": 600, "y": 150, "data": {"url": "https://api.example.com", "method": "POST"}}
38
+ ],
39
+ "connections": [
40
+ {"from": "n1", "to": "n2"},
41
+ {"from": "n2", "to": "n3"}
42
+ ]
43
+ }
44
+
45
+ Rules:
46
+ - Position nodes horizontally: x increases by 250 for each step
47
+ - Keep y around 100-200 for a clean layout
48
+ - Always start with a trigger node
49
+ - Make prompts specific and actionable
50
+ - Use realistic URLs for HTTP nodes
51
+ - For conditions, write clear boolean expressions"""
52
+
53
+
54
class PlannerRequest(BaseModel):
    """Request body for /generate: a natural-language task plus LLM selection."""
    # Natural-language description of the workflow to build.
    task: str
    # NOTE(review): `provider` is accepted but never read by generate_workflow,
    # which always calls the Groq endpoint — confirm whether this is intended.
    provider: Optional[str] = "groq"
    # Model id forwarded verbatim to the Groq chat-completions API.
    model: Optional[str] = "llama-3.3-70b-versatile"
58
+
59
+
60
class PlannerResponse(BaseModel):
    """Generated workflow returned by /generate: name, node list, and connections."""
    # Human-readable workflow title produced by the LLM (or a fallback).
    name: str
    # Node dicts with at least id/type/x/y/data (backfilled by the endpoint).
    nodes: list
    # Connection dicts ({"from": ..., "to": ...}); may be empty.
    connections: list
64
+
65
+
66
@router.post("/generate", response_model=PlannerResponse)
async def generate_workflow(request: PlannerRequest):
    """Generate a workflow from a natural language task description.

    Sends the task to the Groq chat-completions API with the planning system
    prompt, parses the JSON the model returns (tolerating markdown fences),
    backfills missing node fields (id, data, x, y), and returns the result.

    Raises:
        HTTPException 400: empty task description.
        HTTPException 500: missing API key, unparseable model output, or
            transport errors.
        HTTPException 504: upstream request timed out.
    """
    if not request.task.strip():
        raise HTTPException(status_code=400, detail="Task description is required")

    if not GROQ_API_KEY:
        raise HTTPException(status_code=500, detail="GROQ_API_KEY not configured in backend")

    try:
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                GROQ_API_URL,
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {GROQ_API_KEY}"
                },
                json={
                    "model": request.model,
                    "messages": [
                        {"role": "system", "content": SYSTEM_PROMPT},
                        {"role": "user", "content": f"Create a workflow for this task: {request.task}"}
                    ],
                    # Low temperature: we want deterministic, schema-shaped JSON.
                    "temperature": 0.3,
                    "max_tokens": 2000
                }
            )

            if response.status_code != 200:
                # BUGFIX: the upstream error body is usually JSON but is not
                # guaranteed to be — don't let a decode error mask the real failure.
                try:
                    detail = response.json().get("error", {}).get("message", "API error")
                except ValueError:
                    detail = response.text or "API error"
                raise HTTPException(status_code=response.status_code, detail=detail)

            data = response.json()
            content = data["choices"][0]["message"]["content"]

            # Extract JSON from the response (models often wrap it in ``` fences,
            # sometimes tagged ```json).
            content = content.strip()
            if content.startswith("```"):
                content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]

            # Parse the workflow JSON
            try:
                workflow = json.loads(content)
            except json.JSONDecodeError as e:
                raise HTTPException(status_code=500, detail=f"Failed to parse workflow JSON: {str(e)}")

            # Validate required fields; only 'nodes' is mandatory.
            if "nodes" not in workflow:
                raise HTTPException(status_code=500, detail="Generated workflow missing 'nodes'")
            if "connections" not in workflow:
                workflow["connections"] = []
            if "name" not in workflow:
                workflow["name"] = "Generated Workflow"

            # Backfill per-node fields so the frontend can always render them.
            for i, node in enumerate(workflow["nodes"]):
                if "id" not in node:
                    node["id"] = f"node_{i}"
                if "data" not in node:
                    node["data"] = {}
                if "x" not in node:
                    # Matches the layout rule in SYSTEM_PROMPT: x advances 250 per step.
                    node["x"] = 100 + (i * 250)
                if "y" not in node:
                    node["y"] = 150

            return PlannerResponse(
                name=workflow["name"],
                nodes=workflow["nodes"],
                connections=workflow["connections"]
            )

    except httpx.TimeoutException:
        raise HTTPException(status_code=504, detail="Request to LLM timed out")
    except httpx.RequestError as e:
        raise HTTPException(status_code=500, detail=f"Request error: {str(e)}")
api/workflows.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Workflow CRUD endpoints
3
+ """
4
+ from fastapi import APIRouter, HTTPException
5
+ from models.workflow import Workflow, Node, Edge
6
+ from typing import Optional
7
+
8
+ router = APIRouter()
9
+
10
+ # In-memory storage for MVP (frontend uses localStorage, this is for API testing)
11
+ workflows_db: dict[str, Workflow] = {}
12
+
13
+
14
@router.get("/")
async def list_workflows():
    """Return every stored workflow."""
    stored = list(workflows_db.values())
    return {"workflows": stored}
18
+
19
+
20
@router.get("/{workflow_id}")
async def get_workflow(workflow_id: str):
    """Fetch a single workflow by ID; 404 if it does not exist."""
    try:
        return workflows_db[workflow_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Workflow not found")
26
+
27
+
28
@router.post("/")
async def create_workflow(workflow: Workflow):
    """Store a new workflow, keyed by the ID carried in its body."""
    # The workflow's own id is the storage key (frontend generates it).
    workflows_db[workflow.id] = workflow
    return {"message": "Workflow created", "workflow": workflow}
33
+
34
+
35
@router.put("/{workflow_id}")
async def update_workflow(workflow_id: str, workflow: Workflow):
    """Replace an existing workflow; the path ID is authoritative over the body's."""
    if workflow_id not in workflows_db:
        raise HTTPException(status_code=404, detail="Workflow not found")
    workflow.id = workflow_id  # keep the stored key and the object's id in sync
    workflows_db[workflow_id] = workflow
    return {"message": "Workflow updated", "workflow": workflow}
43
+
44
+
45
@router.delete("/{workflow_id}")
async def delete_workflow(workflow_id: str):
    """Remove a workflow by ID; 404 if it is not stored."""
    try:
        del workflows_db[workflow_id]
    except KeyError:
        raise HTTPException(status_code=404, detail="Workflow not found")
    return {"message": "Workflow deleted"}
52
+
53
+
54
@router.post("/validate")
async def validate_workflow(workflow: Workflow):
    """Structurally validate a workflow (Guardian pre-check).

    Returns a dict with `valid`, plus lists of `errors` (blocking) and
    `warnings` (advisory).
    """
    errors = []
    warnings = []

    # Trigger check: exactly one trigger node is the expected shape.
    triggers = [node for node in workflow.nodes if node.type == "trigger"]
    if not triggers:
        errors.append("Workflow must have at least one trigger node")
    elif len(triggers) > 1:
        warnings.append("Multiple trigger nodes found - only first will be used")

    # Orphan check: a non-trigger node touching no edge is probably a mistake.
    linked = {edge.source for edge in workflow.edges} | {edge.target for edge in workflow.edges}
    for node in workflow.nodes:
        if node.id not in linked and node.type != "trigger":
            warnings.append(f"Node '{node.data.label}' is not connected to any other node")

    # LLM nodes must name a provider and a model to be runnable.
    for node in workflow.nodes:
        if node.type == "llm":
            if not node.data.provider:
                errors.append(f"LLM node '{node.data.label}' must have a provider")
            if not node.data.model:
                errors.append(f"LLM node '{node.data.label}' must have a model")

    return {
        "valid": not errors,
        "errors": errors,
        "warnings": warnings
    }
core/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Core package
2
+ from .engine import WorkflowEngine
3
+ from .nodes import NodeExecutor
4
+ from .guardian import Guardian
5
+
6
+ __all__ = ["WorkflowEngine", "NodeExecutor", "Guardian"]
core/engine.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Workflow Execution Engine
3
+ """
4
+ from models.workflow import Workflow, Node, NodeType
5
+ from models.execution import ExecutionState, ExecutionResult, ExecutionStatus, NodeResult
6
+ from .nodes import NodeExecutor
7
+ from .guardian import Guardian
8
+ from datetime import datetime
9
+ from typing import Any, AsyncGenerator
10
+ import asyncio
11
+
12
+
13
class WorkflowEngine:
    """
    Main workflow execution engine.

    Builds a topological execution order from the workflow's edges, runs each
    node through NodeExecutor, and accumulates per-node results in an
    ExecutionState. A Guardian instance is held for validation hooks.
    """

    def __init__(self):
        self.node_executor = NodeExecutor()
        self.guardian = Guardian()

    async def execute(self, workflow: Workflow) -> ExecutionResult:
        """Execute a workflow and return the final ExecutionResult.

        Nodes run sequentially in topological order; the first failing node
        aborts the run and marks the whole execution FAILED.
        """
        start_time = datetime.now()

        # Initialize execution state
        state = ExecutionState(
            workflow_id=workflow.id,
            status=ExecutionStatus.RUNNING,
            started_at=start_time.isoformat()
        )

        try:
            # Build execution order (topological sort)
            execution_order = self._build_execution_order(workflow)

            # Execute nodes in order
            for node_id in execution_order:
                node = self._get_node_by_id(workflow, node_id)
                if not node:
                    continue

                state.current_node = node_id
                node_result = await self._execute_node(node, state)
                state.node_results[node_id] = node_result

                # Stop at the first failed node.
                if node_result.status == ExecutionStatus.FAILED:
                    state.status = ExecutionStatus.FAILED
                    break

            if state.status != ExecutionStatus.FAILED:
                state.status = ExecutionStatus.COMPLETED

        except Exception as e:
            # Engine-level failures (e.g. a broken graph) are recorded under a
            # synthetic "_error" node so the caller still gets a full result.
            state.status = ExecutionStatus.FAILED
            state.node_results["_error"] = NodeResult(
                node_id="_error",
                status=ExecutionStatus.FAILED,
                error=str(e)
            )

        end_time = datetime.now()
        state.completed_at = end_time.isoformat()

        return ExecutionResult(
            execution_id=state.execution_id,
            workflow_id=workflow.id,
            status=state.status,
            node_results=list(state.node_results.values()),
            final_output=self._get_final_output(state),
            total_duration_ms=(end_time - start_time).total_seconds() * 1000,
            started_at=state.started_at,
            completed_at=state.completed_at
        )

    async def execute_stream(self, workflow: Workflow) -> AsyncGenerator[dict, None]:
        """Execute a workflow, yielding progress events as dicts.

        Event types: start, node_start, node_complete, error, complete.
        """
        start_time = datetime.now()

        yield {"type": "start", "workflow_id": workflow.id, "timestamp": start_time.isoformat()}

        state = ExecutionState(
            workflow_id=workflow.id,
            status=ExecutionStatus.RUNNING,
            started_at=start_time.isoformat()
        )

        try:
            execution_order = self._build_execution_order(workflow)

            for node_id in execution_order:
                node = self._get_node_by_id(workflow, node_id)
                if not node:
                    continue

                yield {"type": "node_start", "node_id": node_id, "node_type": node.type}

                node_result = await self._execute_node(node, state)
                state.node_results[node_id] = node_result

                yield {
                    "type": "node_complete",
                    "node_id": node_id,
                    "status": node_result.status,
                    "output": node_result.output,
                    "error": node_result.error
                }

                if node_result.status == ExecutionStatus.FAILED:
                    state.status = ExecutionStatus.FAILED
                    break

            if state.status != ExecutionStatus.FAILED:
                state.status = ExecutionStatus.COMPLETED

        except Exception as e:
            state.status = ExecutionStatus.FAILED
            yield {"type": "error", "message": str(e)}

        end_time = datetime.now()
        yield {
            "type": "complete",
            "status": state.status,
            "duration_ms": (end_time - start_time).total_seconds() * 1000
        }

    async def execute_node(self, node_type: str, config: dict) -> dict:
        """Execute a single node with no shared variables (testing helper)."""
        return await self.node_executor.execute(node_type, config, {})

    async def _execute_node(self, node: Node, state: ExecutionState) -> NodeResult:
        """Execute one node, record its output into state.variables, and time it."""
        start_time = datetime.now()

        try:
            output = await self.node_executor.execute(
                node.type,
                node.data.model_dump(),
                state.variables
            )

            end_time = datetime.now()

            # Store output under the node's id so downstream nodes can reference it.
            state.variables[node.id] = output

            return NodeResult(
                node_id=node.id,
                status=ExecutionStatus.COMPLETED,
                output=output,
                started_at=start_time.isoformat(),
                completed_at=end_time.isoformat(),
                duration_ms=(end_time - start_time).total_seconds() * 1000
            )

        except Exception as e:
            end_time = datetime.now()
            return NodeResult(
                node_id=node.id,
                status=ExecutionStatus.FAILED,
                error=str(e),
                started_at=start_time.isoformat(),
                completed_at=end_time.isoformat(),
                duration_ms=(end_time - start_time).total_seconds() * 1000
            )

    def _build_execution_order(self, workflow: Workflow) -> list[str]:
        """Return node ids in topological order (Kahn's algorithm).

        Nodes caught in a cycle never reach in-degree 0 and are omitted.
        """
        graph: dict[str, list[str]] = {node.id: [] for node in workflow.nodes}
        in_degree: dict[str, int] = {node.id: 0 for node in workflow.nodes}

        for edge in workflow.edges:
            graph[edge.source].append(edge.target)
            in_degree[edge.target] += 1

        # Seed with roots (in-degree 0, i.e. trigger nodes).
        order = [node_id for node_id, degree in in_degree.items() if degree == 0]

        # PERF: walk the list with an index instead of list.pop(0), which is
        # O(n) per call and made the sort quadratic.
        head = 0
        while head < len(order):
            node_id = order[head]
            head += 1
            for neighbor in graph[node_id]:
                in_degree[neighbor] -= 1
                if in_degree[neighbor] == 0:
                    order.append(neighbor)

        return order

    def _get_node_by_id(self, workflow: Workflow, node_id: str) -> Node | None:
        """Return the node with the given id, or None if absent."""
        for node in workflow.nodes:
            if node.id == node_id:
                return node
        return None

    def _get_final_output(self, state: ExecutionState) -> Any:
        """Return the output of the last completed node, or None.

        Relies on dict insertion order mirroring execution order.
        """
        if not state.node_results:
            return None

        for node_id in reversed(list(state.node_results.keys())):
            result = state.node_results[node_id]
            # BUGFIX: compare against None instead of truthiness so legitimate
            # falsy outputs (0, "", {}, []) are still returned as final output.
            if result.status == ExecutionStatus.COMPLETED and result.output is not None:
                return result.output

        return None
core/guardian.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Guardian System - AI-powered workflow monitoring and validation
3
+ """
4
+ from models.workflow import Workflow, Node, NodeType
5
+ from models.execution import ExecutionState, GuardianAlert
6
+ from typing import Optional
7
+ import os
8
+ import httpx
9
+
10
+
11
class Guardian:
    """
    The Guardian is an AI agent that:
    1. Validates workflows before execution
    2. Monitors execution in real-time
    3. Detects problems (infinite loops, missing resources)
    4. Suggests improvements and fixes
    """

    def __init__(self):
        # Accumulated alerts created via create_alert().
        self.alerts: list[GuardianAlert] = []

    async def validate(self, workflow: Workflow) -> dict:
        """
        Validate a workflow before execution.

        Runs structural checks (trigger presence, orphans, cycles, LLM node
        config, emptiness) and, when a Groq key is configured and there are no
        errors, asks an LLM for improvement suggestions.

        Returns a dict with valid/errors/warnings/suggestions plus node and
        edge counts.
        """
        errors = []
        warnings = []
        suggestions = []

        # 1. Check for trigger node
        trigger_nodes = [n for n in workflow.nodes if n.type == NodeType.TRIGGER]
        if len(trigger_nodes) == 0:
            errors.append({
                "type": "missing_trigger",
                "message": "Workflow must have at least one trigger node",
                "suggestion": "Add a Trigger node to start your workflow"
            })
        elif len(trigger_nodes) > 1:
            warnings.append({
                "type": "multiple_triggers",
                "message": "Multiple trigger nodes found",
                "suggestion": "Consider using only one trigger for clarity"
            })

        # 2. Check for orphan nodes (nodes that touch no edge at all)
        connected_nodes = set()
        for edge in workflow.edges:
            connected_nodes.add(edge.source)
            connected_nodes.add(edge.target)

        for node in workflow.nodes:
            if node.id not in connected_nodes and node.type != NodeType.TRIGGER:
                warnings.append({
                    "type": "orphan_node",
                    "node_id": node.id,
                    "message": f"Node '{node.data.label}' is not connected",
                    "suggestion": "Connect this node or remove it"
                })

        # 3. Check for cycles (infinite loops)
        if self._detect_cycle(workflow):
            errors.append({
                "type": "cycle_detected",
                "message": "Workflow contains a cycle which may cause infinite loops",
                "suggestion": "Remove the cyclic connection or add a loop limit"
            })

        # 4. Validate LLM nodes: provider, model, and (advisory) prompt
        for node in workflow.nodes:
            if node.type == NodeType.LLM:
                if not node.data.provider:
                    errors.append({
                        "type": "missing_provider",
                        "node_id": node.id,
                        "message": f"LLM node '{node.data.label}' must have a provider",
                        "suggestion": "Select a provider (groq, openrouter, cerebras, gemini)"
                    })
                if not node.data.model:
                    errors.append({
                        "type": "missing_model",
                        "node_id": node.id,
                        "message": f"LLM node '{node.data.label}' must have a model",
                        "suggestion": "Select a model for this provider"
                    })
                if not node.data.prompt:
                    warnings.append({
                        "type": "empty_prompt",
                        "node_id": node.id,
                        "message": f"LLM node '{node.data.label}' has no prompt",
                        "suggestion": "Add a prompt or use variables"
                    })

        # 5. Check for empty workflow
        if len(workflow.nodes) == 0:
            errors.append({
                "type": "empty_workflow",
                "message": "Workflow has no nodes",
                "suggestion": "Add nodes to create your workflow"
            })

        # 6. AI suggestions only when the workflow is already structurally valid
        if os.getenv("GROQ_API_KEY") and len(errors) == 0:
            ai_suggestions = await self._get_ai_suggestions(workflow)
            suggestions.extend(ai_suggestions)

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
            "suggestions": suggestions,
            "node_count": len(workflow.nodes),
            "edge_count": len(workflow.edges)
        }

    async def analyze_error(self, error: str, context: dict) -> dict:
        """
        Analyze an execution error and suggest fixes.

        Uses the Groq API when a key is configured; otherwise returns generic
        troubleshooting suggestions. Never raises — analysis failures are
        folded into the returned dict.
        """
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            return {
                "analysis": "Unable to analyze - no API key configured",
                "suggestions": [
                    "Check the error message for clues",
                    "Verify all node configurations are correct",
                    "Ensure required variables are available"
                ]
            }

        prompt = f"""Analyze this workflow execution error and suggest fixes:

Error: {error}

Context:
- Node Type: {context.get('node_type', 'unknown')}
- Node Config: {context.get('config', {})}
- Variables Available: {list(context.get('variables', {}).keys())}

Provide:
1. Brief analysis of what went wrong
2. 2-3 specific suggestions to fix it

Be concise and actionable."""

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    "https://api.groq.com/openai/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {api_key}",
                        "Content-Type": "application/json"
                    },
                    json={
                        "model": "llama-3.3-70b-versatile",
                        "messages": [{"role": "user", "content": prompt}],
                        "temperature": 0.3
                    },
                    timeout=30.0
                )
                response.raise_for_status()
                data = response.json()
                analysis = data["choices"][0]["message"]["content"]

                return {
                    "analysis": analysis,
                    "ai_powered": True
                }
        except Exception as e:
            return {
                "analysis": f"Error analyzing: {str(e)}",
                "suggestions": ["Check your API configuration"]
            }

    async def _get_ai_suggestions(self, workflow: Workflow) -> list[dict]:
        """Get AI-powered suggestions for workflow improvement.

        Best-effort: any failure (network, parsing) yields an empty list.
        """
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            return []

        # Build a compact textual description of the workflow for the prompt.
        nodes_desc = ", ".join([f"{n.data.label} ({n.type})" for n in workflow.nodes])

        prompt = f"""Analyze this workflow and suggest 1-2 improvements:

Workflow: {workflow.name}
Nodes: {nodes_desc}
Connections: {len(workflow.edges)}

Focus on:
- Performance optimizations
- Error handling improvements
- Best practices

Be very concise (1-2 sentences per suggestion)."""

        try:
            async with httpx.AsyncClient() as client:
                response = await client.post(
                    "https://api.groq.com/openai/v1/chat/completions",
                    headers={
                        "Authorization": f"Bearer {api_key}",
                        "Content-Type": "application/json"
                    },
                    json={
                        "model": "llama-3.1-8b-instant",
                        "messages": [{"role": "user", "content": prompt}],
                        "temperature": 0.5,
                        "max_tokens": 200
                    },
                    timeout=15.0
                )
                response.raise_for_status()
                data = response.json()
                content = data["choices"][0]["message"]["content"]

                return [{
                    "type": "ai_suggestion",
                    "message": content,
                    "ai_powered": True
                }]
        # BUGFIX: a bare `except:` also swallowed CancelledError / KeyboardInterrupt;
        # suggestions are best-effort, but only ordinary exceptions should be ignored.
        except Exception:
            return []

    def _detect_cycle(self, workflow: Workflow) -> bool:
        """Detect if workflow has a cycle using DFS with a recursion stack."""
        # Build adjacency list (skip edges whose source node doesn't exist).
        graph: dict[str, list[str]] = {node.id: [] for node in workflow.nodes}
        for edge in workflow.edges:
            if edge.source in graph:
                graph[edge.source].append(edge.target)

        visited = set()
        rec_stack = set()

        def dfs(node: str) -> bool:
            visited.add(node)
            rec_stack.add(node)

            for neighbor in graph.get(node, []):
                if neighbor not in visited:
                    if dfs(neighbor):
                        return True
                elif neighbor in rec_stack:
                    # Back-edge to a node on the current path => cycle.
                    return True

            rec_stack.remove(node)
            return False

        for node_id in graph:
            if node_id not in visited:
                if dfs(node_id):
                    return True

        return False

    def create_alert(
        self,
        level: str,
        message: str,
        node_id: Optional[str] = None,
        suggestion: Optional[str] = None
    ) -> GuardianAlert:
        """Create, record, and return a new Guardian alert."""
        alert = GuardianAlert(
            level=level,
            message=message,
            node_id=node_id,
            suggestion=suggestion
        )
        self.alerts.append(alert)
        return alert

    def get_alerts(self) -> list[GuardianAlert]:
        """Get all active alerts"""
        return self.alerts

    def clear_alerts(self):
        """Clear all alerts"""
        self.alerts = []
core/nodes.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Node Executors - Handle execution of different node types
3
+ """
4
+ from typing import Any
5
+ import httpx
6
+ import os
7
+ from dotenv import load_dotenv
8
+
9
+ load_dotenv()
10
+
11
+
12
+ class NodeExecutor:
13
+ """
14
+ Executes individual nodes based on their type.
15
+ Supports: trigger, action, condition, loop, llm, http, code, transform
16
+ """
17
+
18
    def __init__(self):
        # LLM provider registry: maps a provider name to its OpenAI-compatible
        # base URL, the environment variable holding its API key, and a list of
        # known model ids (used as defaults/suggestions).
        self.llm_providers = {
            "groq": {
                "base_url": "https://api.groq.com/openai/v1",
                "api_key_env": "GROQ_API_KEY",
                "models": ["llama-3.3-70b-versatile", "llama-3.1-8b-instant", "mixtral-8x7b-32768"]
            },
            "openrouter": {
                "base_url": "https://openrouter.ai/api/v1",
                "api_key_env": "OPENROUTER_API_KEY",
                "models": ["anthropic/claude-3.5-sonnet", "openai/gpt-4o", "google/gemini-2.0-flash-exp:free"]
            },
            "cerebras": {
                "base_url": "https://api.cerebras.ai/v1",
                "api_key_env": "CEREBRAS_API_KEY",
                "models": ["llama3.1-8b", "llama3.1-70b"]
            },
            # NOTE(review): Gemini's native endpoint is not OpenAI-compatible;
            # confirm downstream request shaping handles it separately.
            "gemini": {
                "base_url": "https://generativelanguage.googleapis.com/v1beta",
                "api_key_env": "GEMINI_API_KEY",
                "models": ["gemini-2.0-flash-exp", "gemini-1.5-pro"]
            }
        }
42
+
43
+ async def execute(self, node_type: str, config: dict, variables: dict) -> Any:
44
+ """Execute a node and return its output"""
45
+ executor = getattr(self, f"_execute_{node_type}", None)
46
+ if not executor:
47
+ raise ValueError(f"Unknown node type: {node_type}")
48
+
49
+ return await executor(config, variables)
50
+
51
+ async def _execute_trigger(self, config: dict, variables: dict) -> dict:
52
+ """Trigger node - starts the workflow"""
53
+ return {
54
+ "triggered": True,
55
+ "label": config.get("label", "Start"),
56
+ "timestamp": __import__("datetime").datetime.now().isoformat()
57
+ }
58
+
59
+ async def _execute_action(self, config: dict, variables: dict) -> dict:
60
+ """Generic action node"""
61
+ action_type = config.get("config", {}).get("action_type", "log")
62
+
63
+ if action_type == "log":
64
+ message = config.get("config", {}).get("message", "Action executed")
65
+ print(f"[ACTION] {message}")
66
+ return {"logged": message}
67
+
68
+ return {"action": action_type, "executed": True}
69
+
70
    async def _execute_condition(self, config: dict, variables: dict) -> dict:
        """Condition node - evaluates a boolean expression against the variables.

        Returns {"condition", "result"} on success; evaluation failures are
        reported as result=False with an "error" field rather than raised.
        """
        condition = config.get("config", {}).get("condition", "true")

        # SECURITY: eval() on a user-supplied expression is dangerous even with
        # __builtins__ stripped (attribute chains can reach dangerous objects).
        # Consider a restricted expression parser (e.g. ast-based) before
        # exposing this to untrusted workflows.
        try:
            # Substitute ${name} references with each variable's string value.
            # NOTE(review): string values are inserted unquoted, which only
            # yields a valid expression for numeric-looking values — confirm
            # the expected condition shapes with callers.
            for var_name, var_value in variables.items():
                condition = condition.replace(f"${{{var_name}}}", str(var_value))

            # Variables are also exposed directly as eval locals, so bare
            # names in the condition resolve without ${} syntax.
            result = eval(condition, {"__builtins__": {}}, variables)
            return {"condition": condition, "result": bool(result)}
        except Exception as e:
            return {"condition": condition, "result": False, "error": str(e)}
84
+
85
+ async def _execute_loop(self, config: dict, variables: dict) -> dict:
86
+ """Loop node - iterates over data"""
87
+ items = config.get("config", {}).get("items", [])
88
+ if isinstance(items, str) and items.startswith("${"):
89
+ # Reference to variable
90
+ var_name = items[2:-1]
91
+ items = variables.get(var_name, [])
92
+
93
+ return {"items": items, "count": len(items) if isinstance(items, list) else 0}
94
+
95
+ async def _execute_llm(self, config: dict, variables: dict) -> dict:
96
+ """LLM node - calls AI model"""
97
+ provider = config.get("provider", "groq")
98
+ model = config.get("model", "llama-3.3-70b-versatile")
99
+ prompt = config.get("prompt", "Hello!")
100
+ temperature = config.get("temperature", 0.7)
101
+
102
+ # Replace variables in prompt
103
+ for var_name, var_value in variables.items():
104
+ if isinstance(var_value, dict):
105
+ var_value = str(var_value)
106
+ prompt = prompt.replace(f"${{{var_name}}}", str(var_value))
107
+
108
+ provider_config = self.llm_providers.get(provider)
109
+ if not provider_config:
110
+ raise ValueError(f"Unknown LLM provider: {provider}")
111
+
112
+ api_key = os.getenv(provider_config["api_key_env"])
113
+ if not api_key:
114
+ # Return mock response if no API key
115
+ return {
116
+ "provider": provider,
117
+ "model": model,
118
+ "response": f"[Mock response - set {provider_config['api_key_env']} for real responses]",
119
+ "prompt": prompt
120
+ }
121
+
122
+ # Call LLM API
123
+ if provider == "gemini":
124
+ response = await self._call_gemini(api_key, model, prompt, temperature)
125
+ else:
126
+ response = await self._call_openai_compatible(
127
+ provider_config["base_url"],
128
+ api_key,
129
+ model,
130
+ prompt,
131
+ temperature
132
+ )
133
+
134
+ return {
135
+ "provider": provider,
136
+ "model": model,
137
+ "response": response,
138
+ "prompt": prompt
139
+ }
140
+
141
+ async def _call_openai_compatible(
142
+ self,
143
+ base_url: str,
144
+ api_key: str,
145
+ model: str,
146
+ prompt: str,
147
+ temperature: float
148
+ ) -> str:
149
+ """Call OpenAI-compatible API (Groq, OpenRouter, Cerebras)"""
150
+ async with httpx.AsyncClient() as client:
151
+ response = await client.post(
152
+ f"{base_url}/chat/completions",
153
+ headers={
154
+ "Authorization": f"Bearer {api_key}",
155
+ "Content-Type": "application/json"
156
+ },
157
+ json={
158
+ "model": model,
159
+ "messages": [{"role": "user", "content": prompt}],
160
+ "temperature": temperature
161
+ },
162
+ timeout=60.0
163
+ )
164
+ response.raise_for_status()
165
+ data = response.json()
166
+ return data["choices"][0]["message"]["content"]
167
+
168
+ async def _call_gemini(
169
+ self,
170
+ api_key: str,
171
+ model: str,
172
+ prompt: str,
173
+ temperature: float
174
+ ) -> str:
175
+ """Call Gemini API"""
176
+ async with httpx.AsyncClient() as client:
177
+ response = await client.post(
178
+ f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent",
179
+ params={"key": api_key},
180
+ json={
181
+ "contents": [{"parts": [{"text": prompt}]}],
182
+ "generationConfig": {"temperature": temperature}
183
+ },
184
+ timeout=60.0
185
+ )
186
+ response.raise_for_status()
187
+ data = response.json()
188
+ return data["candidates"][0]["content"]["parts"][0]["text"]
189
+
190
+ async def _execute_http(self, config: dict, variables: dict) -> dict:
191
+ """HTTP request node"""
192
+ method = config.get("config", {}).get("method", "GET")
193
+ url = config.get("config", {}).get("url", "")
194
+ headers = config.get("config", {}).get("headers", {})
195
+ body = config.get("config", {}).get("body", None)
196
+
197
+ # Replace variables
198
+ for var_name, var_value in variables.items():
199
+ url = url.replace(f"${{{var_name}}}", str(var_value))
200
+
201
+ async with httpx.AsyncClient() as client:
202
+ response = await client.request(
203
+ method=method,
204
+ url=url,
205
+ headers=headers,
206
+ json=body if body else None,
207
+ timeout=30.0
208
+ )
209
+
210
+ try:
211
+ data = response.json()
212
+ except:
213
+ data = response.text
214
+
215
+ return {
216
+ "status_code": response.status_code,
217
+ "data": data
218
+ }
219
+
220
+ async def _execute_code(self, config: dict, variables: dict) -> dict:
221
+ """Custom code execution node (sandboxed)"""
222
+ code = config.get("config", {}).get("code", "return {}")
223
+
224
+ # Very basic sandboxed execution
225
+ # In production, use a proper sandbox like RestrictedPython
226
+ try:
227
+ # Create a function from the code
228
+ local_vars = {"variables": variables, "result": None}
229
+ exec(f"result = {code}", {"__builtins__": {}}, local_vars)
230
+ return {"result": local_vars.get("result")}
231
+ except Exception as e:
232
+ return {"error": str(e)}
233
+
234
+ async def _execute_transform(self, config: dict, variables: dict) -> dict:
235
+ """Data transformation node"""
236
+ transform_type = config.get("config", {}).get("transform", "passthrough")
237
+ input_data = config.get("config", {}).get("input", None)
238
+
239
+ # Resolve input from variables
240
+ if isinstance(input_data, str) and input_data.startswith("${"):
241
+ var_name = input_data[2:-1]
242
+ input_data = variables.get(var_name)
243
+
244
+ if transform_type == "passthrough":
245
+ return {"data": input_data}
246
+ elif transform_type == "json_parse":
247
+ import json
248
+ return {"data": json.loads(input_data) if isinstance(input_data, str) else input_data}
249
+ elif transform_type == "uppercase":
250
+ return {"data": str(input_data).upper()}
251
+ elif transform_type == "lowercase":
252
+ return {"data": str(input_data).lower()}
253
+
254
+ return {"data": input_data}
main.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tritan API - AI-Native Workflow Engine
3
+ """
4
+ from fastapi import FastAPI
5
+ from fastapi.middleware.cors import CORSMiddleware
6
+ from api import workflows, execute, planner
7
+
8
+ app = FastAPI(
9
+ title="Tritan API",
10
+ description="AI-Native Workflow Engine",
11
+ version="0.1.0"
12
+ )
13
+
14
+ # CORS for frontend (GitHub Pages)
15
+ app.add_middleware(
16
+ CORSMiddleware,
17
+ allow_origins=["*"], # Will restrict to GitHub Pages URL later
18
+ allow_credentials=True,
19
+ allow_methods=["*"],
20
+ allow_headers=["*"],
21
+ )
22
+
23
+ # Include routers
24
+ app.include_router(workflows.router, prefix="/api/workflows", tags=["workflows"])
25
+ app.include_router(execute.router, prefix="/api/execute", tags=["execute"])
26
+ app.include_router(planner.router, prefix="/api/planner", tags=["planner"])
27
+
28
+
29
+ @app.get("/")
30
+ async def root():
31
+ return {
32
+ "name": "Tritan API",
33
+ "version": "0.1.0",
34
+ "status": "running",
35
+ "docs": "/docs"
36
+ }
37
+
38
+
39
+ @app.get("/health")
40
+ async def health():
41
+ return {"status": "healthy", "version": "0.1.0"}
models/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Models package
# Re-exports the public workflow and execution models so callers can write
# `from models import Workflow` instead of reaching into submodules.
from .workflow import Workflow, Node, Edge, NodeType
from .execution import ExecutionState, ExecutionResult, NodeResult

__all__ = [
    "Workflow", "Node", "Edge", "NodeType",
    "ExecutionState", "ExecutionResult", "NodeResult"
]
models/execution.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Execution state and result models
3
+ """
4
+ from pydantic import BaseModel, Field
5
+ from typing import Optional, Any
6
+ from enum import Enum
7
+ from datetime import datetime
8
+ import uuid
9
+
10
+
11
class ExecutionStatus(str, Enum):
    """Status of workflow execution.

    Subclasses ``str`` so values serialize directly in JSON payloads.
    """
    PENDING = "pending"        # not yet started
    RUNNING = "running"        # currently executing
    COMPLETED = "completed"    # finished without error
    FAILED = "failed"          # stopped with an error
    CANCELLED = "cancelled"    # aborted before completion
18
+
19
+
20
class NodeResult(BaseModel):
    """Result of executing a single node"""
    node_id: str
    status: ExecutionStatus
    output: Optional[Any] = None          # handler return value on success
    error: Optional[str] = None           # error message on failure
    started_at: Optional[str] = None      # timestamp string (ISO-8601 expected)
    completed_at: Optional[str] = None    # timestamp string (ISO-8601 expected)
    duration_ms: Optional[float] = None   # wall-clock duration in milliseconds
29
+
30
+
31
class ExecutionState(BaseModel):
    """Current state of workflow execution"""
    execution_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    workflow_id: str
    status: ExecutionStatus = ExecutionStatus.PENDING
    current_node: Optional[str] = None                             # node id being executed, if any
    node_results: dict[str, NodeResult] = Field(default_factory=dict)  # keyed by node id
    variables: dict[str, Any] = Field(default_factory=dict)  # Shared state
    started_at: Optional[str] = None
    completed_at: Optional[str] = None
41
+
42
+
43
class ExecutionResult(BaseModel):
    """Final result of workflow execution"""
    execution_id: str
    workflow_id: str
    status: ExecutionStatus
    node_results: list[NodeResult]        # per-node outcomes, in execution order — TODO confirm ordering
    final_output: Optional[Any] = None    # output of the terminal node, if any
    error: Optional[str] = None           # top-level failure message
    total_duration_ms: Optional[float] = None
    started_at: str
    completed_at: str
54
+
55
+
56
class GuardianAlert(BaseModel):
    """Alert from the Guardian system"""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    level: str  # info, warning, error
    message: str
    node_id: Optional[str] = None      # node the alert refers to, if any
    suggestion: Optional[str] = None   # optional remediation hint
    timestamp: str = Field(default_factory=lambda: datetime.now().isoformat())  # ISO-8601, local time
models/workflow.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Workflow data models
3
+ """
4
+ from pydantic import BaseModel, Field
5
+ from typing import Optional, Any
6
+ from enum import Enum
7
+ import uuid
8
+
9
+
10
class NodeType(str, Enum):
    """Types of nodes available in Tritan.

    Subclasses ``str`` so values serialize directly in JSON payloads.
    """
    TRIGGER = "trigger"      # Start of workflow
    ACTION = "action"        # Generic action
    CONDITION = "condition"  # If/else branching
    LOOP = "loop"            # Iterate over data
    LLM = "llm"              # AI/LLM node
    HTTP = "http"            # HTTP request
    CODE = "code"            # Custom code execution
    TRANSFORM = "transform"  # Data transformation
20
+
21
+
22
class Position(BaseModel):
    """Node position on canvas (x/y coordinates in canvas units)."""
    x: float
    y: float
26
+
27
+
28
class NodeData(BaseModel):
    """Node configuration data"""
    label: str                                      # display name for the node
    description: Optional[str] = None
    config: dict[str, Any] = Field(default_factory=dict)  # type-specific settings
    # For LLM nodes
    provider: Optional[str] = None  # groq, openrouter, cerebras, gemini
    model: Optional[str] = None
    prompt: Optional[str] = None
    temperature: Optional[float] = 0.7
38
+
39
+
40
class Node(BaseModel):
    """A node in the workflow"""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))  # auto-generated UUID
    type: NodeType
    position: Position  # where the node sits on the canvas
    data: NodeData      # label + type-specific configuration
46
+
47
+
48
class Edge(BaseModel):
    """Connection between nodes"""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    source: str  # Source node ID
    target: str  # Target node ID
    # camelCase field names match the frontend graph library's edge schema
    sourceHandle: Optional[str] = None  # For condition nodes (true/false)
    targetHandle: Optional[str] = None
55
+
56
+
57
class Workflow(BaseModel):
    """A complete workflow definition"""
    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    name: str
    description: Optional[str] = None
    nodes: list[Node] = Field(default_factory=list)   # graph vertices
    edges: list[Edge] = Field(default_factory=list)   # graph connections
    created_at: Optional[str] = None  # timestamp string, set by the API layer — TODO confirm
    updated_at: Optional[str] = None
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ fastapi>=0.109.0
2
+ uvicorn[standard]>=0.27.0
3
+ pydantic>=2.5.0
4
+ httpx>=0.26.0
5
+ python-dotenv>=1.0.0
6
+ litellm>=1.16.0