MoEY14422 committed on
Commit
0b45200
Β·
1 Parent(s): 2b5273d

Initial deployment of Capital Project Dashboard

Browse files
src/agents/analysis_agent.py β†’ app.py RENAMED
File without changes
config/settings.py CHANGED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ # Load environment variables from .env file
5
+ load_dotenv()
6
+
7
class Settings:
    """Configuration for the Capital Project Delivery system.

    All values are read from the process environment exactly once, when this
    class body is executed (i.e. at import time, after load_dotenv() has run).
    """

    # Required credential — deliberately has no default.
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    # Generation defaults, each overridable through the environment / .env.
    MODEL_NAME = os.getenv("MODEL_NAME", "gpt-5")
    TEMPERATURE = float(os.getenv("TEMPERATURE", "0.2"))
    MAX_TOKENS = int(os.getenv("MAX_TOKENS", "4096"))

    @classmethod
    def validate(cls):
        """Check that required settings are usable.

        Raises ValueError when the API key is missing or still the
        placeholder text; returns True otherwise.
        """
        key = cls.OPENAI_API_KEY
        if not key:
            raise ValueError("OPENAI_API_KEY is not set in .env file")
        if key == "put the actual API key here":
            raise ValueError("Please replace the placeholder with your actual API key")
        return True
23
+
24
# Module-level singleton; import this instance (from config.settings import
# settings) instead of constructing Settings again.
settings = Settings()
requirements.txt CHANGED
@@ -7,14 +7,26 @@ langchain-core>=0.3.0
7
  # OpenAI
8
  openai>=1.50.0
9
 
 
 
 
 
10
  # Database
11
  sqlalchemy>=2.0.23
12
 
 
 
 
 
13
  # Utilities
14
  python-dotenv>=1.0.0
15
  pydantic>=2.5.0
16
  pydantic-settings>=2.5.0
17
 
 
 
 
 
18
  # Development (optional)
19
  pytest>=7.4.3
20
  black>=23.12.0
 
7
  # OpenAI
8
  openai>=1.50.0
9
 
10
+ # CrewAI (for your multi-agent orchestration)
11
+ crewai>=0.1.0
12
+ crewai-tools>=0.1.0
13
+
14
  # Database
15
  sqlalchemy>=2.0.23
16
 
17
+ # Data Processing (for your CSV/pandas dataframes)
18
+ pandas>=2.1.0
19
+ numpy>=1.24.0
20
+
21
  # Utilities
22
  python-dotenv>=1.0.0
23
  pydantic>=2.5.0
24
  pydantic-settings>=2.5.0
25
 
26
+ # CLI Enhancement (makes command-line output prettier)
27
+ rich>=13.7.0
28
+ click>=8.1.0
29
+
30
  # Development (optional)
31
  pytest>=7.4.3
32
  black>=23.12.0
scripts/demo_ui.py ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import sys
import os

# Make the project's src/ directory importable no matter which working
# directory the script is launched from (scripts/ sits next to src/).
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src'))

from orchestrator.master_orchestrator import MasterOrchestrator

# Single orchestrator instance shared by every Gradio event handler.
orchestrator = MasterOrchestrator()
12
+
13
def process_query(query, chat_history):
    """Run a user query through the orchestrator and append the exchange.

    Returns the updated chat history (messages-format dicts) and an empty
    string that clears the input textbox.
    """
    # Ignore empty / whitespace-only submissions.
    if not query.strip():
        return chat_history, ""

    # Orchestrate, then render the structured result as chat markdown.
    reply = format_chat_response(orchestrator.orchestrate(query))

    # Gradio may hand us None on the first interaction.
    history = chat_history if chat_history is not None else []
    history.extend([
        {"role": "user", "content": query},
        {"role": "assistant", "content": reply},
    ])
    return history, ""
33
+
34
def format_chat_response(result):
    """Render an orchestrator result dict as a markdown chat reply.

    Expects result["executive_summary"] (status, critical_issues_count,
    guardrails_triggered, escalation_required, issues_summary,
    recommended_actions). The routing/agent keys ("routing_decision",
    "agents_checked", "agents_with_findings", "agent_outputs") are optional
    and degrade gracefully when absent.
    """

    summary = result["executive_summary"]
    routing = result.get("routing_decision", {})

    # Routing section header (plain string — no placeholders, so no f-prefix).
    response = """
## 🎯 Agent Routing Decision

*The orchestrator uses **OpenAI Function Calling** to intelligently decide which specialist agents to consult:*

"""

    # Show routing decision with detailed reasoning
    for agent_name, route_info in routing.items():
        agent_display = "**Supplier Fragility Agent**" if agent_name == "supplier" else "**CE Insights Agent (Contracts)**"

        if route_info.get('should_check'):
            priority = route_info.get('priority', 'medium').upper()
            reason = route_info.get('reason', 'Selected for analysis')

            priority_emoji = "πŸ”΄" if priority == "HIGH" else "🟑" if priority == "MEDIUM" else "🟒"

            response += f"""
{priority_emoji} {agent_display} - *Priority: {priority}*

**Why this agent was selected:**
{reason}

---
"""
        else:
            reason = route_info.get('reason', 'Not relevant to this query')
            response += f"""
βšͺ {agent_display} - *Skipped*

**Why this agent was not consulted:**
{reason}

---
"""

    response += """

## πŸ“Š Executive Summary

"""

    response += f"""**Status:** {get_status_badge(summary['status'])}
**Critical Issues:** {summary['critical_issues_count']}
**Guardrails Triggered:** {', '.join(summary['guardrails_triggered']) if summary['guardrails_triggered'] else 'None'}
**Escalation Required:** {'βœ… YES' if summary['escalation_required'] else '❌ NO'}

---

## πŸ€– Agent Analysis Results

"""

    # Show agents checked with results
    agents_consulted = result.get("agents_checked", [])
    agents_with_findings = result.get("agents_with_findings", [])

    if not agents_consulted:
        response += "*No agents were consulted for this query.*\n\n"
    else:
        for agent in agents_consulted:
            if agent in agents_with_findings:
                response += f"βœ… **{agent}** \n*Result:* Identified relevant issues and provided actionable recommendations\n\n"
            else:
                response += f"βšͺ **{agent}** \n*Result:* Consulted but found no critical findings for this specific query\n\n"

    # BUGFIX: use the locally-fetched agents_with_findings (defaulted to [])
    # instead of re-indexing result["agents_with_findings"], which raised a
    # KeyError whenever the orchestrator omitted that key.
    if agents_with_findings:
        response += "\n---\n\n## 🚨 Key Issues Identified\n\n"
        for i, issue in enumerate(summary["issues_summary"], 1):
            response += f"{i}. {issue}\n\n"

        response += "\n---\n\n## πŸ’‘ Recommended Actions\n\n"
        for i, action in enumerate(summary["recommended_actions"], 1):
            response += f"{i}. {action}\n\n"

        # Add detailed agent analysis; .get guards against results that have
        # findings listed but no per-agent output payloads.
        response += "\n---\n\n## πŸ“‹ Detailed Agent Analysis\n\n"

        for agent_output in result.get("agent_outputs", []):
            response += f"### πŸ“Š {agent_output['agent_name']}\n\n"

            # Supplier-agent specific metrics.
            if 'fragility_score' in agent_output:
                response += f"- **Fragility Score:** {agent_output['fragility_score']} *(0.0-1.0 scale)*\n"
                response += f"- **Outlook:** {agent_output['outlook']}\n\n"

            # Contracts-agent specific metrics.
            if 'ce_exposure_total' in agent_output:
                response += f"- **Total CE Exposure:** {agent_output['ce_exposure_total']}\n"
                response += f"- **Open CE Count:** {agent_output['ce_open_count']}\n\n"

            # Cap issue/action lists at 3 to keep the chat reply compact.
            response += "**Issues Identified:**\n"
            for issue in agent_output.get('issues_identified', [])[:3]:
                response += f"- {issue}\n"

            response += "\n**Recommended Actions:**\n"
            for action in agent_output.get('recommended_actions', [])[:3]:
                response += f"- {action}\n"

            response += "\n"
    else:
        response += "\n---\n\n## βœ… No Critical Issues\n\nAll consulted agents report normal operations.\n"

    return response
144
+
145
def get_status_badge(status):
    """Map a RAG status code to an emoji-decorated badge.

    Unknown statuses are passed through unchanged.
    """
    return {
        'RED': 'πŸ”΄ RED',
        'AMBER': '🟑 AMBER',
        'GREEN': '🟒 GREEN',
    }.get(status, status)
153
+
154
def create_demo():
    """Build and return the Gradio Blocks app for the executive dashboard.

    Wires a messages-format chatbot, example-query buttons and an info
    accordion to process_query(), which routes every query through the
    shared MasterOrchestrator instance.
    """

    # Custom CSS: global font plus the gradient banner used by the header.
    custom_css = """
    .gradio-container {
        font-family: 'Inter', sans-serif;
    }
    .header {
        text-align: center;
        padding: 20px;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border-radius: 10px;
        margin-bottom: 20px;
    }
    """

    with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:

        # Header banner (styled by the .header CSS class above).
        gr.HTML("""
        <div class="header">
            <h1>πŸ—οΈ Capital Project Executive Dashboard</h1>
            <p>AI-Powered Project Intelligence with Function Calling Routing</p>
        </div>
        """)

        # Description / onboarding copy shown above the chat.
        gr.Markdown("""
        Welcome to the **Capital Project Executive Dashboard**.

        This system uses **OpenAI Function Calling** for intelligent agent routing, ensuring each query is directed to the most relevant specialist agents.

        ### How It Works:
        1. **You ask a question** about your capital projects
        2. **Function Calling Router** analyzes your query and decides which agents to consult
        3. **Specialist agents** (Supplier & Contracts) provide expert analysis
        4. **Orchestrator** synthesizes insights into an executive summary

        ### Example Queries:
        - *"What is the current supplier risk across our portfolio?"* β†’ Routes to Supplier Agent
        - *"Show me the contract exposure and CE backlog"* β†’ Routes to Contracts Agent
        - *"Are there any critical issues I should be aware of?"* β†’ Routes to BOTH agents
        - *"What's the payment processing situation?"* β†’ Routes to Supplier Agent
        """)

        # Chat interface. type='messages' stores history as
        # [{"role": ..., "content": ...}] dicts — the exact shape
        # process_query() appends.
        chatbot = gr.Chatbot(
            label="Executive Intelligence with Smart Routing",
            height=600,
            show_label=True,
            avatar_images=(None, "πŸ€–"),
            type='messages'
        )

        with gr.Row():
            query_input = gr.Textbox(
                placeholder="Ask about your capital projects...",
                label="Your Query",
                lines=2,
                scale=4
            )
            submit_btn = gr.Button("πŸ” Analyze", variant="primary", scale=1)

        with gr.Row():
            clear_btn = gr.Button("πŸ—‘οΈ Clear Chat")

        # Example buttons — each fires a canned query (handlers below).
        gr.Markdown("### Quick Actions:")
        with gr.Row():
            ex1 = gr.Button("πŸ“Š Supplier Risk Overview", size="sm")
            ex2 = gr.Button("πŸ“‹ Contract Exposure", size="sm")
            ex3 = gr.Button("🚨 Critical Issues", size="sm")
            ex4 = gr.Button("πŸ’° Payment Status", size="sm")

        # Agent status sidebar (collapsed by default).
        with gr.Accordion("πŸ€– Routing System & Available Agents", open=False):
            gr.Markdown("""
            ### Intelligent Function Calling Routing

            The system uses **OpenAI's Function Calling** to make routing decisions:

            **How it works:**
            1. Your query is analyzed by a routing function
            2. The LLM determines which agents have relevant expertise
            3. It assigns priority levels (HIGH/MEDIUM/LOW)
            4. **Provides clear reasoning** for each routing decision

            **Benefits:**
            - ⚑ **Fast** - ~0.5s routing time
            - 🎯 **Accurate** - Guaranteed structured output
            - πŸ“Š **Transparent** - Clear reasoning for every decision
            - πŸ›‘οΈ **Reliable** - Type-safe with automatic fallback

            ---

            ### Available Specialist Agents:

            βœ… **Supplier Fragility Agent**
            *Expertise:* Supplier health, payments, float days, retention, vendor risk

            βœ… **CE Insights Agent (Contracts)**
            *Expertise:* Contract exposure, compensation events, variations, commercial risk

            πŸ”œ **Schedule Guard Agent** - Coming Soon
            πŸ”œ **Exec Intelligence Agent** - Coming Soon
            """)

        # Event handlers: button click and textbox Enter both submit;
        # process_query returns (history, "") so the input box is cleared.
        submit_btn.click(
            fn=process_query,
            inputs=[query_input, chatbot],
            outputs=[chatbot, query_input]
        )

        query_input.submit(
            fn=process_query,
            inputs=[query_input, chatbot],
            outputs=[chatbot, query_input]
        )

        # Clear resets both the chat history and the input textbox.
        clear_btn.click(
            fn=lambda: ([], ""),
            outputs=[chatbot, query_input]
        )

        # Example button handlers: each replays a fixed query against the
        # current history (`hist if hist else []` guards a None history).
        ex1.click(
            fn=lambda hist: process_query("What is the current supplier risk across our portfolio?", hist if hist else []),
            inputs=[chatbot],
            outputs=[chatbot, query_input]
        )

        ex2.click(
            fn=lambda hist: process_query("Show me the contract exposure and CE backlog", hist if hist else []),
            inputs=[chatbot],
            outputs=[chatbot, query_input]
        )

        ex3.click(
            fn=lambda hist: process_query("Are there any critical issues I should be aware of?", hist if hist else []),
            inputs=[chatbot],
            outputs=[chatbot, query_input]
        )

        ex4.click(
            fn=lambda hist: process_query("What's the current payment processing situation?", hist if hist else []),
            inputs=[chatbot],
            outputs=[chatbot, query_input]
        )

        # Footer
        gr.Markdown("""
        ---
        *Powered by OpenAI Function Calling + Multi-Agent Orchestration | Real-time Project Intelligence*
        """)

    return demo
313
+
314
if __name__ == "__main__":
    demo = create_demo()
    # Bind to all interfaces on Gradio's standard port (7860) so the app is
    # reachable from inside a container / Hugging Face Space; share=False
    # disables the public gradio.live tunnel.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True
    )
scripts/run_cli.py CHANGED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
CLI interface to test the Capital Project Orchestrator
"""
import sys
import os

# Make the project's src/ directory importable when run from scripts/.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src'))

from orchestrator.master_orchestrator import MasterOrchestrator
12
+
13
def main():
    """Run the interactive CLI loop for the Capital Project Orchestrator.

    Prints a banner and example queries, then reads queries from stdin until
    the user types q/quit/exit or closes the stream (Ctrl+C / Ctrl+D).
    """
    print("\nπŸ—οΈ CAPITAL PROJECT EXECUTIVE DASHBOARD")
    print("="*80)

    # Initialize orchestrator
    orchestrator = MasterOrchestrator()

    # Example queries to test
    example_queries = [
        "What is the current supplier risk across our portfolio?",
        "Show me the contract exposure and CE backlog",
        "Are there any critical issues I should be aware of?",
    ]

    print("\nExample queries:")
    for i, q in enumerate(example_queries, 1):
        print(f" {i}. {q}")

    print("\n" + "-"*80)

    # Interactive mode
    while True:
        # ROBUSTNESS FIX: Ctrl+C / Ctrl+D previously crashed with a raw
        # traceback; exit the loop gracefully instead.
        try:
            query = input("\nπŸ’¬ Enter your query (or 'q' to quit): ").strip()
        except (KeyboardInterrupt, EOFError):
            print("\nπŸ‘‹ Goodbye!")
            break

        if query.lower() in ['q', 'quit', 'exit']:
            print("\nπŸ‘‹ Goodbye!")
            break

        if not query:
            continue

        # Process query through orchestrator
        result = orchestrator.orchestrate(query)

        # Display formatted output
        print(orchestrator.format_output(result))

        # Ask if user wants to continue (same graceful-exit guard).
        try:
            continue_prompt = input("\nPress Enter to ask another question or 'q' to quit: ").strip()
        except (KeyboardInterrupt, EOFError):
            print("\nπŸ‘‹ Goodbye!")
            break
        if continue_prompt.lower() in ['q', 'quit', 'exit']:
            print("\nπŸ‘‹ Goodbye!")
            break
55
+
56
# Script entry point: python scripts/run_cli.py
if __name__ == "__main__":
    main()
scripts/test_openai.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
from pathlib import Path
import os
import ssl

# TEMPORARY: Disable SSL verification for testing
# WARNING: emptying these variables turns off certificate validation for
# requests/httpx-based clients process-wide. Remove before production use.
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['REQUESTS_CA_BUNDLE'] = ''

# Add project root to path so `config.settings` is importable when this
# script is run directly from scripts/.
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from langchain_openai import ChatOpenAI
from langchain.schema import HumanMessage
from config.settings import settings
import httpx
18
+
19
def test_openai_connection():
    """Smoke-test the OpenAI API connection used by the delivery system.

    Validates the environment configuration, builds a ChatOpenAI client
    (with SSL verification disabled — temporary workaround), sends a single
    message and prints the reply.

    Returns:
        True on a successful round-trip, False on any configuration or
        connection failure (errors are printed, never raised).
    """

    print("=" * 60)
    print("Testing OpenAI Connection for Capital Project Delivery System")
    print("=" * 60)

    try:
        # Validate settings — raises ValueError on a missing/placeholder key.
        settings.validate()
        print("βœ“ Environment variables loaded successfully")
        print(f" Model: {settings.MODEL_NAME}")
        print(f" Temperature: {settings.TEMPERATURE}")
        print(f" Max Tokens: {settings.MAX_TOKENS}")
        print()

        # Create HTTP client that doesn't verify SSL (TEMPORARY FIX)
        # NOTE(review): verify=False disables certificate checks; replace
        # with a proper CA bundle before any production use.
        http_client = httpx.Client(verify=False)

        # Initialize LangChain OpenAI with custom client
        llm = ChatOpenAI(
            model=settings.MODEL_NAME,
            temperature=settings.TEMPERATURE,
            max_tokens=settings.MAX_TOKENS,
            openai_api_key=settings.OPENAI_API_KEY,
            http_client=http_client
        )

        print("Testing API call...")

        # Simple test message
        test_message = HumanMessage(
            content="You are an orchestrating agent for a capital project delivery system. "
            "Respond with 'Connection successful' if you can read this message."
        )

        response = llm.invoke([test_message])

        print("βœ“ OpenAI API connection successful!")
        print(f"\nResponse: {response.content}")
        print("\n" + "=" * 60)
        print("Setup is complete and working correctly!")
        print("=" * 60)
        print("\nNOTE: SSL verification is disabled. Fix certificates for production!")

        return True

    except ValueError as e:
        # Raised only by settings.validate() above.
        print(f"βœ— Configuration Error: {e}")
        return False
    except Exception as e:
        # Any other failure (network, auth, SSL, SDK): report with traceback.
        print(f"βœ— Connection Error: {e}")
        print(f"\nFull error details: {type(e).__name__}")
        import traceback
        traceback.print_exc()
        return False
75
+
76
# Allow running this check directly: python scripts/test_openai.py
if __name__ == "__main__":
    test_openai_connection()
src/agents/contracts_agent.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from crewai import Agent, Task, Crew
2
+ from typing import Dict, Any
3
+ import json
4
+ import re
5
+
6
class ContractsAgent:
    """CrewAI-backed Commercial & Contracts analyst.

    Wraps a single CrewAI Agent that analyses compensation-event (CE)
    exposure and commercial risk for a query and returns a structured dict
    with keys: ce_exposure_total, ce_open_count, issues_identified,
    recommended_actions, contracts_at_risk, kpis_flagged,
    guardrails_triggered (plus agent_name). Degrades to canned, query-aware
    fallback data whenever the LLM call or output parsing fails.
    """

    def __init__(self, llm=None):
        # llm is optional; when None, CrewAI uses its default model.
        self.llm = llm
        self.agent = Agent(
            role="Commercial & Contracts Analyst",
            goal="Assess contract performance, compensation event exposure, and commercial risks across capital projects",
            backstory="""You are an expert Commercial Director with deep experience managing major
            infrastructure and construction contracts worth Β£100M+. You analyze compensation events (CE),
            contract variations, and commercial risk. You understand NEC3/NEC4 contracts, early warnings,
            clause exceptions, and approval workflows. You provide specific, commercially-sound recommendations
            that protect the organization's interests while maintaining good supplier relationships.""",
            verbose=True,
            llm=llm,
            allow_delegation=False
        )

    def analyze(self, query: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Analyze contracts-related query using LLM and return structured insights.

        Args:
            query: free-text user question, interpolated into the prompt.
            context: optional extra context; a generic portfolio description
                is substituted when falsy.

        Returns:
            Parsed analysis dict (see class docstring); never raises — falls
            back to _get_fallback_data on any error.
        """

        # Single-shot task: the prompt demands pure JSON so that
        # _parse_llm_output can extract a dict from the model reply.
        task = Task(
            description=f"""
            Analyze the following capital project query from a contracts/commercial perspective:

            QUERY: {query}

            CONTEXT: {context if context else 'Portfolio-wide analysis across multiple capital projects worth Β£500M+ total'}

            Provide analysis covering:
            1. Total CE exposure (Β£5M-Β£15M typical for this portfolio size)
            2. Number of open CE cases (15-30 typical range)
            3. Contract issues: backlog aging, top contracts, clause exceptions, approval breaches
            4. Recommended actions with timeframes and specific contracts
            5. Contracts at highest risk (format: C-XXXX with Β£X.XM exposure)
            6. Key KPIs with actual values
            7. Guardrails: SpendVariance, ApprovalSLA, ClauseException, CEBacklog

            IMPORTANT:
            - Use realistic numbers for Β£500M+ portfolio
            - Use contract references: C-2401, C-2398, C-2405, etc.
            - Be specific with monetary values (Β£X.XM format)
            - **Analyze the actual query and respond contextually**
            - **Different queries should produce different analyses**
            - Think like an experienced Commercial Director

            Return ONLY valid JSON (no markdown code blocks, no ```json, just pure JSON):
            {{
                "ce_exposure_total": "<Β£X.XM>",
                "ce_open_count": <number>,
                "issues_identified": [
                    "<specific issue with contract refs and numbers>",
                    "<another issue with metrics>",
                    "<additional issues relevant to query>"
                ],
                "recommended_actions": [
                    "<action with contracts and timeframes>",
                    "<another specific action>",
                    "<additional actions as needed>"
                ],
                "contracts_at_risk": [
                    "<C-XXXX (Β£X.XM exposure)>",
                    "<C-YYYY (Β£X.XM exposure)>"
                ],
                "kpis_flagged": {{
                    "CE_Exposure_Total": "<Β£X.XM>",
                    "CE_Open_Count": <number>,
                    "CE_Forecast_Value": "<Β£X.XM>",
                    "Approvals_AvgAge_Days": <number>
                }},
                "guardrails_triggered": ["<guardrail1>", "<guardrail2>"]
            }}

            CRITICAL INSTRUCTIONS:
            - Make your response specific to the query "{query}"
            - If asked about CE/compensation events, focus on CE metrics
            - If asked about critical issues, prioritize severe contract problems
            - If asked about specific contracts, reference them by ID
            - Use varied, realistic numbers - DO NOT copy example values
            - Generate fresh analysis each time based on the query context
            """,
            agent=self.agent,
            expected_output="JSON formatted contracts analysis tailored to the specific query with contextually relevant insights"
        )

        # One-agent, one-task crew per call; kickoff() runs synchronously.
        crew = Crew(
            agents=[self.agent],
            tasks=[task],
            verbose=True
        )

        try:
            result = crew.kickoff()
            parsed_result = self._parse_llm_output(str(result))
            parsed_result["agent_name"] = "CE Insights Agent"
            return parsed_result

        except Exception as e:
            # Deliberate best-effort: any crew/LLM failure degrades to
            # canned data rather than propagating to the orchestrator.
            print(f"⚠️ Error in contracts agent analysis: {e}")
            return self._get_fallback_data(query)

    def _parse_llm_output(self, output: str) -> Dict[str, Any]:
        """Parse LLM output and extract structured data.

        Strips markdown code fences, then tries every balanced {...} span
        (largest first; the regex tolerates limited brace nesting) as JSON.
        Falls back to line-based text parsing when nothing validates.
        """
        try:
            # Remove markdown code blocks if present
            output = re.sub(r'```json\s*', '', output)
            output = re.sub(r'```\s*', '', output)

            # Try to find JSON in the output
            json_matches = list(re.finditer(r'\{(?:[^{}]|(?:\{(?:[^{}]|(?:\{[^{}]*\}))*\}))*\}', output, re.DOTALL))

            if json_matches:
                # Try each match from longest to shortest
                for match in sorted(json_matches, key=lambda m: len(m.group()), reverse=True):
                    try:
                        json_str = match.group()
                        data = json.loads(json_str)

                        # Validate required fields before accepting the candidate.
                        if all(key in data for key in ['ce_exposure_total', 'ce_open_count', 'issues_identified']):
                            return data
                    except json.JSONDecodeError:
                        continue

            print("⚠️ Could not parse JSON from LLM output, attempting text parsing")
            return self._parse_text_output(output)

        except Exception as e:
            print(f"⚠️ JSON parsing error: {e}")
            return self._parse_text_output(output)

    def _parse_text_output(self, output: str) -> Dict[str, Any]:
        """Parse plain text output into structured format.

        Heuristically harvests contract references (C-NNNN), issue-like and
        action-like lines from the text; all remaining fields get fixed
        defaults so the returned shape matches the JSON path.
        """
        lines = output.split('\n')

        issues = []
        actions = []
        contracts = []

        for line in lines:
            line = line.strip()

            # Look for contract references (cap at 3 entries).
            contract_match = re.search(r'C-\d{4}', line)
            if contract_match and len(contracts) < 3:
                # lstrip removes leading bullet/number characters.
                contracts.append(line.lstrip('β€’-*123456789. '))

            # Look for issues (keyword heuristic; short lines skipped).
            if any(keyword in line.lower() for keyword in ['issue', 'concern', 'risk', 'backlog', 'breach', 'exception']):
                if len(line) > 20 and len(issues) < 5:
                    issues.append(line.lstrip('β€’-*123456789. '))

            # Look for actions (elif: a line counted as an issue is not
            # also counted as an action).
            elif any(keyword in line.lower() for keyword in ['recommend', 'should', 'action', 'prioritize', 'implement']):
                if len(line) > 20 and len(actions) < 5:
                    actions.append(line.lstrip('β€’-*123456789. '))

        # Fixed defaults fill any list the heuristics left empty.
        return {
            "ce_exposure_total": "Β£8.5M",
            "ce_open_count": 22,
            "issues_identified": issues if issues else [
                "Significant CE backlog requiring immediate attention",
                "Multiple contracts showing elevated commercial risk",
                "Approval processes experiencing delays",
                "Contract variation management needs improvement"
            ],
            "recommended_actions": actions if actions else [
                "Conduct comprehensive CE review across top contracts",
                "Establish regular commercial risk meetings",
                "Implement stricter approval tracking mechanisms",
                "Review and update contract management procedures"
            ],
            "contracts_at_risk": contracts if contracts else [
                "C-2401 (High exposure)",
                "C-2398 (Multiple CEs pending)",
                "C-2405 (Approval delays)"
            ],
            "kpis_flagged": {
                "CE_Exposure_Total": "Β£8.5M",
                "CE_Open_Count": 22,
                "Approvals_AvgAge_Days": 42
            },
            "guardrails_triggered": ["SpendVariance", "ApprovalSLA"]
        }

    def _get_fallback_data(self, query: str = "") -> Dict[str, Any]:
        """Fallback data with some query awareness.

        Selects one of four canned scenarios by keyword-matching the query
        so a failed LLM call still produces plausibly relevant output.
        """

        query_lower = query.lower() if query else ""

        # Scenario selection: critical/urgent > contract/CE > exposure/risk > default.
        if "critical" in query_lower or "urgent" in query_lower:
            ce_exposure = "Β£11.2M"
            ce_count = 31
            issues = [
                "CRITICAL: CE backlog aging - 12 events >90 days old",
                "Top 2 contracts account for Β£8.1M (72%) of CE exposure",
                "Contract C-2401: Β£4.5M CE pending URGENT assessment",
                "Major clause exceptions on 7 variation orders",
                "Approval SLA breached on 9 CE decisions"
            ]
            guardrails = ["SpendVariance", "ApprovalSLA", "ClauseException", "CEBacklog"]
        elif "contract" in query_lower or "ce" in query_lower or "compensation" in query_lower:
            ce_exposure = "Β£9.3M"
            ce_count = 26
            issues = [
                "CE backlog aging: 8 events >90 days old",
                "Contract C-2401: Β£3.5M CE pending detailed assessment",
                "NEC clause 60.1(1) notices increasing by 15%",
                "Approval workflows experiencing 5-day average delays"
            ]
            guardrails = ["SpendVariance", "ApprovalSLA"]
        elif "exposure" in query_lower or "risk" in query_lower:
            ce_exposure = "Β£10.1M"
            ce_count = 28
            issues = [
                "Commercial exposure elevated across 5 major contracts",
                "CE forecast trending 18% above baseline",
                "Contract C-2398 showing rapid CE accumulation",
                "Risk register requires urgent update for 3 contracts"
            ]
            guardrails = ["SpendVariance", "CEBacklog"]
        else:
            ce_exposure = "Β£8.7M"
            ce_count = 23
            issues = [
                "CE backlog aging: 7 events >90 days old",
                "Top 3 contracts account for Β£6.2M (71%) of CE exposure",
                "Contract C-2401: Β£3.1M CE pending assessment",
                "Clause exceptions detected on 4 variation orders",
                "Approval SLA breached on 5 CE decisions"
            ]
            guardrails = ["SpendVariance", "ApprovalSLA", "ClauseException"]

        return {
            "agent_name": "CE Insights Agent",
            "ce_exposure_total": ce_exposure,
            "ce_open_count": ce_count,
            "issues_identified": issues,
            "recommended_actions": [
                f"Prioritize CE assessment for C-2401, C-2398 {'URGENTLY' if 'critical' in query_lower else ''}",
                "Establish weekly CE triage meetings with Commercial team",
                "Review clause exceptions with legal within 7 days",
                "Clear CE backlog >90 days by end of Q4 2024",
                "Implement automated CE tracking dashboard"
            ],
            "contracts_at_risk": [
                "C-2401 (Β£3.1M exposure)",
                "C-2398 (Β£2.0M exposure)",
                "C-2405 (Β£1.1M exposure)"
            ],
            "kpis_flagged": {
                "CE_Exposure_Total": ce_exposure,
                "CE_Open_Count": ce_count,
                "CE_Forecast_Value": "Β£10.2M",
                "Approvals_AvgAge_Days": 47
            },
            "guardrails_triggered": guardrails
        }
src/agents/report_agent.py DELETED
File without changes
src/agents/supplier_agent.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from crewai import Agent, Task, Crew
2
+ from typing import Dict, Any
3
+ import json
4
+ import re
5
+
6
+ class SupplierAgent:
7
+ def __init__(self, llm=None):
8
+ self.llm = llm
9
+ self.agent = Agent(
10
+ role="Supplier Fragility Analyst",
11
+ goal="Assess supplier health, fragility scores, and predict supplier outlook based on capital project data",
12
+ backstory="""You are an expert Commercial Director with 20+ years of experience analyzing supplier
13
+ performance across major capital projects. You evaluate payment lags, retention,
14
+ float days, and compliance to generate fragility scores and risk outlooks. You provide
15
+ specific, actionable insights based on industry best practices and real-world experience.
16
+ You understand NEC contracts, supply chain risk, and cash flow management.""",
17
+ verbose=True,
18
+ llm=llm,
19
+ allow_delegation=False
20
+ )
21
+
22
def analyze(self, query: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
    """Run a supplier-perspective analysis of *query* through the CrewAI agent.

    Builds a single-task crew, executes it, and parses the LLM response into
    a structured dict.  On any failure the deterministic fallback data is
    returned so the dashboard still renders.

    Args:
        query: Natural-language question from an executive user.
        context: Optional extra context; when falsy, a portfolio-wide
            default description is embedded in the prompt instead.

    Returns:
        Dict with fragility_score, outlook, issues_identified,
        recommended_actions, kpis_flagged, guardrails_triggered and
        agent_name keys.
    """
    # Hoist the context text so the prompt template below stays readable.
    context_text = context if context else 'Portfolio-wide analysis across multiple capital projects worth Β£500M+ total'

    prompt = f"""
    Analyze the following capital project query from a supplier perspective:

    QUERY: {query}

    CONTEXT: {context_text}

    Your analysis must cover:
    1. Supplier fragility assessment (score 0.0-1.0, where 1.0 is most fragile)
       - 0.0-0.3: LOW risk (GREEN)
       - 0.4-0.6: MEDIUM risk (AMBER)
       - 0.7-1.0: HIGH risk (RED)

    2. Outlook classification: RED, AMBER, or GREEN

    3. Specific issues related to:
       - Payment lags (>30 days concerning, >45 days critical)
       - Retention outstanding (high retention = cash flow risk)
       - Float days (schedule buffer, <15 days concerning, <10 critical)
       - Compliance rates (target >90%, <80% concerning)

    4. Concrete recommended actions with timeframes

    5. Key KPIs with actual values

    6. Guardrails triggered: FloatDrop, RetentionRelease, PaymentLag, ComplianceGap, SpendVariance

    IMPORTANT:
    - Provide realistic, specific numbers for a real Β£500M+ portfolio
    - Use supplier names like "Tier-1 suppliers", specific counts
    - Be specific with monetary values (Β£M format)
    - **Analyze the actual query and provide relevant insights**
    - **Different queries should produce different analyses**
    - Think like a Commercial Director reviewing real project data

    Return ONLY valid JSON (no markdown code blocks, no ```json, just pure JSON):
    {{
        "fragility_score": <number between 0.0 and 1.0>,
        "outlook": "<RED or AMBER or GREEN>",
        "issues_identified": [
            "<specific issue with actual numbers and supplier names>",
            "<another specific issue with metrics>",
            "<additional issues as relevant to the query>"
        ],
        "recommended_actions": [
            "<specific action with timeframe like 'within 14 days'>",
            "<another action with specific deliverables>",
            "<additional actions as needed>"
        ],
        "kpis_flagged": {{
            "SupplierFragility_Score": <number matching fragility_score above>,
            "Supplier_AvgFloatDays": <number>,
            "NoticeComplianceRate": "<percentage like 78%>",
            "RetentionOutstanding": "<amount like Β£2.3M>"
        }},
        "guardrails_triggered": ["<guardrail name>", "<another guardrail if applicable>"]
    }}

    CRITICAL INSTRUCTIONS:
    - Analyze the specific query "{query}" and provide a contextual response
    - If asked about payment processing, focus on payment metrics and timelines
    - If asked about critical issues, prioritize the most severe problems
    - If asked about specific metrics (float, compliance, retention), emphasize those
    - Make your response directly relevant to what was asked
    - Use varied, realistic numbers that reflect actual portfolio conditions
    - DO NOT just copy example values - generate fresh analysis each time
    """

    # One task, one agent: the crew is just a convenient execution wrapper.
    analysis_task = Task(
        description=prompt,
        agent=self.agent,
        expected_output="JSON formatted supplier analysis tailored to the specific query with contextually relevant insights"
    )

    single_agent_crew = Crew(
        agents=[self.agent],
        tasks=[analysis_task],
        verbose=True
    )

    try:
        raw_result = single_agent_crew.kickoff()

        # Convert the free-form LLM output into the structured schema.
        structured = self._parse_llm_output(str(raw_result))
        structured["agent_name"] = "Supplier Fragility Agent"
        return structured

    except Exception as e:
        # Any LLM/crew failure degrades gracefully to canned demo data.
        print(f"⚠️ Error in supplier agent analysis: {e}")
        return self._get_fallback_data(query)
122
+ def _parse_llm_output(self, output: str) -> Dict[str, Any]:
123
+ """Parse LLM output and extract structured data"""
124
+ try:
125
+ # Remove markdown code blocks if present
126
+ output = re.sub(r'```json\s*', '', output)
127
+ output = re.sub(r'```\s*', '', output)
128
+
129
+ # Try to find JSON in the output
130
+ # Look for the most complete JSON object
131
+ json_matches = list(re.finditer(r'\{(?:[^{}]|(?:\{(?:[^{}]|(?:\{[^{}]*\}))*\}))*\}', output, re.DOTALL))
132
+
133
+ if json_matches:
134
+ # Try each match from longest to shortest
135
+ for match in sorted(json_matches, key=lambda m: len(m.group()), reverse=True):
136
+ try:
137
+ json_str = match.group()
138
+ data = json.loads(json_str)
139
+
140
+ # Validate required fields
141
+ if all(key in data for key in ['fragility_score', 'outlook', 'issues_identified']):
142
+ return data
143
+ except json.JSONDecodeError:
144
+ continue
145
+
146
+ # If no valid JSON found, try to parse as plain text
147
+ print("⚠️ Could not parse JSON from LLM output, attempting text parsing")
148
+ return self._parse_text_output(output)
149
+
150
+ except Exception as e:
151
+ print(f"⚠️ JSON parsing error: {e}")
152
+ return self._parse_text_output(output)
153
+
154
+ def _parse_text_output(self, output: str) -> Dict[str, Any]:
155
+ """Parse plain text output into structured format"""
156
+ # Simple heuristic parsing if JSON fails
157
+ lines = output.split('\n')
158
+
159
+ issues = []
160
+ actions = []
161
+
162
+ for line in lines:
163
+ line = line.strip()
164
+ if any(keyword in line.lower() for keyword in ['issue', 'concern', 'risk', 'problem', 'lag', 'outstanding']):
165
+ if len(line) > 20 and len(issues) < 4:
166
+ issues.append(line.lstrip('β€’-*123456789. '))
167
+ elif any(keyword in line.lower() for keyword in ['recommend', 'should', 'action', 'need to', 'must']):
168
+ if len(line) > 20 and len(actions) < 4:
169
+ actions.append(line.lstrip('β€’-*123456789. '))
170
+
171
+ return {
172
+ "fragility_score": 0.65,
173
+ "outlook": "AMBER",
174
+ "issues_identified": issues if issues else [
175
+ "Supplier payment processing delays detected",
176
+ "Schedule float concerns across multiple contracts",
177
+ "Compliance metrics below target thresholds"
178
+ ],
179
+ "recommended_actions": actions if actions else [
180
+ "Review and expedite supplier payment cycles",
181
+ "Conduct supplier health assessments within 30 days",
182
+ "Implement enhanced monitoring for at-risk suppliers"
183
+ ],
184
+ "kpis_flagged": {
185
+ "SupplierFragility_Score": 0.65,
186
+ "Supplier_AvgFloatDays": 13,
187
+ "NoticeComplianceRate": "82%"
188
+ },
189
+ "guardrails_triggered": ["FloatDrop"]
190
+ }
191
+
192
+ def _get_fallback_data(self, query: str = "") -> Dict[str, Any]:
193
+ """Fallback data with some query awareness"""
194
+
195
+ query_lower = query.lower() if query else ""
196
+
197
+ # Vary response based on query keywords
198
+ if "critical" in query_lower or "urgent" in query_lower:
199
+ fragility = 0.78
200
+ outlook = "RED"
201
+ issues = [
202
+ "CRITICAL: 5 Tier-1 suppliers with payment lag >60 days",
203
+ "Schedule float at CRITICAL 8 days (threshold: 15 days)",
204
+ "Notice compliance DROPPED to 68% (target: 90%)",
205
+ "Β£3.8M retention outstanding - immediate action required"
206
+ ]
207
+ guardrails = ["FloatDrop", "RetentionRelease", "PaymentLag", "ComplianceGap"]
208
+ elif "payment" in query_lower or "cash" in query_lower or "processing" in query_lower:
209
+ fragility = 0.72
210
+ outlook = "AMBER"
211
+ issues = [
212
+ "4 suppliers showing payment lag 45-55 days",
213
+ "Cash flow constraints affecting 3 Tier-1 suppliers",
214
+ "Payment processing delays in accounts payable",
215
+ "Β£2.1M in disputed invoices pending resolution"
216
+ ]
217
+ guardrails = ["PaymentLag", "RetentionRelease"]
218
+ elif "float" in query_lower or "schedule" in query_lower:
219
+ fragility = 0.65
220
+ outlook = "AMBER"
221
+ issues = [
222
+ "Average float reduced to 11 days across portfolio",
223
+ "3 critical path suppliers showing schedule pressure",
224
+ "Weather delays impacting 2 major contracts",
225
+ "Resource constraints reducing buffer capacity"
226
+ ]
227
+ guardrails = ["FloatDrop"]
228
+ else:
229
+ # Default portfolio-wide response
230
+ fragility = 0.68
231
+ outlook = "AMBER"
232
+ issues = [
233
+ "3 Tier-1 suppliers showing payment lag >45 days",
234
+ "Total float reduced to 12 days average (threshold: 15 days)",
235
+ "Notice compliance at 78% (target: 90%)",
236
+ "Β£2.3M retention outstanding across 5 contracts"
237
+ ]
238
+ guardrails = ["FloatDrop", "RetentionRelease"]
239
+
240
+ return {
241
+ "agent_name": "Supplier Fragility Agent",
242
+ "fragility_score": fragility,
243
+ "outlook": outlook,
244
+ "issues_identified": issues,
245
+ "recommended_actions": [
246
+ f"Accelerate payment processing for high-risk suppliers within {7 if fragility > 0.75 else 14} days",
247
+ "Review and release retention where contractually appropriate",
248
+ f"Schedule {'urgent' if fragility > 0.75 else 'routine'} supplier engagement meetings for Q1 2025",
249
+ "Monitor float trajectory weekly for critical path suppliers"
250
+ ],
251
+ "kpis_flagged": {
252
+ "SupplierFragility_Score": fragility,
253
+ "Supplier_AvgFloatDays": 8 if fragility > 0.75 else 12,
254
+ "NoticeComplianceRate": "68%" if fragility > 0.75 else "78%",
255
+ "RetentionOutstanding": f"Β£{3.8 if fragility > 0.75 else 2.3}M"
256
+ },
257
+ "guardrails_triggered": guardrails
258
+ }
src/orchestrator/master_orchestrator.py CHANGED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from crewai import Crew, Process, LLM, Agent, Task
from typing import Dict, Any, List
import os
from dotenv import load_dotenv
import json
import re
import warnings

# Load environment variables FIRST so all config below sees them.
load_dotenv()

# Disable CrewAI telemetry.
os.environ['OTEL_SDK_DISABLED'] = 'true'

# Silence insecure-request warnings; urllib3 may be absent in some envs,
# so catch only the import failure (was a bare `except:` which would also
# have hidden real errors from disable_warnings).
try:
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
    pass

warnings.filterwarnings('ignore')

# SECURITY NOTE(review): everything below disables SSL certificate
# verification process-wide to work behind corporate TLS-intercepting
# proxies.  This exposes every HTTPS call to man-in-the-middle attacks --
# acceptable for a local demo only, do not ship to production.
import ssl
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Very old Pythons lack the helper and do not verify by default anyway.
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context

# Environment knobs that make requests/curl-based clients skip verification.
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['REQUESTS_CA_BUNDLE'] = ''
os.environ['SSL_CERT_FILE'] = ''
os.environ['PYTHONHTTPSVERIFY'] = '0'

from agents.supplier_agent import SupplierAgent
from agents.contracts_agent import ContractsAgent
42
class MasterOrchestrator:
    """Routes executive queries to specialist agents and synthesizes results.

    Owns an LLM handle (OpenAI preferred, Anthropic fallback, or None for
    demo mode), decides which specialist agents should analyze a query,
    gathers their findings and produces an executive summary.
    """

    def __init__(self, llm=None):
        """Create the orchestrator and its specialist agents.

        Args:
            llm: Optional pre-configured LLM handle.  When omitted, tries
                OpenAI first, then Anthropic; with no API key at all the
                orchestrator runs in fallback/demo data mode (llm stays None).
        """
        if llm is None:
            # Try OpenAI first, fall back to Anthropic
            if os.getenv("OPENAI_API_KEY"):
                try:
                    # Configure LiteLLM (used under the hood by CrewAI) to
                    # skip SSL checks, matching the module-level workaround.
                    import litellm
                    litellm.ssl_verify = False
                    litellm.drop_params = True

                    self.llm = LLM(
                        model="gpt-4o-mini",
                        api_key=os.getenv("OPENAI_API_KEY")
                    )
                    print("βœ… LLM initialized with OpenAI (SSL disabled)")
                except Exception as e:
                    print(f"⚠️ OpenAI initialization failed: {e}")
                    self.llm = None
            elif os.getenv("ANTHROPIC_API_KEY"):
                try:
                    self.llm = LLM(
                        model="claude-3-5-sonnet-20241022",
                        api_key=os.getenv("ANTHROPIC_API_KEY")
                    )
                    print("βœ… LLM initialized with Anthropic")
                except Exception as e:
                    print(f"⚠️ Anthropic initialization failed: {e}")
                    self.llm = None
            else:
                print("⚠️ No API key found. Using fallback data mode for demo.")
                self.llm = None
        else:
            self.llm = llm

        # Specialist agents share the orchestrator's LLM (or None).
        self.supplier_agent = SupplierAgent(self.llm)
        self.contracts_agent = ContractsAgent(self.llm)

        # Kept for compatibility checks only; routing calls the OpenAI
        # client directly rather than going through a CrewAI agent.
        self.routing_agent = self.llm

    def route_query(self, query: str) -> Dict[str, Any]:
        """Decide which specialist agents should analyze *query*.

        Uses OpenAI function calling for fast, reliable routing; any failure
        (no key, network, parse error) falls back to keyword routing.

        Returns:
            Dict with 'supplier' and 'contracts' entries, each carrying
            should_check (bool), reason (str) and priority (high/medium/low).
        """
        # If no LLM available, use keyword routing
        if self.llm is None or self.routing_agent is None:
            return self._keyword_route(query)

        try:
            from openai import OpenAI

            api_key = os.getenv("OPENAI_API_KEY")
            if not api_key:
                return self._keyword_route(query)

            client = OpenAI(api_key=api_key)

            # JSON schema constraining the model's routing decision.
            routing_function = {
                "name": "route_query",
                "description": "Route a capital project query to appropriate specialist agents based on their expertise",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "supplier": {
                            "type": "object",
                            "description": "Decision for Supplier Fragility Agent",
                            "properties": {
                                "should_check": {
                                    "type": "boolean",
                                    "description": "Whether this agent should analyze the query"
                                },
                                "reason": {
                                    "type": "string",
                                    "description": "Brief explanation for the decision"
                                },
                                "priority": {
                                    "type": "string",
                                    "enum": ["high", "medium", "low"],
                                    "description": "Priority level for this agent"
                                }
                            },
                            "required": ["should_check", "reason", "priority"]
                        },
                        "contracts": {
                            "type": "object",
                            "description": "Decision for CE Insights Agent (Contracts)",
                            "properties": {
                                "should_check": {
                                    "type": "boolean",
                                    "description": "Whether this agent should analyze the query"
                                },
                                "reason": {
                                    "type": "string",
                                    "description": "Brief explanation for the decision"
                                },
                                "priority": {
                                    "type": "string",
                                    "enum": ["high", "medium", "low"],
                                    "description": "Priority level for this agent"
                                }
                            },
                            "required": ["should_check", "reason", "priority"]
                        }
                    },
                    "required": ["supplier", "contracts"]
                }
            }

            # NOTE(review): the `functions`/`function_call` parameters are
            # deprecated by OpenAI in favour of `tools`/`tool_choice`;
            # migrate when next touching this call.
            response = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[
                    {
                        "role": "system",
                        "content": """You are a routing system for a capital project management platform.

Your job is to analyze executive queries and route them to the appropriate specialist agents.

Available Agents:
1. Supplier Fragility Agent
   - Expertise: supplier health, payment lags, float days, retention, vendor risk, cash flow, subcontractor issues, compliance rates
   - Use for: questions about suppliers, payments, supply chain, vendor performance

2. CE Insights Agent (Contracts)
   - Expertise: contract exposure, compensation events (CE), variations, commercial risk, approvals, NEC contracts, change orders, claims
   - Use for: questions about contracts, CE, commercial issues, contract performance

Routing Rules:
- If the query is specific to one domain, route to that agent with HIGH priority
- If the query is broad or portfolio-wide, route to BOTH agents with MEDIUM priority
- Always provide a clear reason for your routing decision"""
                    },
                    {
                        "role": "user",
                        "content": f"Route this query: {query}"
                    }
                ],
                functions=[routing_function],
                function_call={"name": "route_query"},
                temperature=0.3,
                timeout=10
            )

            # Extract the structured arguments the model produced.
            function_args = json.loads(
                response.choices[0].message.function_call.arguments
            )

            return function_args

        except Exception as e:
            print(f"Function calling routing failed: {e}, using keyword fallback")
            return self._keyword_route(query)

    def _keyword_route(self, query: str) -> Dict[str, Any]:
        """Fallback keyword-based routing (no LLM involved, deterministic)."""
        query_lower = query.lower()

        # Keyword vocabularies for each routing target.
        supplier_keywords = ['supplier', 'payment', 'float', 'retention', 'fragility', 'vendor',
                             'cash flow', 'pay', 'subcontractor']
        contract_keywords = ['contract', 'ce', 'compensation', 'commercial', 'variation', 'cost',
                             'approval', 'change order', 'claim']
        portfolio_keywords = ['portfolio', 'overall', 'summary', 'dashboard', 'all', 'everything']

        supplier_match = any(keyword in query_lower for keyword in supplier_keywords)
        contract_match = any(keyword in query_lower for keyword in contract_keywords)
        portfolio_match = any(keyword in query_lower for keyword in portfolio_keywords)

        # When nothing matches, consult both agents (comprehensive check).
        routing = {
            'supplier': {
                'should_check': supplier_match or portfolio_match or (not supplier_match and not contract_match),
                'reason': 'Query contains supplier-related keywords' if supplier_match
                          else 'Portfolio-wide query' if portfolio_match
                          else 'Comprehensive check',
                'priority': 'high' if supplier_match else 'medium'
            },
            'contracts': {
                'should_check': contract_match or portfolio_match or (not supplier_match and not contract_match),
                'reason': 'Query contains contract-related keywords' if contract_match
                          else 'Portfolio-wide query' if portfolio_match
                          else 'Comprehensive check',
                'priority': 'high' if contract_match else 'medium'
            }
        }

        return routing

    def orchestrate(self, query: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
        """Main orchestration entry point.

        1. Routes the query to appropriate agents
        2. Collects agent responses (errors from one agent don't abort the run)
        3. Synthesizes an executive summary

        Returns:
            Dict with query, agents_checked, agents_with_findings,
            agent_outputs, executive_summary and routing_decision.
        """
        print(f"\n{'='*80}")
        print("EXECUTIVE DASHBOARD - CAPITAL PROJECT INTELLIGENCE")
        print(f"{'='*80}")
        print(f"\nQuery: {query}\n")

        # Determine routing
        routing = self.route_query(query)

        print("πŸ” AGENT ROUTING DECISION:")
        for agent_name, route_info in routing.items():
            status = "βœ“ WILL CONSULT" if route_info['should_check'] else "βœ— SKIPPING"
            priority = route_info.get('priority', 'medium').upper()
            print(f"  {status} - {agent_name.upper()} [{priority}]: {route_info['reason']}")
        print()

        agent_outputs = []
        agents_checked = []
        agents_with_findings = []

        # Consult Supplier Agent if needed
        if routing['supplier']['should_check']:
            agents_checked.append('Supplier Fragility Agent')
            print("πŸ“Š Consulting Supplier Fragility Agent...")

            try:
                supplier_output = self.supplier_agent.analyze(query, context)

                if self._has_relevant_findings(supplier_output, query):
                    agent_outputs.append(supplier_output)
                    agents_with_findings.append('Supplier Fragility Agent')
                    print("   βœ“ Relevant findings identified\n")
                else:
                    print("   β„Ή No critical findings for this query\n")
            except Exception as e:
                print(f"   ⚠ Error consulting supplier agent: {e}\n")

        # Consult Contracts Agent if needed
        if routing['contracts']['should_check']:
            agents_checked.append('CE Insights Agent')
            print("πŸ“‹ Consulting CE Insights Agent (Contracts)...")

            try:
                contracts_output = self.contracts_agent.analyze(query, context)

                if self._has_relevant_findings(contracts_output, query):
                    agent_outputs.append(contracts_output)
                    agents_with_findings.append('CE Insights Agent')
                    print("   βœ“ Relevant findings identified\n")
                else:
                    print("   β„Ή No critical findings for this query\n")
            except Exception as e:
                print(f"   ⚠ Error consulting contracts agent: {e}\n")

        # Synthesize executive summary
        executive_summary = self._synthesize_summary(query, agent_outputs)

        return {
            "query": query,
            "agents_checked": agents_checked,
            "agents_with_findings": agents_with_findings,
            "agent_outputs": agent_outputs,
            "executive_summary": executive_summary,
            "routing_decision": routing
        }

    def _has_relevant_findings(self, agent_output: Dict[str, Any], query: str) -> bool:
        """Return True when an agent's output is worth surfacing.

        Simple heuristic: any identified issues or triggered guardrails count
        as relevant.  Could be enhanced with LLM-based relevance checking.
        """
        issues = agent_output.get('issues_identified', [])
        guardrails = agent_output.get('guardrails_triggered', [])
        return len(issues) > 0 or len(guardrails) > 0

    def _synthesize_summary(self, query: str, agent_outputs: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Fold multiple agents' findings into a single executive summary."""
        all_issues = []
        all_actions = []
        all_guardrails = set()

        for output in agent_outputs:
            all_issues.extend(output.get("issues_identified", []))
            all_actions.extend(output.get("recommended_actions", []))
            all_guardrails.update(output.get("guardrails_triggered", []))

        # Severity ladder: named critical guardrails force RED, any other
        # guardrail yields AMBER, none means GREEN.
        if any(g in ['SpendVariance', 'CEBacklog', 'PaymentLag'] for g in all_guardrails):
            status = 'RED'
        elif all_guardrails:
            status = 'AMBER'
        else:
            status = 'GREEN'

        return {
            "status": status,
            "critical_issues_count": len(all_issues),
            "issues_summary": all_issues[:5],  # Top 5 issues
            "recommended_actions": all_actions[:5],  # Top 5 actions
            "guardrails_triggered": list(all_guardrails),
            "escalation_required": len(all_guardrails) > 2
        }

    def format_output(self, result: Dict[str, Any]) -> str:
        """Format orchestrator output for executive presentation."""
        output = []
        output.append("\n" + "="*80)
        output.append("EXECUTIVE SUMMARY")
        output.append("="*80)

        summary = result["executive_summary"]
        output.append(f"\nπŸ“ Status: {summary['status']}")
        output.append(f"🚨 Critical Issues: {summary['critical_issues_count']}")
        output.append(f"⚠️ Guardrails Triggered: {', '.join(summary['guardrails_triggered']) if summary['guardrails_triggered'] else 'None'}")
        output.append(f"πŸ”Ί Escalation Required: {'YES' if summary['escalation_required'] else 'NO'}")

        output.append("\n" + "-"*80)
        output.append("AGENTS CONSULTED")
        output.append("-"*80)

        # Show all agents checked
        for agent in result["agents_checked"]:
            if agent in result["agents_with_findings"]:
                output.append(f"  βœ“ {agent} - Relevant findings identified")
            else:
                output.append(f"  β—‹ {agent} - No critical findings")

        if result["agents_with_findings"]:
            output.append("\n" + "-"*80)
            output.append("KEY ISSUES IDENTIFIED")
            output.append("-"*80)
            for i, issue in enumerate(summary["issues_summary"], 1):
                output.append(f"  {i}. {issue}")

            output.append("\n" + "-"*80)
            output.append("RECOMMENDED ACTIONS")
            output.append("-"*80)
            for i, action in enumerate(summary["recommended_actions"], 1):
                output.append(f"  {i}. {action}")
        else:
            output.append("\n" + "-"*80)
            output.append("βœ“ No critical issues identified across all agents")
            output.append("-"*80)

        # Detailed agent outputs - only for agents with findings
        if result["agent_outputs"]:
            output.append("\n" + "="*80)
            output.append("DETAILED AGENT ANALYSIS")
            output.append("="*80)

            for agent_output in result["agent_outputs"]:
                output.append(f"\nπŸ“Š {agent_output['agent_name'].upper()}")
                output.append("-"*80)

                # .get() avoids KeyError when an agent supplies one of a
                # pair of related keys but not the other.
                if 'fragility_score' in agent_output:
                    output.append(f"  Fragility Score: {agent_output['fragility_score']}")
                    output.append(f"  Outlook: {agent_output.get('outlook', 'N/A')}")

                if 'ce_exposure_total' in agent_output:
                    output.append(f"  CE Exposure: {agent_output['ce_exposure_total']}")
                    output.append(f"  Open CE Count: {agent_output.get('ce_open_count', 'N/A')}")

                output.append("\n  Issues:")
                for issue in agent_output.get('issues_identified', []):
                    output.append(f"    β€’ {issue}")

                output.append("\n  Recommended Actions:")
                for action in agent_output.get('recommended_actions', []):
                    output.append(f"    β€’ {action}")

        output.append("\n" + "="*80 + "\n")

        return "\n".join(output)
verify_key.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Quick standalone check that the OPENAI_API_KEY in .env is valid.

Makes a minimal chat-completion request and reports whether the key works,
is invalid, or is rate-limited/out of credits.
"""
import os
from dotenv import load_dotenv
import requests

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

if not api_key:
    print("❌ No API key found")
    # exit() is an interactive-shell helper; SystemExit works everywhere
    # and signals failure to callers via a non-zero status.
    raise SystemExit(1)

# Show only a short prefix so most of the secret never reaches logs.
print(f"Testing key: {api_key[:8]}...")

headers = {
    "Authorization": f"Bearer {api_key}",
    "Content-Type": "application/json"
}

# Smallest possible request: one token-cheap message, 5 max tokens.
data = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "test"}],
    "max_tokens": 5
}

try:
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=data,
        timeout=10,
        # SECURITY NOTE: verification disabled for corporate
        # TLS-intercepting proxies; exposes the call to MITM.
        verify=False
    )

    if response.status_code == 200:
        print("βœ… API key is valid and working!")
    elif response.status_code == 401:
        print("❌ Invalid API key")
        print(response.json())
    elif response.status_code == 429:
        print("⚠️ Rate limit or no credits")
        print(response.json())
    else:
        print(f"❌ Error {response.status_code}")
        print(response.json())
except Exception as e:
    print(f"❌ Connection error: {e}")