Gaston895 committed on
Commit ceb55e4 · verified · 1 Parent(s): 127f1b9

🔧 Fix model loading using proven app_gunicorn.py approach - no pipeline, direct generation
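
For reference, a minimal sketch of the "no pipeline, direct generation" path this commit switches to. The repository name, pad-token fix, and generation settings below mirror the added code in the diff; the Flask wiring, fallback model, and error handling are omitted, and the example user prompt is hypothetical.

# Sketch only — mirrors the loading/generation settings shown in the diff below.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_repo = "Gaston895/Aegisecon1"

tokenizer = AutoTokenizer.from_pretrained(model_repo, trust_remote_code=True)
if tokenizer.pad_token is None:
    # Reuse EOS as pad token to avoid the attention-mask warning.
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(
    model_repo,
    torch_dtype=torch.float16,   # as in the diff; CPU-only deployment
    device_map="cpu",
    trust_remote_code=True,
    low_cpu_mem_usage=True,
)

# Hypothetical prompt following the "User: ... Assistant:" format used in the app.
prompt = "You are AEGIS Economics AI.\n\nUser: How do rate hikes affect GDP?\nAssistant:"
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)

with torch.no_grad():
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=256,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response.split("Assistant:")[-1].strip())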

Browse files
Files changed (1)
  1. app.py +198 -1045
app.py CHANGED
@@ -1,8 +1,7 @@
1
  #!/usr/bin/env python3
2
  """
3
  Enhanced Flask App with LangGraph + AEGIS Economics AI
4
- Tech Scores LangGraph Processing → Qwen2 Model → Economic Predictions
5
- CPU-optimized version for Modal deployment
6
  """
7
 
8
  from flask import Flask, request, jsonify, render_template_string
@@ -14,43 +13,21 @@ import json
14
  from datetime import datetime
15
  from typing import Dict, List, Any, Optional
16
  from dataclasses import dataclass
17
- import asyncio
18
- from concurrent.futures import ThreadPoolExecutor
19
-
20
- # LangGraph imports
21
- try:
22
- from langgraph.graph import StateGraph, END
23
- from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, SystemMessage
24
- from langchain_core.prompts import ChatPromptTemplate
25
- LANGGRAPH_AVAILABLE = True
26
- print("✅ LangGraph successfully imported")
27
- except ImportError as e:
28
- LANGGRAPH_AVAILABLE = False
29
- print(f"⚠️ LangGraph import failed: {e}")
30
- print("Using simplified processing mode")
31
 
32
  # Configure logging
33
  logging.basicConfig(level=logging.INFO)
34
  logger = logging.getLogger(__name__)
35

36
  app = Flask(__name__)
37
 
38
  # Global variables
39
  model = None
40
  tokenizer = None
41
- executor = ThreadPoolExecutor(max_workers=2)
42
-
43
- def cleanup_memory():
44
- """Clean up GPU/CPU memory"""
45
- try:
46
- if torch.cuda.is_available():
47
- torch.cuda.empty_cache()
48
- torch.cuda.synchronize()
49
- # Force garbage collection
50
- import gc
51
- gc.collect()
52
- except Exception as e:
53
- logger.warning(f"Memory cleanup warning: {e}")
54
 
55
  @dataclass
56
  class TechScores:
@@ -86,341 +63,21 @@ class TechScores:
86
  threats.append(threat)
87
  return threats
88
 
89
- @dataclass
90
- class LangGraphState:
91
- """State for LangGraph processing"""
92
- tech_scores: TechScores
93
- processed_analysis: Optional[str] = None
94
- economic_context: Optional[str] = None
95
- risk_assessment: Optional[str] = None
96
- policy_recommendations: Optional[str] = None
97
- final_prompt: Optional[str] = None
98
- processing_steps: List[str] = None
99
-
100
- def __post_init__(self):
101
- if self.processing_steps is None:
102
- self.processing_steps = []
103
 
104
  class LangGraphProcessor:
105
- """LangGraph-based tech score processor"""
106
 
107
  def __init__(self):
108
  self.graph = None
109
- if LANGGRAPH_AVAILABLE:
110
- self._build_graph()
111
-
112
- def _build_graph(self):
113
- """Build the LangGraph processing pipeline"""
114
- try:
115
- # Create state graph
116
- from langgraph.graph import StateGraph, END
117
-
118
- workflow = StateGraph(dict) # Use dict instead of LangGraphState for compatibility
119
-
120
- # Add nodes
121
- workflow.add_node("analyze_tech_scores", self._analyze_tech_scores_simple)
122
- workflow.add_node("assess_economic_context", self._assess_economic_context_simple)
123
- workflow.add_node("evaluate_risks", self._evaluate_risks_simple)
124
- workflow.add_node("generate_policy_recs", self._generate_policy_recommendations_simple)
125
- workflow.add_node("create_final_prompt", self._create_final_prompt_simple)
126
-
127
- # Define edges
128
- workflow.set_entry_point("analyze_tech_scores")
129
- workflow.add_edge("analyze_tech_scores", "assess_economic_context")
130
- workflow.add_edge("assess_economic_context", "evaluate_risks")
131
- workflow.add_edge("evaluate_risks", "generate_policy_recs")
132
- workflow.add_edge("generate_policy_recs", "create_final_prompt")
133
- workflow.add_edge("create_final_prompt", END)
134
-
135
- # Compile graph
136
- self.graph = workflow.compile()
137
- logger.info("✅ LangGraph pipeline built successfully")
138
-
139
- except Exception as e:
140
- logger.error(f"Failed to build LangGraph: {e}")
141
- self.graph = None
142
-
143
- def _analyze_tech_scores_simple(self, state: dict) -> dict:
144
- """Simplified tech scores analysis for LangGraph compatibility"""
145
- tech_scores = state.get('tech_scores')
146
- if not tech_scores:
147
- return state
148
-
149
- total_threat = tech_scores.get_total_threat_level()
150
- dominant_threats = tech_scores.get_dominant_threats()
151
-
152
- analysis = f"""TECHNOLOGY THREAT ANALYSIS (Year {tech_scores.year}):
153
-
154
- Overall Threat Level: {total_threat:.3f} ({self._threat_level_description(total_threat)})
155
- Dominant Threats: {', '.join(dominant_threats) if dominant_threats else 'None above threshold'}
156
-
157
- Detailed Analysis:
158
- - AI: {tech_scores.ai:.3f} - {self._ai_threat_analysis(tech_scores.ai)}
159
- - Cyber: {tech_scores.cyber:.3f} - {self._cyber_threat_analysis(tech_scores.cyber)}
160
- - Bio: {tech_scores.bio:.3f} - {self._bio_threat_analysis(tech_scores.bio)}
161
- - Nuclear: {tech_scores.nuclear:.3f} - {self._nuclear_threat_analysis(tech_scores.nuclear)}
162
- - Climate: {tech_scores.climate:.3f} - {self._climate_threat_analysis(tech_scores.climate)}
163
- - Space: {tech_scores.space:.3f} - {self._space_threat_analysis(tech_scores.space)}"""
164
-
165
- state['processed_analysis'] = analysis
166
- state['processing_steps'] = state.get('processing_steps', []) + ["Technology threat analysis completed"]
167
- return state
168
-
169
- def _assess_economic_context_simple(self, state: dict) -> dict:
170
- """Simplified economic context assessment"""
171
- tech_scores = state.get('tech_scores')
172
- if not tech_scores:
173
- return state
174
-
175
- total_threat = tech_scores.get_total_threat_level()
176
- market_volatility = min(total_threat * 1.2, 1.0)
177
- gdp_impact = self._calculate_gdp_impact(tech_scores)
178
-
179
- economic_context = f"""ECONOMIC CONTEXT ASSESSMENT:
180
-
181
- Market Volatility Index: {market_volatility:.3f}
182
- Projected GDP Impact: {gdp_impact:.1f}%
183
- Economic Stability Risk: {self._stability_risk_level(total_threat)}
184
-
185
- Regional Vulnerabilities:
186
- {self._assess_regional_vulnerabilities(tech_scores)}"""
187
-
188
- state['economic_context'] = economic_context
189
- state['processing_steps'] = state.get('processing_steps', []) + ["Economic context assessment completed"]
190
- return state
191
-
192
- def _evaluate_risks_simple(self, state: dict) -> dict:
193
- """Simplified risk evaluation"""
194
- tech_scores = state.get('tech_scores')
195
- if not tech_scores:
196
- return state
197
-
198
- cascade_probability = self._calculate_cascade_probability(tech_scores)
199
- systemic_risk = self._assess_systemic_risk(tech_scores)
200
-
201
- risk_assessment = f"""COMPREHENSIVE RISK EVALUATION:
202
-
203
- Cascade Failure Probability: {cascade_probability:.3f}
204
- Systemic Risk Level: {systemic_risk}
205
-
206
- Critical Risk Scenarios:
207
- {self._generate_risk_scenarios(tech_scores)}"""
208
-
209
- state['risk_assessment'] = risk_assessment
210
- state['processing_steps'] = state.get('processing_steps', []) + ["Risk evaluation completed"]
211
- return state
212
-
213
- def _generate_policy_recommendations_simple(self, state: dict) -> dict:
214
- """Simplified policy recommendations"""
215
- tech_scores = state.get('tech_scores')
216
- if not tech_scores:
217
- return state
218
-
219
- immediate_actions = self._immediate_policy_actions(tech_scores)
220
-
221
- policy_recs = f"""POLICY RECOMMENDATIONS FRAMEWORK:
222
-
223
- IMMEDIATE ACTIONS (0-6 months):
224
- {immediate_actions}
225
-
226
- Resource Allocation Priorities:
227
- {self._resource_allocation_priorities(tech_scores)}"""
228
-
229
- state['policy_recommendations'] = policy_recs
230
- state['processing_steps'] = state.get('processing_steps', []) + ["Policy recommendations generated"]
231
- return state
232
-
233
- def _create_final_prompt_simple(self, state: dict) -> dict:
234
- """Simplified final prompt creation"""
235
-
236
- final_prompt = f"""You are AEGIS Economics AI, analyzing technology threats for economic policy planning.
237
-
238
- PROCESSED INTELLIGENCE BRIEFING:
239
-
240
- {state.get('processed_analysis', '')}
241
-
242
- {state.get('economic_context', '')}
243
-
244
- {state.get('risk_assessment', '')}
245
-
246
- {state.get('policy_recommendations', '')}
247
-
248
- ANALYSIS REQUEST:
249
- Based on this comprehensive intelligence briefing, provide your final economic analysis with specific numerical estimates and actionable recommendations for economic policy makers."""
250
-
251
- state['final_prompt'] = final_prompt
252
- state['processing_steps'] = state.get('processing_steps', []) + ["Final prompt optimization completed"]
253
- return state
254
- """Step 1: Analyze technology threat scores"""
255
- scores = state.tech_scores
256
- total_threat = scores.get_total_threat_level()
257
- dominant_threats = scores.get_dominant_threats()
258
-
259
- analysis = f"""TECHNOLOGY THREAT ANALYSIS (Year {scores.year}):
260
-
261
- Overall Threat Level: {total_threat:.3f} ({self._threat_level_description(total_threat)})
262
- Dominant Threats: {', '.join(dominant_threats) if dominant_threats else 'None above threshold'}
263
-
264
- Detailed Breakdown:
265
- - AI Threat Level: {scores.ai:.3f} - {self._ai_threat_analysis(scores.ai)}
266
- - Cyber Threat Level: {scores.cyber:.3f} - {self._cyber_threat_analysis(scores.cyber)}
267
- - Bio Threat Level: {scores.bio:.3f} - {self._bio_threat_analysis(scores.bio)}
268
- - Nuclear Threat Level: {scores.nuclear:.3f} - {self._nuclear_threat_analysis(scores.nuclear)}
269
- - Climate Threat Level: {scores.climate:.3f} - {self._climate_threat_analysis(scores.climate)}
270
- - Space Threat Level: {scores.space:.3f} - {self._space_threat_analysis(scores.space)}
271
-
272
- Cross-Domain Interactions:
273
- {self._analyze_cross_domain_effects(scores)}"""
274
-
275
- state.processed_analysis = analysis
276
- state.processing_steps.append("Technology threat analysis completed")
277
- return state
278
-
279
- def _assess_economic_context(self, state: LangGraphState) -> LangGraphState:
280
- """Step 2: Assess economic context and implications"""
281
- scores = state.tech_scores
282
- total_threat = scores.get_total_threat_level()
283
-
284
- # Economic impact assessment based on threat levels
285
- market_volatility = min(total_threat * 1.2, 1.0)
286
- gdp_impact = self._calculate_gdp_impact(scores)
287
- sector_impacts = self._analyze_sector_impacts(scores)
288
-
289
- economic_context = f"""ECONOMIC CONTEXT ASSESSMENT:
290
-
291
- Market Volatility Index: {market_volatility:.3f}
292
- Projected GDP Impact: {gdp_impact:.1f}%
293
- Economic Stability Risk: {self._stability_risk_level(total_threat)}
294
-
295
- Sector Impact Analysis:
296
- {sector_impacts}
297
-
298
- Regional Economic Vulnerabilities:
299
- {self._assess_regional_vulnerabilities(scores)}
300
-
301
- Supply Chain Disruption Risk: {self._supply_chain_risk(scores):.3f}
302
- Financial System Stress: {self._financial_stress_assessment(scores)}"""
303
-
304
- state.economic_context = economic_context
305
- state.processing_steps.append("Economic context assessment completed")
306
- return state
307
-
308
- def _evaluate_risks(self, state: LangGraphState) -> LangGraphState:
309
- """Step 3: Evaluate comprehensive risk scenarios"""
310
- scores = state.tech_scores
311
-
312
- # Risk scenario modeling
313
- cascade_probability = self._calculate_cascade_probability(scores)
314
- systemic_risk = self._assess_systemic_risk(scores)
315
- timeline_analysis = self._analyze_risk_timeline(scores)
316
-
317
- risk_assessment = f"""COMPREHENSIVE RISK EVALUATION:
318
-
319
- Cascade Failure Probability: {cascade_probability:.3f}
320
- Systemic Risk Level: {systemic_risk}
321
- Risk Materialization Timeline: {timeline_analysis}
322
-
323
- Critical Risk Scenarios:
324
- {self._generate_risk_scenarios(scores)}
325
-
326
- Mitigation Priorities:
327
- {self._prioritize_mitigations(scores)}
328
-
329
- Early Warning Indicators:
330
- {self._identify_warning_indicators(scores)}"""
331
-
332
- state.risk_assessment = risk_assessment
333
- state.processing_steps.append("Risk evaluation completed")
334
- return state
335
-
336
- def _generate_policy_recommendations(self, state: LangGraphState) -> LangGraphState:
337
- """Step 4: Generate policy recommendations"""
338
- scores = state.tech_scores
339
-
340
- immediate_actions = self._immediate_policy_actions(scores)
341
- medium_term_strategies = self._medium_term_strategies(scores)
342
- long_term_planning = self._long_term_planning(scores)
343
-
344
- policy_recs = f"""POLICY RECOMMENDATIONS FRAMEWORK:
345
-
346
- IMMEDIATE ACTIONS (0-6 months):
347
- {immediate_actions}
348
-
349
- MEDIUM-TERM STRATEGIES (6 months - 2 years):
350
- {medium_term_strategies}
351
-
352
- LONG-TERM PLANNING (2+ years):
353
- {long_term_planning}
354
-
355
- International Cooperation Requirements:
356
- {self._international_cooperation_needs(scores)}
357
-
358
- Resource Allocation Priorities:
359
- {self._resource_allocation_priorities(scores)}"""
360
-
361
- state.policy_recommendations = policy_recs
362
- state.processing_steps.append("Policy recommendations generated")
363
- return state
364
-
365
- def _create_final_prompt(self, state: LangGraphState) -> LangGraphState:
366
- """Step 5: Create optimized prompt for AEGIS Economics AI"""
367
-
368
- final_prompt = f"""You are AEGIS Economics AI, analyzing technology threats for economic policy planning.
369
-
370
- PROCESSED INTELLIGENCE BRIEFING:
371
-
372
- {state.processed_analysis}
373
-
374
- {state.economic_context}
375
-
376
- {state.risk_assessment}
377
-
378
- {state.policy_recommendations}
379
-
380
- ANALYSIS REQUEST:
381
- Based on this comprehensive intelligence briefing, provide your final economic analysis with:
382
-
383
- 1. MARKET SHOCK INDEX (0.0-1.0): Your calculated overall economic disruption risk
384
- 2. IMPACT CLASSIFICATION: Local/Regional/Global/Catastrophic
385
- 3. QUANTIFIED PREDICTIONS:
386
- - GDP impact percentage over 1, 3, and 5 years
387
- - Market volatility projections
388
- - Sector-specific impact percentages
389
- - Regional vulnerability rankings
390
-
391
- 4. ACTIONABLE INTELLIGENCE:
392
- - Top 3 immediate policy priorities
393
- - Critical economic indicators to monitor
394
- - Investment strategy recommendations
395
- - Risk mitigation timeline
396
-
397
- 5. CONFIDENCE ASSESSMENT:
398
- - Prediction confidence levels
399
- - Key uncertainty factors
400
- - Alternative scenario probabilities
401
-
402
- Provide specific numerical estimates and actionable recommendations for economic policy makers."""
403
-
404
- state.final_prompt = final_prompt
405
- state.processing_steps.append("Final prompt optimization completed")
406
- return state
407
-
408
- # Helper methods for analysis
409
- def _threat_level_description(self, total_threat: float) -> str:
410
- """Get threat level description from total threat score"""
411
- if total_threat >= 0.8: return "CRITICAL"
412
- elif total_threat >= 0.6: return "HIGH"
413
- elif total_threat >= 0.4: return "MODERATE"
414
- elif total_threat >= 0.2: return "LOW"
415
- else: return "MINIMAL"
416
-
417
- def _stability_risk_level(self, total_threat: float) -> str:
418
- """Calculate stability risk level from total threat"""
419
- if total_threat >= 0.8: return "CRITICAL"
420
- elif total_threat >= 0.6: return "HIGH"
421
- elif total_threat >= 0.4: return "MODERATE"
422
- elif total_threat >= 0.2: return "LOW"
423
- else: return "MINIMAL"
424
 
425
  def _ai_threat_analysis(self, score: float) -> str:
426
  if score >= 0.8: return "AGI/Singularity risk, massive economic disruption"
@@ -458,631 +115,271 @@ Provide specific numerical estimates and actionable recommendations for economic
458
  elif score >= 0.4: return "Space security concerns, increased space militarization"
459
  else: return "Stable space environment, continued commercial growth"
460
 
461
- def _calculate_gdp_impact(self, scores: TechScores) -> float:
462
- """Calculate projected GDP impact"""
463
- base_impact = scores.get_total_threat_level() * -15 # Base negative impact
464
-
465
- # Amplification factors
466
- if scores.ai >= 0.8: base_impact *= 1.5 # AI singularity amplification
467
- if scores.climate >= 0.8: base_impact *= 1.3 # Climate catastrophe
468
- if scores.nuclear >= 0.6: base_impact *= 2.0 # Nuclear conflict
469
-
470
- return max(base_impact, -50) # Cap at -50% GDP impact
471
-
472
- def _assess_regional_vulnerabilities(self, scores: TechScores) -> str:
473
- """Assess regional vulnerabilities"""
474
- regions = {
475
- 'North America': (scores.ai * 0.8 + scores.cyber * 0.9 + scores.climate * 0.6) / 3,
476
- 'Europe': (scores.cyber * 0.7 + scores.climate * 0.8 + scores.nuclear * 0.6) / 3,
477
- 'Asia Pacific': (scores.ai * 0.9 + scores.cyber * 0.8 + scores.climate * 0.7) / 3,
478
- 'China': (scores.ai * 0.9 + scores.cyber * 0.8 + scores.space * 0.7) / 3,
479
- 'Russia': (scores.nuclear * 0.9 + scores.cyber * 0.7 + scores.space * 0.6) / 3,
480
- 'Middle East': (scores.nuclear * 0.8 + scores.climate * 0.9 + scores.cyber * 0.5) / 3,
481
- 'Africa': (scores.climate * 0.9 + scores.bio * 0.7 + scores.cyber * 0.4) / 3,
482
- 'South America': (scores.climate * 0.8 + scores.bio * 0.6 + scores.cyber * 0.4) / 3
483
- }
484
-
485
- return "\n".join([f"- {region}: {vuln:.3f} vulnerability index" for region, vuln in regions.items()])
486
-
487
- def _calculate_cascade_probability(self, scores: TechScores) -> float:
488
- """Calculate cascade failure probability"""
489
- base_prob = scores.get_total_threat_level() * 0.8
490
-
491
- # Cross-domain amplification
492
- if scores.ai >= 0.7 and scores.cyber >= 0.7: base_prob += 0.2
493
- if scores.climate >= 0.8: base_prob += 0.15
494
- if scores.nuclear >= 0.5: base_prob += 0.25
495
-
496
- return min(base_prob, 1.0)
497
-
498
- def _assess_systemic_risk(self, scores: TechScores) -> str:
499
- """Assess systemic risk level"""
500
- risk_score = (scores.ai * 0.25 + scores.cyber * 0.25 + scores.nuclear * 0.2 +
501
- scores.climate * 0.15 + scores.bio * 0.1 + scores.space * 0.05)
502
-
503
- if risk_score >= 0.8: return "EXISTENTIAL - Civilization-level threats"
504
- elif risk_score >= 0.6: return "CRITICAL - System-wide failure risk"
505
- elif risk_score >= 0.4: return "HIGH - Major disruption likely"
506
- elif risk_score >= 0.2: return "MODERATE - Manageable with preparation"
507
- else: return "LOW - Standard risk management sufficient"
508
-
509
- def _generate_risk_scenarios(self, scores: TechScores) -> str:
510
- """Generate risk scenarios"""
511
- scenarios = []
512
-
513
- if scores.ai >= 0.8:
514
- scenarios.append("AI Singularity Scenario: Rapid economic obsolescence, mass unemployment")
515
- if scores.cyber >= 0.7 and scores.ai >= 0.5:
516
- scenarios.append("Cyber-AI Warfare: Autonomous attacks on critical infrastructure")
517
- if scores.climate >= 0.8:
518
- scenarios.append("Climate Collapse: Supply chain breakdown, mass migration")
519
- if scores.nuclear >= 0.6:
520
- scenarios.append("Nuclear Exchange: Regional economic devastation, global recession")
521
- if scores.bio >= 0.7:
522
- scenarios.append("Pandemic Scenario: Healthcare collapse, economic shutdown")
523
-
524
- return "\n".join([f"- {scenario}" for scenario in scenarios]) if scenarios else "- No critical scenarios identified"
525
-
526
- def _immediate_policy_actions(self, scores: TechScores) -> str:
527
- """Generate immediate policy actions"""
528
- actions = []
529
-
530
- if scores.ai >= 0.7:
531
- actions.append("Emergency AI safety protocols, regulatory frameworks")
532
- if scores.cyber >= 0.6:
533
- actions.append("Critical infrastructure protection, cyber emergency response")
534
- if scores.nuclear >= 0.5:
535
- actions.append("Nuclear security enhancement, crisis communication protocols")
536
- if scores.climate >= 0.7:
537
- actions.append("Climate emergency declarations, adaptation funding")
538
- if scores.bio >= 0.6:
539
- actions.append("Public health emergency preparedness, medical stockpiling")
540
-
541
- return "\n".join([f"- {action}" for action in actions]) if actions else "- Continue standard monitoring and preparedness"
542
-
543
- def _resource_allocation_priorities(self, scores: TechScores) -> str:
544
- """Calculate resource allocation priorities"""
545
- priorities = []
546
- threat_scores = scores.to_dict()
547
-
548
- # Calculate resource allocation based on threat levels
549
- total_threat = sum(threat_scores.values())
550
- if total_threat > 0:
551
- for threat, score in threat_scores.items():
552
- percentage = (score / total_threat) * 100
553
- if percentage >= 15: # Significant allocation threshold
554
- priorities.append(f"{threat} defense/mitigation: {percentage:.1f}% of emergency resources")
555
-
556
- return "\n".join([f"- {priority}" for priority in priorities]) if priorities else "- Balanced resource allocation across all domains"
557
-
558
- def _calculate_gdp_impact(self, scores: TechScores) -> float:
559
- """Calculate projected GDP impact"""
560
- base_impact = scores.get_total_threat_level() * -15 # Base negative impact
561
-
562
- # Amplification factors
563
- if scores.ai >= 0.8: base_impact *= 1.5 # AI singularity amplification
564
- if scores.climate >= 0.8: base_impact *= 1.3 # Climate catastrophe
565
- if scores.nuclear >= 0.6: base_impact *= 2.0 # Nuclear conflict
566
-
567
- return max(base_impact, -50) # Cap at -50% GDP impact
568
-
569
- def _analyze_sector_impacts(self, scores: TechScores) -> str:
570
- sectors = {
571
- 'Technology': max(scores.ai, scores.cyber) * 100,
572
- 'Finance': (scores.cyber * 0.8 + scores.ai * 0.6) * 100,
573
- 'Energy': (scores.climate * 0.9 + scores.cyber * 0.5) * 100,
574
- 'Healthcare': (scores.bio * 0.9 + scores.ai * 0.4) * 100,
575
- 'Defense': (scores.nuclear * 0.8 + scores.space * 0.7 + scores.cyber * 0.6) * 100,
576
- 'Agriculture': (scores.climate * 0.8 + scores.bio * 0.5) * 100,
577
- 'Transportation': (scores.cyber * 0.6 + scores.climate * 0.5 + scores.ai * 0.4) * 100,
578
- 'Manufacturing': (scores.ai * 0.7 + scores.cyber * 0.5 + scores.climate * 0.4) * 100
579
- }
580
-
581
- return "\n".join([f"- {sector}: {impact:.1f}% disruption risk" for sector, impact in sectors.items()])
582
-
583
- def _assess_regional_vulnerabilities(self, scores: TechScores) -> str:
584
- regions = {
585
- 'North America': (scores.ai * 0.8 + scores.cyber * 0.9 + scores.climate * 0.6) / 3,
586
- 'Europe': (scores.cyber * 0.7 + scores.climate * 0.8 + scores.nuclear * 0.6) / 3,
587
- 'Asia Pacific': (scores.ai * 0.9 + scores.cyber * 0.8 + scores.climate * 0.7) / 3,
588
- 'China': (scores.ai * 0.9 + scores.cyber * 0.8 + scores.space * 0.7) / 3,
589
- 'Russia': (scores.nuclear * 0.9 + scores.cyber * 0.7 + scores.space * 0.6) / 3,
590
- 'Middle East': (scores.nuclear * 0.8 + scores.climate * 0.9 + scores.cyber * 0.5) / 3,
591
- 'Africa': (scores.climate * 0.9 + scores.bio * 0.7 + scores.cyber * 0.4) / 3,
592
- 'South America': (scores.climate * 0.8 + scores.bio * 0.6 + scores.cyber * 0.4) / 3
593
- }
594
-
595
- return "\n".join([f"- {region}: {vuln:.3f} vulnerability index" for region, vuln in regions.items()])
596
-
597
- def _supply_chain_risk(self, scores: TechScores) -> float:
598
- return min((scores.cyber * 0.4 + scores.climate * 0.3 + scores.ai * 0.2 + scores.bio * 0.1), 1.0)
599
-
600
- def _financial_stress_assessment(self, scores: TechScores) -> str:
601
- stress_level = scores.cyber * 0.4 + scores.ai * 0.3 + scores.nuclear * 0.3
602
- if stress_level >= 0.8: return "CRITICAL - System failure risk"
603
- elif stress_level >= 0.6: return "HIGH - Major instability expected"
604
- elif stress_level >= 0.4: return "MODERATE - Increased volatility"
605
- else: return "LOW - Manageable stress levels"
606
-
607
- def _calculate_cascade_probability(self, scores: TechScores) -> float:
608
- # Complex cascade probability calculation
609
- base_prob = scores.get_total_threat_level() * 0.8
610
-
611
- # Cross-domain amplification
612
- if scores.ai >= 0.7 and scores.cyber >= 0.7: base_prob += 0.2
613
- if scores.climate >= 0.8: base_prob += 0.15
614
- if scores.nuclear >= 0.5: base_prob += 0.25
615
-
616
- return min(base_prob, 1.0)
617
-
618
- def _assess_systemic_risk(self, scores: TechScores) -> str:
619
- risk_score = (scores.ai * 0.25 + scores.cyber * 0.25 + scores.nuclear * 0.2 +
620
- scores.climate * 0.15 + scores.bio * 0.1 + scores.space * 0.05)
621
-
622
- if risk_score >= 0.8: return "EXISTENTIAL - Civilization-level threats"
623
- elif risk_score >= 0.6: return "CRITICAL - System-wide failure risk"
624
- elif risk_score >= 0.4: return "HIGH - Major disruption likely"
625
- elif risk_score >= 0.2: return "MODERATE - Manageable with preparation"
626
- else: return "LOW - Standard risk management sufficient"
627
-
628
- def _analyze_risk_timeline(self, scores: TechScores) -> str:
629
- timelines = []
630
- if scores.cyber >= 0.6: timelines.append("Cyber threats: Immediate (0-6 months)")
631
- if scores.ai >= 0.7: timelines.append("AI disruption: Near-term (1-3 years)")
632
- if scores.climate >= 0.6: timelines.append("Climate impacts: Medium-term (2-5 years)")
633
- if scores.nuclear >= 0.5: timelines.append("Nuclear risks: Variable (immediate to long-term)")
634
- if scores.bio >= 0.5: timelines.append("Biological threats: Short to medium-term (6 months - 2 years)")
635
- if scores.space >= 0.6: timelines.append("Space threats: Medium-term (1-5 years)")
636
-
637
- return "\n".join([f"- {timeline}" for timeline in timelines]) if timelines else "- No immediate timeline concerns"
638
-
639
- def _generate_risk_scenarios(self, scores: TechScores) -> str:
640
- scenarios = []
641
-
642
- if scores.ai >= 0.8:
643
- scenarios.append("AI Singularity Scenario: Rapid economic obsolescence, mass unemployment")
644
- if scores.cyber >= 0.7 and scores.ai >= 0.5:
645
- scenarios.append("Cyber-AI Warfare: Autonomous attacks on critical infrastructure")
646
- if scores.climate >= 0.8:
647
- scenarios.append("Climate Collapse: Supply chain breakdown, mass migration")
648
- if scores.nuclear >= 0.6:
649
- scenarios.append("Nuclear Exchange: Regional economic devastation, global recession")
650
- if scores.bio >= 0.7:
651
- scenarios.append("Pandemic Scenario: Healthcare collapse, economic shutdown")
652
-
653
- return "\n".join([f"- {scenario}" for scenario in scenarios]) if scenarios else "- No critical scenarios identified"
654
-
655
- def _prioritize_mitigations(self, scores: TechScores) -> str:
656
- mitigations = []
657
- threat_scores = scores.to_dict()
658
-
659
- # Sort by threat level
660
- sorted_threats = sorted(threat_scores.items(), key=lambda x: x[1], reverse=True)
661
-
662
- for threat, score in sorted_threats[:3]: # Top 3 threats
663
- if score >= 0.5:
664
- if threat == 'AI':
665
- mitigations.append("AI governance frameworks, safety research funding")
666
- elif threat == 'Cyber':
667
- mitigations.append("Critical infrastructure hardening, cyber defense capabilities")
668
- elif threat == 'Bio':
669
- mitigations.append("Pandemic preparedness, biosecurity measures")
670
- elif threat == 'Nuclear':
671
- mitigations.append("Nuclear security, arms control agreements")
672
- elif threat == 'Climate':
673
- mitigations.append("Climate adaptation, green transition acceleration")
674
- elif threat == 'Space':
675
- mitigations.append("Space debris mitigation, satellite protection")
676
-
677
- return "\n".join([f"- {mit}" for mit in mitigations]) if mitigations else "- Standard risk management protocols sufficient"
678
-
679
- def _identify_warning_indicators(self, scores: TechScores) -> str:
680
- indicators = []
681
-
682
- if scores.ai >= 0.6:
683
- indicators.append("AI capability benchmarks, automation deployment rates")
684
- if scores.cyber >= 0.5:
685
- indicators.append("Cyber incident frequency, critical system vulnerabilities")
686
- if scores.climate >= 0.6:
687
- indicators.append("Climate tipping point indicators, extreme weather frequency")
688
- if scores.nuclear >= 0.4:
689
- indicators.append("Nuclear material security, geopolitical tensions")
690
- if scores.bio >= 0.5:
691
- indicators.append("Disease surveillance systems, biosafety incidents")
692
- if scores.space >= 0.5:
693
- indicators.append("Space debris levels, satellite collision risks")
694
-
695
- return "\n".join([f"- {indicator}" for indicator in indicators]) if indicators else "- Standard monitoring protocols"
696
-
697
- def _immediate_policy_actions(self, scores: TechScores) -> str:
698
- actions = []
699
-
700
- if scores.ai >= 0.7:
701
- actions.append("Emergency AI safety protocols, regulatory frameworks")
702
- if scores.cyber >= 0.6:
703
- actions.append("Critical infrastructure protection, cyber emergency response")
704
- if scores.nuclear >= 0.5:
705
- actions.append("Nuclear security enhancement, crisis communication protocols")
706
- if scores.climate >= 0.7:
707
- actions.append("Climate emergency declarations, adaptation funding")
708
- if scores.bio >= 0.6:
709
- actions.append("Public health emergency preparedness, medical stockpiling")
710
-
711
- return "\n".join([f"- {action}" for action in actions]) if actions else "- Continue standard monitoring and preparedness"
712
-
713
- def _medium_term_strategies(self, scores: TechScores) -> str:
714
- strategies = []
715
-
716
- if scores.ai >= 0.5:
717
- strategies.append("AI workforce transition programs, safety research investment")
718
- if scores.cyber >= 0.4:
719
- strategies.append("Cybersecurity infrastructure modernization, international cooperation")
720
- if scores.climate >= 0.5:
721
- strategies.append("Green infrastructure investment, carbon pricing mechanisms")
722
- if scores.nuclear >= 0.3:
723
- strategies.append("Nuclear security upgrades, non-proliferation efforts")
724
- if scores.bio >= 0.4:
725
- strategies.append("Healthcare system resilience, biosecurity capabilities")
726
-
727
- return "\n".join([f"- {strategy}" for strategy in strategies]) if strategies else "- Standard policy development"
728
-
729
- def _long_term_planning(self, scores: TechScores) -> str:
730
- plans = []
731
-
732
- if scores.ai >= 0.4:
733
- plans.append("Post-AI economic models, universal basic income consideration")
734
- if scores.climate >= 0.4:
735
- plans.append("Climate-resilient infrastructure, economic transformation")
736
- if scores.cyber >= 0.3:
737
- plans.append("Quantum-safe cryptography, next-gen security architectures")
738
- if scores.nuclear >= 0.2:
739
- plans.append("Nuclear disarmament pathways, alternative security frameworks")
740
-
741
- return "\n".join([f"- {plan}" for plan in plans]) if plans else "- Continue adaptive planning processes"
742
-
743
- def _international_cooperation_needs(self, scores: TechScores) -> str:
744
- needs = []
745
-
746
- if scores.ai >= 0.6:
747
- needs.append("Global AI governance, safety standards coordination")
748
- if scores.cyber >= 0.5:
749
- needs.append("Cyber norms, attribution mechanisms, response coordination")
750
- if scores.climate >= 0.6:
751
- needs.append("Enhanced climate cooperation, technology transfer")
752
- if scores.nuclear >= 0.4:
753
- needs.append("Arms control renewal, nuclear security cooperation")
754
- if scores.bio >= 0.5:
755
- needs.append("Global health security, pandemic preparedness")
756
-
757
- return "\n".join([f"- {need}" for need in needs]) if needs else "- Standard international engagement"
758
-
759
- def _resource_allocation_priorities(self, scores: TechScores) -> str:
760
- priorities = []
761
- threat_scores = scores.to_dict()
762
-
763
- # Calculate resource allocation based on threat levels
764
- total_threat = sum(threat_scores.values())
765
- if total_threat > 0:
766
- for threat, score in threat_scores.items():
767
- percentage = (score / total_threat) * 100
768
- if percentage >= 15: # Significant allocation threshold
769
- priorities.append(f"{threat} defense/mitigation: {percentage:.1f}% of emergency resources")
770
-
771
- return "\n".join([f"- {priority}" for priority in priorities]) if priorities else "- Balanced resource allocation across all domains"
772
 
773
  def process_tech_scores(self, tech_scores: TechScores) -> Dict[str, Any]:
774
- """Process tech scores through LangGraph pipeline"""
775
- if not self.graph:
776
- logger.warning("LangGraph not available, using simplified processing")
777
- return self._simplified_processing(tech_scores)
778
-
779
- try:
780
- # Initialize state as dict for compatibility
781
- initial_state = {
782
- 'tech_scores': tech_scores,
783
- 'processing_steps': []
784
- }
785
-
786
- # Run through LangGraph pipeline
787
- result = self.graph.invoke(initial_state)
788
-
789
- return {
790
- 'success': True,
791
- 'processed_analysis': result.get('processed_analysis'),
792
- 'economic_context': result.get('economic_context'),
793
- 'risk_assessment': result.get('risk_assessment'),
794
- 'policy_recommendations': result.get('policy_recommendations'),
795
- 'final_prompt': result.get('final_prompt'),
796
- 'processing_steps': result.get('processing_steps', []),
797
- 'metadata': {
798
- 'total_threat_level': tech_scores.get_total_threat_level(),
799
- 'dominant_threats': tech_scores.get_dominant_threats(),
800
- 'processing_timestamp': datetime.now().isoformat()
801
- }
802
- }
803
-
804
- except Exception as e:
805
- logger.error(f"LangGraph processing failed: {e}")
806
- return self._simplified_processing(tech_scores)
807
-
808
- def _simplified_processing(self, tech_scores: TechScores) -> Dict[str, Any]:
809
- """Fallback processing without LangGraph"""
810
  total_threat = tech_scores.get_total_threat_level()
811
  dominant_threats = tech_scores.get_dominant_threats()
812
 
813
- simplified_prompt = f"""Analyze these technology threat scores for year {tech_scores.year}:
814
 
815
- AI Score: {tech_scores.ai:.3f}
816
- Cyber Score: {tech_scores.cyber:.3f}
817
- Bio Score: {tech_scores.bio:.3f}
818
- Nuclear Score: {tech_scores.nuclear:.3f}
819
- Climate Score: {tech_scores.climate:.3f}
820
- Space Score: {tech_scores.space:.3f}
821
 
822
- Overall Threat Level: {total_threat:.3f}
823
- Dominant Threats: {', '.join(dominant_threats) if dominant_threats else 'None'}
 
 
 
 
 
 
 
824
 
825
- Provide comprehensive economic analysis including:
826
- 1. Market shock index (0-1 scale)
827
- 2. Impact classification (Local/Regional/Global/Catastrophic)
828
- 3. GDP impact projections
829
- 4. Regional vulnerability assessment
830
- 5. Policy recommendations
831
- 6. Investment strategies
832
 
833
- Focus on quantitative metrics and actionable insights."""
834
 
835
  return {
836
  'success': True,
837
  'final_prompt': simplified_prompt,
838
- 'processing_steps': ['Simplified processing completed'],
839
  'metadata': {
840
  'total_threat_level': total_threat,
841
  'dominant_threats': dominant_threats,
 
842
  'processing_mode': 'simplified'
843
  }
844
  }
845
 
846
- # Initialize LangGraph processor
847
- langgraph_processor = LangGraphProcessor()
848
 
849
  def load_model():
850
- """Load the model and tokenizer from Gaston895/Aegisecon1 repository using the working approach"""
851
- global model, tokenizer, chat_pipeline
852
 
853
  try:
854
- logger.info("Loading model and tokenizer from Hugging Face...")
855
 
856
- # Load from the deployed model repository
857
  model_repo = "Gaston895/Aegisecon1"
858
 
859
- logger.info(f"Loading tokenizer from {model_repo}...")
860
  tokenizer = AutoTokenizer.from_pretrained(
861
  model_repo,
862
  trust_remote_code=True,
863
  use_auth_token=False
864
  )
865
 
866
- # Fix pad token issue to resolve attention mask warning
867
  if tokenizer.pad_token is None:
868
  tokenizer.pad_token = tokenizer.eos_token
869
- tokenizer.pad_token_id = tokenizer.eos_token_id
870
 
871
- logger.info(f"Loading model from {model_repo}...")
872
  model = AutoModelForCausalLM.from_pretrained(
873
  model_repo,
874
- torch_dtype=torch.float16, # Use float16 for better compatibility
875
- device_map="cpu", # Force CPU for HF Spaces compatibility
876
  trust_remote_code=True,
877
  use_auth_token=False,
878
- low_cpu_mem_usage=True
 
 
879
  )
880
 
881
- # Don't create pipeline - use direct model generation like the working version
882
- chat_pipeline = None # Set to None to indicate we're using direct generation
883
 
884
- logger.info("Model loaded successfully from HF repository!")
885
- logger.info(f"Model device: {next(model.parameters()).device}")
886
- logger.info(f"Model dtype: {next(model.parameters()).dtype}")
887
 
888
  return True
889
 
890
  except Exception as e:
891
- logger.error(f"Error loading model from HF: {str(e)}")
892
- # Try alternative loading method
 
893
  try:
894
- logger.info("Trying alternative loading method...")
895
  tokenizer = AutoTokenizer.from_pretrained(
896
- "Qwen/Qwen2-1.5B", # Fallback to base model
897
  trust_remote_code=True
898
  )
899
- # Fix pad token for fallback too
900
  if tokenizer.pad_token is None:
901
  tokenizer.pad_token = tokenizer.eos_token
902
- tokenizer.pad_token_id = tokenizer.eos_token_id
903
  model = AutoModelForCausalLM.from_pretrained(
904
  "Qwen/Qwen2-1.5B",
905
- torch_dtype=torch.float16,
906
  device_map="cpu",
907
  trust_remote_code=True,
908
  low_cpu_mem_usage=True
909
  )
910
- chat_pipeline = None
911
- logger.info("Fallback model loaded successfully!")
 
912
  return True
 
913
  except Exception as e2:
914
- logger.error(f"Fallback loading also failed: {str(e2)}")
915
  return False
916
 
917
- def generate_response(prompt, temperature=0.7):
918
- """Generate response using direct model generation (like the working app_gunicorn.py)"""
 
 
919
  try:
920
  if model is None or tokenizer is None:
921
- return "Model is still loading, please wait a moment and try again..."
922
 
923
- # Economics-focused system prompt (like the working version)
924
- system_prompt = """You are AEGIS Economics AI, an expert economic analyst and policy advisor.
925
- Provide clear, accurate, and insightful responses about economics, finance, markets, and policy.
926
- Focus on practical analysis and actionable insights."""
927
 
928
- full_prompt = f"{system_prompt}\n\nUser: {prompt}\nAssistant:"
929
 
930
- # Tokenize input with attention mask (fixes the warning)
931
  inputs = tokenizer(
932
  full_prompt,
933
  return_tensors="pt",
934
  truncation=True,
935
- max_length=1024,
936
- padding=True,
937
- return_attention_mask=True
938
  )
939
 
940
- # Generate response with attention mask
941
  with torch.no_grad():
942
  outputs = model.generate(
943
  inputs.input_ids,
944
- attention_mask=inputs.attention_mask, # Add attention mask
945
- max_new_tokens=256,
946
- temperature=temperature,
947
- do_sample=True,
948
- pad_token_id=tokenizer.pad_token_id, # Use pad_token_id instead of eos_token_id
949
  eos_token_id=tokenizer.eos_token_id,
950
- repetition_penalty=1.1,
951
- no_repeat_ngram_size=3
 
 
952
  )
953
 
954
- # Decode response (like the working version)
955
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
956
 
957
- # Extract only the assistant's response (like the working version)
958
  if "Assistant:" in response:
959
  response = response.split("Assistant:")[-1].strip()
960

961
  return response
962
 
 
 
963
  except Exception as e:
964
- logger.error(f"Error generating response: {str(e)}")
965
- return "I apologize, but I'm having trouble processing your request right now. Please try again in a moment."
 
 
 
966
 
967
- # HTML template (same as before)
968
  HTML_TEMPLATE = """
969
  <!DOCTYPE html>
970
  <html>
971
  <head>
972
- <title>AEGIS Economics AI with LangGraph</title>
973
  <meta charset="utf-8">
974
  <meta name="viewport" content="width=device-width, initial-scale=1">
975
  <style>
976
  body { font-family: Arial, sans-serif; margin: 0; padding: 20px; background: #f5f5f5; }
977
  .container { max-width: 900px; margin: 0 auto; background: white; padding: 20px; border-radius: 10px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
978
- .header { text-align: center; margin-bottom: 30px; }
979
- .pipeline { background: #e3f2fd; padding: 15px; border-radius: 5px; margin-bottom: 20px; }
980
  .chat-container { border: 1px solid #ddd; border-radius: 5px; height: 400px; overflow-y: auto; padding: 10px; margin-bottom: 20px; background: #fafafa; }
981
- .message { margin: 10px 0; padding: 10px; border-radius: 5px; }
982
- .user-message { background: #007bff; color: white; margin-left: 20%; }
983
- .ai-message { background: #e9ecef; color: #333; margin-right: 20%; }
984
- .system-message { background: #fff3cd; color: #856404; border: 1px solid #ffeaa7; }
985
  .input-group { display: flex; gap: 10px; }
986
  .input-field { flex: 1; padding: 10px; border: 1px solid #ddd; border-radius: 5px; }
987
  .send-btn { padding: 10px 20px; background: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer; }
988
- .send-btn:hover { background: #0056b3; }
989
  .loading { text-align: center; color: #666; font-style: italic; }
990
- .tech-scores { background: #f8f9fa; padding: 15px; border-radius: 5px; margin-bottom: 20px; }
991
- .score-input { width: 80px; padding: 5px; margin: 5px; border: 1px solid #ddd; border-radius: 3px; }
992
- .process-btn { background: #28a745; margin-left: 10px; }
993
- .process-btn:hover { background: #218838; }
994
  </style>
995
  </head>
996
  <body>
997
  <div class="container">
998
  <div class="header">
999
- <h1>🏛️ AEGIS Economics AI with LangGraph</h1>
1000
- <p>Advanced Economic Analysis with Multi-Stage Processing Pipeline</p>
1001
- </div>
1002
-
1003
- <div class="pipeline">
1004
- <h3>🔄 Processing Pipeline</h3>
1005
- <p><strong>Tech Scores → LangGraph Analysis → AEGIS Economics AI → Final Predictions</strong></p>
1006
- <p>LangGraph processes technology threats through multiple analysis stages before final AI prediction.</p>
1007
- </div>
1008
-
1009
- <div class="tech-scores">
1010
- <h3>🎯 Technology Threat Scores (0.0 - 1.0)</h3>
1011
- <div style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 15px;">
1012
- <div>
1013
- <label>AI Score:</label><br>
1014
- <input type="number" id="ai-score" class="score-input" min="0" max="1" step="0.1" value="0.5">
1015
- </div>
1016
- <div>
1017
- <label>Cyber Score:</label><br>
1018
- <input type="number" id="cyber-score" class="score-input" min="0" max="1" step="0.1" value="0.4">
1019
- </div>
1020
- <div>
1021
- <label>Bio Score:</label><br>
1022
- <input type="number" id="bio-score" class="score-input" min="0" max="1" step="0.1" value="0.3">
1023
- </div>
1024
- <div>
1025
- <label>Nuclear Score:</label><br>
1026
- <input type="number" id="nuclear-score" class="score-input" min="0" max="1" step="0.1" value="0.2">
1027
- </div>
1028
- <div>
1029
- <label>Climate Score:</label><br>
1030
- <input type="number" id="climate-score" class="score-input" min="0" max="1" step="0.1" value="0.6">
1031
- </div>
1032
- <div>
1033
- <label>Space Score:</label><br>
1034
- <input type="number" id="space-score" class="score-input" min="0" max="1" step="0.1" value="0.3">
1035
- </div>
1036
- </div>
1037
- <div style="margin-top: 15px;">
1038
- <label>Analysis Year:</label>
1039
- <input type="number" id="year-input" class="score-input" min="2024" max="2100" value="2030">
1040
- <button onclick="processTechScores()" class="send-btn process-btn">🚀 Process via LangGraph</button>
1041
- </div>
1042
  </div>
1043
 
1044
  <div id="chat-container" class="chat-container">
1045
  <div class="message ai-message">
1046
- Hello! I'm AEGIS Economics AI with LangGraph processing.
1047
  <br><br>
1048
- <strong>Two ways to interact:</strong><br>
1049
- 1. <strong>Tech Score Analysis:</strong> Set threat scores above and click "Process via LangGraph"<br>
1050
- 2. <strong>Direct Chat:</strong> Ask economic questions below
1051
- <div id="model-status" style="font-size: 0.8em; color: #666; margin-top: 10px;">
1052
- Checking model status...
1053
- </div>
1054
  </div>
1055
  </div>
1056
 
1057
  <div class="input-group">
1058
- <input type="text" id="user-input" class="input-field" placeholder="Ask about economics, or use tech scores above..." onkeypress="handleKeyPress(event)">
1059
- <button onclick="sendMessage()" class="send-btn">Send</button>
1060
  </div>
1061
  </div>
1062
 
1063
  <script>
1064
- // Check model status on page load
1065
- async function checkModelStatus() {
1066
  try {
1067
  const response = await fetch('/health');
1068
  const data = await response.json();
1069
- const statusDiv = document.getElementById('model-status');
1070
-
1071
- if (data.model_loaded) {
1072
- statusDiv.innerHTML = '✅ Model loaded and ready!<br>🔄 LangGraph pipeline: ' + (data.langgraph_available ? 'Available' : 'Simplified mode');
1073
- statusDiv.style.color = '#28a745';
1074
- } else {
1075
- statusDiv.textContent = '⏳ Model loading... Please wait.';
1076
- statusDiv.style.color = '#ffc107';
1077
- }
1078
  } catch (error) {
1079
- const statusDiv = document.getElementById('model-status');
1080
- statusDiv.textContent = '❌ Connection error';
1081
- statusDiv.style.color = '#dc3545';
1082
  }
1083
  }
1084
 
1085
- window.onload = checkModelStatus;
1086
 
1087
  function handleKeyPress(event) {
1088
  if (event.key === 'Enter') {
@@ -1094,85 +391,29 @@ HTML_TEMPLATE = """
1094
  const chatContainer = document.getElementById('chat-container');
1095
  const messageDiv = document.createElement('div');
1096
  messageDiv.className = `message ${type}-message`;
1097
-
1098
- if (typeof content === 'string') {
1099
- messageDiv.innerHTML = content.replace(/\\n/g, '<br>');
1100
- } else {
1101
- messageDiv.textContent = content;
1102
- }
1103
-
1104
  chatContainer.appendChild(messageDiv);
1105
  chatContainer.scrollTop = chatContainer.scrollHeight;
1106
  }
1107
 
1108
- function showLoading(message = 'AI is processing...') {
1109
  const chatContainer = document.getElementById('chat-container');
1110
  const loadingDiv = document.createElement('div');
1111
  loadingDiv.className = 'loading';
1112
  loadingDiv.id = 'loading';
1113
- loadingDiv.textContent = message;
1114
  chatContainer.appendChild(loadingDiv);
1115
  chatContainer.scrollTop = chatContainer.scrollHeight;
1116
  }
1117
 
1118
  function hideLoading() {
1119
  const loading = document.getElementById('loading');
1120
- if (loading) {
1121
- loading.remove();
1122
- }
1123
- }
1124
-
1125
- async function processTechScores() {
1126
- const techScores = {
1127
- ai: parseFloat(document.getElementById('ai-score').value),
1128
- cyber: parseFloat(document.getElementById('cyber-score').value),
1129
- bio: parseFloat(document.getElementById('bio-score').value),
1130
- nuclear: parseFloat(document.getElementById('nuclear-score').value),
1131
- climate: parseFloat(document.getElementById('climate-score').value),
1132
- space: parseFloat(document.getElementById('space-score').value),
1133
- year: parseInt(document.getElementById('year-input').value)
1134
- };
1135
-
1136
- addMessage(`🎯 Processing Tech Scores via LangGraph Pipeline:<br>
1137
- AI: ${techScores.ai}, Cyber: ${techScores.cyber}, Bio: ${techScores.bio}<br>
1138
- Nuclear: ${techScores.nuclear}, Climate: ${techScores.climate}, Space: ${techScores.space}<br>
1139
- Year: ${techScores.year}`, 'user');
1140
-
1141
- showLoading('🔄 LangGraph processing tech scores... This may take several minutes...');
1142
-
1143
- try {
1144
- const response = await fetch('/process_tech_scores', {
1145
- method: 'POST',
1146
- headers: { 'Content-Type': 'application/json' },
1147
- body: JSON.stringify(techScores)
1148
- });
1149
-
1150
- const data = await response.json();
1151
- hideLoading();
1152
-
1153
- if (data.success) {
1154
- if (data.processing_steps) {
1155
- addMessage(`📋 LangGraph Processing Steps:<br>• ${data.processing_steps.join('<br>• ')}`, 'system');
1156
- }
1157
-
1158
- if (data.final_analysis) {
1159
- addMessage(`🏛️ AEGIS Economics AI Final Analysis:<br><br>${data.final_analysis}`, 'ai');
1160
- } else {
1161
- addMessage('✅ LangGraph processing completed. Check the analysis above.', 'system');
1162
- }
1163
- } else {
1164
- addMessage(`❌ Processing failed: ${data.error}`, 'system');
1165
- }
1166
- } catch (error) {
1167
- hideLoading();
1168
- addMessage('❌ Connection error during tech score processing.', 'system');
1169
- }
1170
  }
1171
 
1172
  async function sendMessage() {
1173
  const input = document.getElementById('user-input');
1174
  const message = input.value.trim();
1175
-
1176
  if (!message) return;
1177
 
1178
  addMessage(message, 'user');
@@ -1192,7 +433,7 @@ HTML_TEMPLATE = """
1192
  if (data.response) {
1193
  addMessage(data.response, 'ai');
1194
  } else {
1195
- addMessage('Sorry, I encountered an error. Please try again.', 'ai');
1196
  }
1197
  } catch (error) {
1198
  hideLoading();
@@ -1211,11 +452,10 @@ def home():
1211
 
1212
  @app.route('/process_tech_scores', methods=['POST'])
1213
  def process_tech_scores():
1214
- """Process technology scores through LangGraph pipeline with memory optimization"""
1215
  try:
1216
  data = request.get_json()
1217
 
1218
- # Create TechScores object
1219
  tech_scores = TechScores(
1220
  ai=data.get('ai', 0.0),
1221
  cyber=data.get('cyber', 0.0),
@@ -1226,161 +466,74 @@ def process_tech_scores():
1226
  year=data.get('year', 2024)
1227
  )
1228
 
1229
- logger.info(f"Processing tech scores: {tech_scores.to_dict()}")
1230
-
1231
- # Clean memory before processing
1232
- cleanup_memory()
1233
-
1234
- # Process through LangGraph with timeout
1235
- try:
1236
- langgraph_result = langgraph_processor.process_tech_scores(tech_scores)
1237
- except Exception as e:
1238
- logger.error(f"LangGraph processing failed: {e}")
1239
- # Fallback to simplified processing
1240
- langgraph_result = langgraph_processor._simplified_processing(tech_scores)
1241
-
1242
- if not langgraph_result['success']:
1243
- return jsonify({'success': False, 'error': 'LangGraph processing failed'})
1244
-
1245
- # Get the optimized prompt from LangGraph
1246
- final_prompt = langgraph_result['final_prompt']
1247
 
1248
- # Truncate prompt if too long to save memory
1249
- if len(final_prompt) > 1000:
1250
- final_prompt = final_prompt[:1000] + "... [truncated for efficiency]"
1251
 
1252
- # Generate final analysis using AEGIS Economics AI
1253
- logger.info("Generating final analysis with AEGIS Economics AI...")
1254
- final_analysis = generate_response(final_prompt)
1255
 
1256
- # Clean memory after processing
1257
- cleanup_memory()
1258
 
1259
  return jsonify({
1260
  'success': True,
1261
- 'processing_steps': langgraph_result.get('processing_steps', []),
1262
- 'langgraph_metadata': langgraph_result.get('metadata', {}),
1263
- 'final_analysis': final_analysis,
1264
- 'processing_mode': 'langgraph' if LANGGRAPH_AVAILABLE else 'simplified'
1265
  })
1266
 
1267
  except Exception as e:
1268
- logger.error(f"Error in tech score processing: {str(e)}")
1269
- cleanup_memory() # Clean memory on error
1270
  return jsonify({'success': False, 'error': str(e)}), 500
1271
 
1272
  @app.route('/chat', methods=['POST'])
1273
  def chat():
1274
- """Handle regular chat messages"""
1275
  try:
1276
  data = request.get_json()
1277
  user_message = data.get('message', '')
1278
 
1279
  if not user_message:
1280
- return jsonify({'error': 'No message provided'}), 400
1281
-
1282
- # Add economics context to regular chat
1283
- economics_prompt = f"""You are AEGIS Economics AI, an expert economic analyst and policy advisor.
1284
- Provide clear, accurate, and insightful responses about economics, finance, markets, and policy.
1285
- Focus on practical analysis and actionable insights.
1286
-
1287
- User: {user_message}
1288
- Assistant:"""
1289
 
1290
- # Generate AI response
1291
- ai_response = generate_response(economics_prompt)
1292
 
1293
- return jsonify({'response': ai_response})
1294
 
1295
  except Exception as e:
1296
- logger.error(f"Error in chat endpoint: {str(e)}")
1297
- return jsonify({'error': 'Internal server error'}), 500
1298
 
1299
- @app.route('/load_model_manual', methods=['POST'])
1300
- def load_model_manual():
1301
- """Manually trigger model loading"""
 
 
 
 
 
 
 
 
 
 
1302
  try:
1303
- logger.info("🔄 Manual model loading triggered...")
1304
- success = load_model()
 
 
1305
 
 
 
1306
  return jsonify({
1307
  'success': success,
1308
- 'model_loaded': model is not None,
1309
- 'tokenizer_loaded': tokenizer is not None,
1310
- 'message': 'Model loaded successfully' if success else 'Model loading failed'
1311
  })
1312
  except Exception as e:
1313
- logger.error(f"Manual model loading error: {str(e)}")
1314
  return jsonify({'success': False, 'error': str(e)}), 500
1315
 
1316
- @app.route('/startup_check')
1317
- def startup_check():
1318
- """Check startup status and trigger model loading if needed"""
1319
- try:
1320
- status = {
1321
- 'model_loaded': chat_pipeline is not None,
1322
- 'tokenizer_loaded': tokenizer is not None,
1323
- 'langgraph_available': LANGGRAPH_AVAILABLE
1324
- }
1325
-
1326
- # If model not loaded, try to load it
1327
- if not chat_pipeline:
1328
- logger.info("Model not loaded, attempting to load...")
1329
- success = load_model()
1330
- status['model_load_attempted'] = True
1331
- status['model_load_success'] = success
1332
- status['model_loaded'] = chat_pipeline is not None
1333
-
1334
- return jsonify(status)
1335
-
1336
- except Exception as e:
1337
- logger.error(f"Startup check error: {str(e)}")
1338
- return jsonify({'error': str(e)}), 500
1339
- @app.route('/health')
1340
- def health():
1341
- """Health check endpoint"""
1342
- return jsonify({
1343
- 'status': 'healthy',
1344
- 'model_loaded': model is not None,
1345
- 'tokenizer_loaded': tokenizer is not None,
1346
- 'langgraph_available': LANGGRAPH_AVAILABLE,
1347
- 'processing_mode': 'langgraph' if LANGGRAPH_AVAILABLE else 'simplified'
1348
- })
1349
-
1350
- @app.errorhandler(Exception)
1351
- def handle_exception(e):
1352
- """Handle all unhandled exceptions"""
1353
- logger.error(f"Unhandled exception: {e}", exc_info=True)
1354
- return jsonify({
1355
- 'error': 'Internal server error',
1356
- 'message': str(e)
1357
- }), 500
1358
-
1359
  if __name__ == '__main__':
1360
- # Load model on startup
1361
- logger.info("Starting AEGIS Economics AI with LangGraph...")
1362
- logger.info(f"LangGraph available: {LANGGRAPH_AVAILABLE}")
1363
-
1364
- # Load model immediately on startup
1365
- logger.info("Loading model from Gaston895/Aegisecon1...")
1366
- model_loaded = load_model()
1367
-
1368
- if model_loaded:
1369
- logger.info("✅ Model loaded successfully, starting server...")
1370
- else:
1371
- logger.error("❌ Model failed to load. Starting server anyway, but model endpoints will fail.")
1372
-
1373
- app.run(host='0.0.0.0', port=7860, debug=False)
1374
- else:
1375
- # For production deployment (Gunicorn), load model when module is imported
1376
- logger.info("Production mode: Loading model during module import...")
1377
- logger.info(f"LangGraph available: {LANGGRAPH_AVAILABLE}")
1378
-
1379
- # Try to load model, but don't fail if it doesn't work (like the working version)
1380
- logger.info("Attempting to load model...")
1381
- model_loaded = load_model()
1382
-
1383
- if model_loaded:
1384
- logger.info("✅ Model loaded successfully for production!")
1385
- else:
1386
- logger.warning("⚠️ Model failed to load, but server will start anyway. Model can be loaded via /load_model_manual endpoint.")
 
1
  #!/usr/bin/env python3
2
  """
3
  Enhanced Flask App with LangGraph + AEGIS Economics AI
4
+ CPU-optimized version with memory/timeout fixes
 
5
  """
6
 
7
  from flask import Flask, request, jsonify, render_template_string
 
13
  from datetime import datetime
14
  from typing import Dict, List, Any, Optional
15
  from dataclasses import dataclass
16
+ import time
17
+ import gc
18
 
19
  # Configure logging
20
  logging.basicConfig(level=logging.INFO)
21
  logger = logging.getLogger(__name__)
22
 
23
+ # Disable parallelism to reduce memory usage
24
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
25
+
26
  app = Flask(__name__)
27
 
28
  # Global variables
29
  model = None
30
  tokenizer = None
31
 
32
  @dataclass
33
  class TechScores:
 
63
  threats.append(threat)
64
  return threats
65
 
66
+ def cleanup_memory():
67
+ """Clean up memory aggressively"""
68
+ try:
69
+ gc.collect()
70
+ if torch.cuda.is_available():
71
+ torch.cuda.empty_cache()
72
+ torch.cuda.synchronize()
73
+ except Exception as e:
74
+ logger.warning(f"Memory cleanup warning: {e}")
75
 
76
  class LangGraphProcessor:
77
+ """Simplified LangGraph-based tech score processor"""
78
 
79
  def __init__(self):
80
  self.graph = None
81
 
82
  def _ai_threat_analysis(self, score: float) -> str:
83
  if score >= 0.8: return "AGI/Singularity risk, massive economic disruption"
 
115
  elif score >= 0.4: return "Space security concerns, increased space militarization"
116
  else: return "Stable space environment, continued commercial growth"
117
 
118
+ def _threat_level_description(self, total_threat: float) -> str:
119
+ if total_threat >= 0.8: return "CRITICAL"
120
+ elif total_threat >= 0.6: return "HIGH"
121
+ elif total_threat >= 0.4: return "MODERATE"
122
+ elif total_threat >= 0.2: return "LOW"
123
+ else: return "MINIMAL"
124
 
125
  def process_tech_scores(self, tech_scores: TechScores) -> Dict[str, Any]:
126
+ """Process tech scores with simplified analysis"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
  total_threat = tech_scores.get_total_threat_level()
128
  dominant_threats = tech_scores.get_dominant_threats()
129
 
130
+ analysis = f"""TECHNOLOGY THREAT ANALYSIS (Year {tech_scores.year}):
131
 
132
+ Overall Threat Level: {total_threat:.3f} ({self._threat_level_description(total_threat)})
133
+ Dominant Threats: {', '.join(dominant_threats) if dominant_threats else 'None above threshold'}
 
 
 
 
134
 
135
+ Detailed Analysis:
136
+ - AI: {tech_scores.ai:.3f} - {self._ai_threat_analysis(tech_scores.ai)}
137
+ - Cyber: {tech_scores.cyber:.3f} - {self._cyber_threat_analysis(tech_scores.cyber)}
138
+ - Bio: {tech_scores.bio:.3f} - {self._bio_threat_analysis(tech_scores.bio)}
139
+ - Nuclear: {tech_scores.nuclear:.3f} - {self._nuclear_threat_analysis(tech_scores.nuclear)}
140
+ - Climate: {tech_scores.climate:.3f} - {self._climate_threat_analysis(tech_scores.climate)}
141
+ - Space: {tech_scores.space:.3f} - {self._space_threat_analysis(tech_scores.space)}"""
142
+
143
+ simplified_prompt = f"""{analysis}
144
 
145
+ Based on these technology threat scores, provide an economic analysis with:
146
+ 1. Market shock index (0-1)
147
+ 2. GDP impact projections
148
+ 3. Key policy recommendations
149
+ 4. Investment implications
 
 
150
 
151
+ Keep the analysis concise and actionable."""
152
 
153
  return {
154
  'success': True,
155
  'final_prompt': simplified_prompt,
156
+ 'processing_steps': ['Simplified analysis completed'],
157
  'metadata': {
158
  'total_threat_level': total_threat,
159
  'dominant_threats': dominant_threats,
160
+ 'processing_timestamp': datetime.now().isoformat(),
161
  'processing_mode': 'simplified'
162
  }
163
  }
164
 
165
+ # Initialize processor
166
+ processor = LangGraphProcessor()
167
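# Illustrative usage sketch (editor's example, not part of the commit; score values
# are invented): how the simplified processor is exercised before model generation.
#
#   scores = TechScores(ai=0.7, cyber=0.4, bio=0.1, nuclear=0.2,
#                       climate=0.5, space=0.1, year=2025)
#   result = processor.process_tech_scores(scores)
#   result['metadata']['total_threat_level']   # weighted combination of the six scores
#   result['final_prompt']                     # text later handed to generate_response()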
 
168
  def load_model():
169
+ """Load the model with aggressive memory optimizations"""
170
+ global model, tokenizer
171
 
172
  try:
173
+ logger.info("🔄 Loading model from Gaston895/Aegisecon1...")
174
 
175
+ # Primary fine-tuned model; a smaller public fallback is attempted below if this fails to load
176
  model_repo = "Gaston895/Aegisecon1"
177
 
178
+ # First try loading with aggressive optimizations
179
  tokenizer = AutoTokenizer.from_pretrained(
180
  model_repo,
181
  trust_remote_code=True,
182
  use_auth_token=False
183
  )
184
 
185
+ # Fix pad token
186
  if tokenizer.pad_token is None:
187
  tokenizer.pad_token = tokenizer.eos_token
 
188
 
189
+ # Load model with maximum CPU optimizations
190
  model = AutoModelForCausalLM.from_pretrained(
191
  model_repo,
192
+ torch_dtype=torch.float32, # Use float32 for CPU stability
193
+ device_map="cpu",
194
  trust_remote_code=True,
195
  use_auth_token=False,
196
+ low_cpu_mem_usage=True,
197
+ offload_folder="./offload", # Offload to disk if needed
198
+ offload_state_dict=True
199
  )
200
 
201
+ # Set to eval mode
202
+ model.eval()
203
 
204
+ logger.info("Model loaded successfully!")
205
+ logger.info(f"Model size: {sum(p.numel() for p in model.parameters()):,} parameters")
 
206
 
207
  return True
208
 
209
  except Exception as e:
210
+ logger.error(f" Model loading failed: {e}")
211
+
212
+ # Try loading a much smaller model as fallback
213
  try:
214
+ logger.info("🔄 Trying fallback model (Qwen2-1.5B)...")
215
  tokenizer = AutoTokenizer.from_pretrained(
216
+ "Qwen/Qwen2-1.5B",
217
  trust_remote_code=True
218
  )
 
219
  if tokenizer.pad_token is None:
220
  tokenizer.pad_token = tokenizer.eos_token
221
+
222
  model = AutoModelForCausalLM.from_pretrained(
223
  "Qwen/Qwen2-1.5B",
224
+ torch_dtype=torch.float32,
225
  device_map="cpu",
226
  trust_remote_code=True,
227
  low_cpu_mem_usage=True
228
  )
229
+ model.eval()
230
+
231
+ logger.info("✅ Fallback model loaded successfully!")
232
  return True
233
+
234
  except Exception as e2:
235
+ logger.error(f"Fallback also failed: {e2}")
236
  return False
237
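# Editor's illustrative sketch (not part of the commit): a rough estimate of the RAM a
# loaded model needs, which is why float32 weights on CPU can exhaust memory here; a
# 1.5B-parameter model already takes roughly 1.5e9 * 4 bytes, about 6 GB, for weights alone.
def estimate_model_ram_gb(loaded_model) -> float:
    total_bytes = sum(p.numel() * p.element_size() for p in loaded_model.parameters())
    return total_bytes / 1024 ** 3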
 
238
+ def generate_response(prompt, max_time=20):
239
+ """Generate response with strict timeout and memory limits"""
240
+ start_time = time.time()
241
+
242
  try:
243
  if model is None or tokenizer is None:
244
+ return "Model not available. Please try the /health endpoint to check status."
245
 
246
+ # Bail out early if the time budget has somehow already been spent
247
+ if time.time() - start_time > max_time:
248
+ return "Response generation timed out. Please try with a shorter query."
 
249
 
250
+ # Clean memory before generation
251
+ cleanup_memory()
252
+
253
+ # Prepare prompt
254
+ system_prompt = "You are AEGIS Economics AI, an expert economic analyst."
255
+ full_prompt = f"{system_prompt}\n\n{prompt}\n\nAssistant:"
256
 
257
+ # Tokenize with very conservative limits
258
  inputs = tokenizer(
259
  full_prompt,
260
  return_tensors="pt",
261
  truncation=True,
262
+ max_length=512, # Reduced from 1024
263
+ padding=True
 
264
  )
265
 
266
+ # Generate with conservative settings for CPU
267
  with torch.no_grad():
268
  outputs = model.generate(
269
  inputs.input_ids,
270
+ max_new_tokens=150, # Reduced from 256
271
+ attention_mask=inputs.attention_mask, # pass the mask explicitly since the pad token reuses eos
272
+ do_sample=False, # Greedy decoding for speed
273
+ pad_token_id=tokenizer.pad_token_id,
 
274
  eos_token_id=tokenizer.eos_token_id,
275
+ repetition_penalty=1.05, # Reduced penalty
276
+ no_repeat_ngram_size=2,
277
+ num_beams=1, # Single beam for speed
278
+ max_time=max_time # stop decoding once the per-request time budget is spent
279
  )
280
 
281
+ # Decode response
282
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
283
 
284
+ # Extract assistant response
285
  if "Assistant:" in response:
286
  response = response.split("Assistant:")[-1].strip()
287
 
288
+ # Check total time
289
+ total_time = time.time() - start_time
290
+ logger.info(f"Generation completed in {total_time:.1f}s")
291
+
292
+ # Clean memory after generation
293
+ cleanup_memory()
294
+
295
  return response
296
 
297
+ except torch.cuda.OutOfMemoryError:
298
+ return "Out of memory error. The model is too large for this environment."
299
  except Exception as e:
300
+ logger.error(f"Generation error: {e}")
301
+ return "Sorry, I encountered an error. Please try again."
302
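# Editor's illustrative example (not part of the commit) of calling the helper directly:
#
#   text = generate_response("Assess the economic impact of an AI threat score of 0.7",
#                            max_time=10)
#
# Even with max_new_tokens=150 and greedy decoding, a reply can take minutes on CPU;
# the max_time budget only covers this function, not model loading.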
+
303
+ # Load model immediately
304
+ model_loaded = load_model()
305
 
 
306
  HTML_TEMPLATE = """
307
  <!DOCTYPE html>
308
  <html>
309
  <head>
310
+ <title>AEGIS Economics AI</title>
311
  <meta charset="utf-8">
312
  <meta name="viewport" content="width=device-width, initial-scale=1">
313
  <style>
314
  body { font-family: Arial, sans-serif; margin: 0; padding: 20px; background: #f5f5f5; }
315
  .container { max-width: 900px; margin: 0 auto; background: white; padding: 20px; border-radius: 10px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
316
+ .header { text-align: center; margin-bottom: 20px; }
 
317
  .chat-container { border: 1px solid #ddd; border-radius: 5px; height: 400px; overflow-y: auto; padding: 10px; margin-bottom: 20px; background: #fafafa; }
318
+ .message { margin: 10px 0; padding: 10px; border-radius: 5px; max-width: 80%; }
319
+ .user-message { background: #007bff; color: white; margin-left: auto; }
320
+ .ai-message { background: #e9ecef; color: #333; margin-right: auto; }
 
321
  .input-group { display: flex; gap: 10px; }
322
  .input-field { flex: 1; padding: 10px; border: 1px solid #ddd; border-radius: 5px; }
323
  .send-btn { padding: 10px 20px; background: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer; }
324
+ .send-btn:disabled { background: #ccc; cursor: not-allowed; }
325
  .loading { text-align: center; color: #666; font-style: italic; }
326
+ .status { padding: 10px; border-radius: 5px; margin-bottom: 15px; text-align: center; }
327
+ .status-good { background: #d4edda; color: #155724; }
328
+ .status-warning { background: #fff3cd; color: #856404; }
329
+ .status-error { background: #f8d7da; color: #721c24; }
330
  </style>
331
  </head>
332
  <body>
333
  <div class="container">
334
  <div class="header">
335
+ <h1>🏛️ AEGIS Economics AI</h1>
336
+ <p>Economic Analysis with Technology Threat Assessment</p>
337
+ <div id="status" class="status status-warning">Checking status...</div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  </div>
339
 
340
  <div id="chat-container" class="chat-container">
341
  <div class="message ai-message">
342
+ Welcome! I'm AEGIS Economics AI. I can help analyze technology threats and their economic impacts.
343
  <br><br>
344
+ <strong>Try asking:</strong><br>
345
+ "Analyze AI threat level 0.7 for economic impact"<br>
346
+ "What are the economic risks of cyber threats?"<br>
347
+ "How does climate change affect global markets?"<br>
 
 
348
  </div>
349
  </div>
350
 
351
  <div class="input-group">
352
+ <input type="text" id="user-input" class="input-field" placeholder="Ask about economics or technology threats..." onkeypress="handleKeyPress(event)">
353
+ <button id="send-btn" onclick="sendMessage()" class="send-btn">Send</button>
354
  </div>
355
  </div>
356
 
357
  <script>
358
+ function updateStatus(data) {
359
+ const status = document.getElementById('status');
360
+ if (data.model_loaded) {
361
+ status.className = 'status status-good';
362
+ status.innerHTML = '✅ Model loaded and ready!';
363
+ document.getElementById('send-btn').disabled = false;
364
+ } else {
365
+ status.className = 'status status-error';
366
+ status.innerHTML = '❌ Model not loaded. Please refresh or check logs.';
367
+ document.getElementById('send-btn').disabled = true;
368
+ }
369
+ }
370
+
371
+ async function checkStatus() {
372
  try {
373
  const response = await fetch('/health');
374
  const data = await response.json();
375
+ updateStatus(data);
 
 
 
 
 
 
 
 
376
  } catch (error) {
377
+ document.getElementById('status').className = 'status status-error';
378
+ document.getElementById('status').textContent = '❌ Connection error';
 
379
  }
380
  }
381
 
382
+ window.onload = checkStatus;
383
 
384
  function handleKeyPress(event) {
385
  if (event.key === 'Enter') {
 
391
  const chatContainer = document.getElementById('chat-container');
392
  const messageDiv = document.createElement('div');
393
  messageDiv.className = `message ${type}-message`;
394
+ messageDiv.innerHTML = content.replace(/\n/g, '<br>');
 
 
 
 
 
 
395
  chatContainer.appendChild(messageDiv);
396
  chatContainer.scrollTop = chatContainer.scrollHeight;
397
  }
398
 
399
+ function showLoading() {
400
  const chatContainer = document.getElementById('chat-container');
401
  const loadingDiv = document.createElement('div');
402
  loadingDiv.className = 'loading';
403
  loadingDiv.id = 'loading';
404
+ loadingDiv.textContent = 'Thinking...';
405
  chatContainer.appendChild(loadingDiv);
406
  chatContainer.scrollTop = chatContainer.scrollHeight;
407
  }
408
 
409
  function hideLoading() {
410
  const loading = document.getElementById('loading');
411
+ if (loading) loading.remove();
 
 
412
  }
413
 
414
  async function sendMessage() {
415
  const input = document.getElementById('user-input');
416
  const message = input.value.trim();
 
417
  if (!message) return;
418
 
419
  addMessage(message, 'user');
 
433
  if (data.response) {
434
  addMessage(data.response, 'ai');
435
  } else {
436
+ addMessage('Sorry, I encountered an error.', 'ai');
437
  }
438
  } catch (error) {
439
  hideLoading();
 
452
 
453
  @app.route('/process_tech_scores', methods=['POST'])
454
  def process_tech_scores():
455
+ """Process technology scores"""
456
  try:
457
  data = request.get_json()
458
 
 
459
  tech_scores = TechScores(
460
  ai=data.get('ai', 0.0),
461
  cyber=data.get('cyber', 0.0),
 
466
  year=data.get('year', 2024)
467
  )
468
 
469
+ logger.info(f"Processing tech scores...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
470
 
471
+ # Process with simplified analysis
472
+ result = processor.process_tech_scores(tech_scores)
 
473
 
474
+ if not result['success']:
475
+ return jsonify({'success': False, 'error': 'Processing failed'})
 
476
 
477
+ # Generate analysis
478
+ final_analysis = generate_response(result['final_prompt'], max_time=15)
479
 
480
  return jsonify({
481
  'success': True,
482
+ 'processing_steps': result['processing_steps'],
483
+ 'final_analysis': final_analysis
 
 
484
  })
485
 
486
  except Exception as e:
487
+ logger.error(f"Error: {e}")
 
488
  return jsonify({'success': False, 'error': str(e)}), 500
489
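# Editor's illustrative helper (not part of the commit): shows the JSON payload that
# /process_tech_scores expects, exercised in-process with Flask's test client so no
# running server or extra dependency is needed. All values are made up, and the call
# still runs full model generation, so it is slow on CPU.
def _example_tech_scores_request():
    payload = {'ai': 0.7, 'cyber': 0.4, 'bio': 0.1, 'nuclear': 0.2,
               'climate': 0.5, 'space': 0.1, 'year': 2025}
    with app.test_client() as client:
        return client.post('/process_tech_scores', json=payload).get_json()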
 
490
  @app.route('/chat', methods=['POST'])
491
  def chat():
492
+ """Handle chat messages"""
493
  try:
494
  data = request.get_json()
495
  user_message = data.get('message', '')
496
 
497
  if not user_message:
498
+ return jsonify({'error': 'No message provided'}), 400
 
499
 
500
+ # Generate response with timeout
501
+ response = generate_response(user_message, max_time=15)
502
 
503
+ return jsonify({'response': response})
504
 
505
  except Exception as e:
506
+ logger.error(f"Chat error: {e}")
507
+ return jsonify({'error': 'Server error'}), 500
508
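# Editor's illustrative example (not part of the commit): the /chat endpoint takes a
# plain JSON body and returns generated text, whether called from the browser UI above
# or any HTTP client:
#
#   POST /chat   {"message": "What are the economic risks of cyber threats?"}
#   -> 200       {"response": "<generated analysis>"}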
 
509
+ @app.route('/health')
510
+ def health():
511
+ """Health check"""
512
+ return jsonify({
513
+ 'status': 'ok',
514
+ 'model_loaded': model is not None,
515
+ 'timestamp': datetime.now().isoformat()
516
+ })
517
+
518
+ @app.route('/reload_model')
519
+ def reload_model():
520
+ """Reload model endpoint"""
521
+ global model, tokenizer
522
  try:
523
+ # Clear existing model
524
+ model = None
525
+ tokenizer = None
526
+ cleanup_memory()
527
 
528
+ # Reload
529
+ success = load_model()
530
  return jsonify({
531
  'success': success,
532
+ 'message': 'Model reloaded' if success else 'Reload failed'
 
 
533
  })
534
  except Exception as e:
 
535
  return jsonify({'success': False, 'error': str(e)}), 500
536
 
 
537
  if __name__ == '__main__':
538
+ logger.info("🚀 Starting AEGIS Economics AI...")
539
+ app.run(host='0.0.0.0', port=7860, debug=False, threaded=True)
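# Editor's illustrative note (not part of the commit): once the server is listening on
# port 7860, a quick check from another shell confirms whether the model loaded, e.g.:
#
#   import requests   # assumes the requests package is installed
#   print(requests.get("http://localhost:7860/health", timeout=10).json())
#   # expected shape: {"status": "ok", "model_loaded": true, "timestamp": "..."}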