Upload app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 """
-
-
+Flask App with Gunicorn for Deep Modal Files
+Economics Chat Application using Qwen2 model
 """

 from flask import Flask, request, jsonify, render_template_string
@@ -9,300 +9,18 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import os
 import logging
-import json
-from datetime import datetime
-from typing import Dict, List, Any, Optional
-from dataclasses import dataclass
-import time
-import gc

 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

-# Disable parallelism to reduce memory usage
-os.environ["TOKENIZERS_PARALLELISM"] = "false"
-
 app = Flask(__name__)

-# Global variables
+# Global variables for model and tokenizer
 model = None
 tokenizer = None

-
-class TechScores:
-    """Technology threat scores structure"""
-    ai: float = 0.0
-    cyber: float = 0.0
-    bio: float = 0.0
-    nuclear: float = 0.0
-    climate: float = 0.0
-    space: float = 0.0
-    year: int = 2024
-
-    def to_dict(self) -> Dict[str, float]:
-        return {
-            'AI': self.ai,
-            'Cyber': self.cyber,
-            'Bio': self.bio,
-            'Nuclear': self.nuclear,
-            'Climate': self.climate,
-            'Space': self.space
-        }
-
-    def get_total_threat_level(self) -> float:
-        """Calculate overall threat level"""
-        return (self.ai + self.cyber + self.bio + self.nuclear + self.climate + self.space) / 6
-
-    def get_dominant_threats(self, threshold: float = 0.6) -> List[str]:
-        """Get threats above threshold"""
-        threats = []
-        scores = self.to_dict()
-        for threat, score in scores.items():
-            if score >= threshold:
-                threats.append(threat)
-        return threats
-
-def cleanup_memory():
-    """Clean up memory aggressively"""
-    try:
-        gc.collect()
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-    except Exception as e:
-        logger.warning(f"Memory cleanup warning: {e}")
-
-class LangGraphProcessor:
-    """Simplified LangGraph-based tech score processor"""
-
-    def __init__(self):
-        self.graph = None
-
-    def _ai_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "AGI/Singularity risk, massive economic disruption"
-        elif score >= 0.6: return "Advanced AI deployment, significant job displacement"
-        elif score >= 0.4: return "AI automation acceleration, sector-specific impacts"
-        else: return "Gradual AI integration, manageable transitions"
-
-    def _cyber_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "Critical infrastructure at risk, potential economic paralysis"
-        elif score >= 0.6: return "Major cybersecurity incidents, financial system vulnerabilities"
-        elif score >= 0.4: return "Increased cyber attacks, business continuity risks"
-        else: return "Standard cyber threats, manageable with current defenses"
-
-    def _bio_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "Pandemic-level biological threats, global economic shutdown risk"
-        elif score >= 0.6: return "Significant biological incidents, healthcare system strain"
-        elif score >= 0.4: return "Regional biological threats, supply chain disruptions"
-        else: return "Contained biological risks, minimal economic impact"
-
-    def _nuclear_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "Nuclear conflict risk, catastrophic economic collapse"
-        elif score >= 0.6: return "Nuclear incidents, regional economic devastation"
-        elif score >= 0.4: return "Nuclear security concerns, defense spending increases"
-        else: return "Stable nuclear environment, minimal economic impact"
-
-    def _climate_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "Climate catastrophe, fundamental economic restructuring needed"
-        elif score >= 0.6: return "Severe climate impacts, major adaptation costs"
-        elif score >= 0.4: return "Accelerating climate change, increasing economic pressures"
-        else: return "Manageable climate impacts, gradual adaptation"
-
-    def _space_threat_analysis(self, score: float) -> str:
-        if score >= 0.8: return "Space warfare/debris cascade, satellite infrastructure collapse"
-        elif score >= 0.6: return "Major space incidents, communication/GPS disruptions"
-        elif score >= 0.4: return "Space security concerns, increased space militarization"
-        else: return "Stable space environment, continued commercial growth"
-
-    def _threat_level_description(self, total_threat: float) -> str:
-        if total_threat >= 0.8: return "CRITICAL"
-        elif total_threat >= 0.6: return "HIGH"
-        elif total_threat >= 0.4: return "MODERATE"
-        elif total_threat >= 0.2: return "LOW"
-        else: return "MINIMAL"
-
-    def process_tech_scores(self, tech_scores: TechScores) -> Dict[str, Any]:
-        """Process tech scores with simplified analysis"""
-        total_threat = tech_scores.get_total_threat_level()
-        dominant_threats = tech_scores.get_dominant_threats()
-
-        analysis = f"""TECHNOLOGY THREAT ANALYSIS (Year {tech_scores.year}):
-
-Overall Threat Level: {total_threat:.3f} ({self._threat_level_description(total_threat)})
-Dominant Threats: {', '.join(dominant_threats) if dominant_threats else 'None above threshold'}
-
-Detailed Analysis:
-- AI: {tech_scores.ai:.3f} - {self._ai_threat_analysis(tech_scores.ai)}
-- Cyber: {tech_scores.cyber:.3f} - {self._cyber_threat_analysis(tech_scores.cyber)}
-- Bio: {tech_scores.bio:.3f} - {self._bio_threat_analysis(tech_scores.bio)}
-- Nuclear: {tech_scores.nuclear:.3f} - {self._nuclear_threat_analysis(tech_scores.nuclear)}
-- Climate: {tech_scores.climate:.3f} - {self._climate_threat_analysis(tech_scores.climate)}
-- Space: {tech_scores.space:.3f} - {self._space_threat_analysis(tech_scores.space)}"""
-
-        simplified_prompt = f"""{analysis}
-
-Based on these technology threat scores, provide an economic analysis with:
-1. Market shock index (0-1)
-2. GDP impact projections
-3. Key policy recommendations
-4. Investment implications
-
-Keep the analysis concise and actionable."""
-
-        return {
-            'success': True,
-            'final_prompt': simplified_prompt,
-            'processing_steps': ['Simplified analysis completed'],
-            'metadata': {
-                'total_threat_level': total_threat,
-                'dominant_threats': dominant_threats,
-                'processing_timestamp': datetime.now().isoformat(),
-                'processing_mode': 'simplified'
-            }
-        }
-
-# Initialize processor
-processor = LangGraphProcessor()
-
-def load_model():
-    """Load the model with aggressive memory optimizations"""
-    global model, tokenizer
-
-    try:
-        logger.info("🔄 Loading model from Gaston895/Aegisecon1...")
-
-        # Use a smaller model variant for testing if the main one is too large
-        model_repo = "Gaston895/Aegisecon1"
-
-        # First try loading with aggressive optimizations
-        tokenizer = AutoTokenizer.from_pretrained(
-            model_repo,
-            trust_remote_code=True,
-            use_auth_token=False
-        )
-
-        # Fix pad token
-        if tokenizer.pad_token is None:
-            tokenizer.pad_token = tokenizer.eos_token
-
-        # Load model with maximum CPU optimizations
-        model = AutoModelForCausalLM.from_pretrained(
-            model_repo,
-            torch_dtype=torch.float32,  # Use float32 for CPU stability
-            device_map="cpu",
-            trust_remote_code=True,
-            use_auth_token=False,
-            low_cpu_mem_usage=True,
-            offload_folder="./offload",  # Offload to disk if needed
-            offload_state_dict=True
-        )
-
-        # Set to eval mode
-        model.eval()
-
-        logger.info("✅ Model loaded successfully!")
-        logger.info(f"Model size: {sum(p.numel() for p in model.parameters()):,} parameters")
-
-        return True
-
-    except Exception as e:
-        logger.error(f"❌ Model loading failed: {e}")
-
-        # Try loading a much smaller model as fallback
-        try:
-            logger.info("🔄 Trying fallback model (Qwen2-1.5B)...")
-            tokenizer = AutoTokenizer.from_pretrained(
-                "Qwen/Qwen2-1.5B",
-                trust_remote_code=True
-            )
-            if tokenizer.pad_token is None:
-                tokenizer.pad_token = tokenizer.eos_token
-
-            model = AutoModelForCausalLM.from_pretrained(
-                "Qwen/Qwen2-1.5B",
-                torch_dtype=torch.float32,
-                device_map="cpu",
-                trust_remote_code=True,
-                low_cpu_mem_usage=True
-            )
-            model.eval()
-
-            logger.info("✅ Fallback model loaded successfully!")
-            return True
-
-        except Exception as e2:
-            logger.error(f"❌ Fallback also failed: {e2}")
-            return False
-
-def generate_response(prompt, max_time=20):
-    """Generate response with strict timeout and memory limits"""
-    start_time = time.time()
-
-    try:
-        if model is None or tokenizer is None:
-            return "Model not available. Please try the /health endpoint to check status."
-
-        # Check timeout
-        if time.time() - start_time > max_time:
-            return "Response generation timed out. Please try with a shorter query."
-
-        # Clean memory before generation
-        cleanup_memory()
-
-        # Prepare prompt
-        system_prompt = "You are AEGIS Economics AI, an expert economic analyst."
-        full_prompt = f"{system_prompt}\n\n{prompt}\n\nAssistant:"
-
-        # Tokenize with very conservative limits
-        inputs = tokenizer(
-            full_prompt,
-            return_tensors="pt",
-            truncation=True,
-            max_length=512,  # Reduced from 1024
-            padding=True
-        )
-
-        # Generate with conservative settings for CPU
-        with torch.no_grad():
-            outputs = model.generate(
-                inputs.input_ids,
-                max_new_tokens=150,  # Reduced from 256
-                temperature=0.7,
-                do_sample=False,  # Greedy decoding for speed
-                pad_token_id=tokenizer.pad_token_id,
-                eos_token_id=tokenizer.eos_token_id,
-                repetition_penalty=1.05,  # Reduced penalty
-                no_repeat_ngram_size=2,
-                num_beams=1,  # Single beam for speed
-                early_stopping=True
-            )
-
-        # Decode response
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        # Extract assistant response
-        if "Assistant:" in response:
-            response = response.split("Assistant:")[-1].strip()
-
-        # Check total time
-        total_time = time.time() - start_time
-        logger.info(f"Generation completed in {total_time:.1f}s")
-
-        # Clean memory after generation
-        cleanup_memory()
-
-        return response
-
-    except torch.cuda.OutOfMemoryError:
-        return "Out of memory error. The model is too large for this environment."
-    except Exception as e:
-        logger.error(f"Generation error: {e}")
-        return "Sorry, I encountered an error. Please try again."
-
-# Load model immediately
-model_loaded = load_model()
-
+# HTML template
 HTML_TEMPLATE = """
 <!DOCTYPE html>
 <html>
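The hunk above deletes the whole threat-scoring layer: TechScores, cleanup_memory(), LangGraphProcessor, the timeout-guarded generate_response(), and the eager module-level model load. For reference, a minimal sketch of how the deleted pipeline fit together, reconstructed from the removed definitions (hypothetical driver code, not part of either version of app.py):

    # Hypothetical driver, reconstructed from the definitions deleted above.
    scores = TechScores()
    scores.ai = 0.7       # at or above the 0.6 default threshold
    scores.cyber = 0.65   # likewise reported as a dominant threat
    result = processor.process_tech_scores(scores)
    print(result['metadata']['dominant_threats'])   # ['AI', 'Cyber']
    # result['final_prompt'] was then fed to generate_response() by the
    # removed /process_tech_scores route.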
@@ -312,74 +30,84 @@ HTML_TEMPLATE = """
     <meta name="viewport" content="width=device-width, initial-scale=1">
     <style>
         body { font-family: Arial, sans-serif; margin: 0; padding: 20px; background: #f5f5f5; }
-        .container { max-width:
-        .header { text-align: center; margin-bottom:
+        .container { max-width: 800px; margin: 0 auto; background: white; padding: 20px; border-radius: 10px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
+        .header { text-align: center; margin-bottom: 30px; }
         .chat-container { border: 1px solid #ddd; border-radius: 5px; height: 400px; overflow-y: auto; padding: 10px; margin-bottom: 20px; background: #fafafa; }
-        .message { margin: 10px 0; padding: 10px; border-radius: 5px;
-        .user-message { background: #007bff; color: white; margin-left:
-        .ai-message { background: #e9ecef; color: #333; margin-right:
+        .message { margin: 10px 0; padding: 10px; border-radius: 5px; }
+        .user-message { background: #007bff; color: white; margin-left: 20%; }
+        .ai-message { background: #e9ecef; color: #333; margin-right: 20%; }
         .input-group { display: flex; gap: 10px; }
         .input-field { flex: 1; padding: 10px; border: 1px solid #ddd; border-radius: 5px; }
         .send-btn { padding: 10px 20px; background: #007bff; color: white; border: none; border-radius: 5px; cursor: pointer; }
-        .send-btn:
+        .send-btn:hover { background: #0056b3; }
         .loading { text-align: center; color: #666; font-style: italic; }
-        .status { padding: 10px; border-radius: 5px; margin-bottom: 15px; text-align: center; }
-        .status-good { background: #d4edda; color: #155724; }
-        .status-warning { background: #fff3cd; color: #856404; }
-        .status-error { background: #f8d7da; color: #721c24; }
     </style>
 </head>
 <body>
     <div class="container">
         <div class="header">
             <h1>🏛️ AEGIS Economics AI</h1>
-            <p>Economic Analysis
-            <div id="status" class="status status-warning">Checking status...</div>
+            <p>Advanced Economic Analysis & Policy Insights</p>
         </div>

         <div id="chat-container" class="chat-container">
             <div class="message ai-message">
-
-                <
-
-
-                • "What are the economic risks of cyber threats?"<br>
-                • "How does climate change affect global markets?"<br>
+                Hello! I'm AEGIS Economics AI. Ask me about economic policies, market analysis, or financial strategies.
+                <div id="model-status" style="font-size: 0.8em; color: #666; margin-top: 5px;">
+                    Checking model status...
+                </div>
             </div>
         </div>

         <div class="input-group">
-            <input type="text" id="user-input" class="input-field" placeholder="Ask about economics
-            <button
+            <input type="text" id="user-input" class="input-field" placeholder="Ask about economics, policy, markets..." onkeypress="handleKeyPress(event)">
+            <button onclick="sendMessage()" class="send-btn">Send</button>
         </div>
     </div>

     <script>
-
-
-
-
-
-        document.getElementById('
-
-
-
-
+        // Check model status on page load
+        async function checkModelStatus() {
+            try {
+                const response = await fetch('/health');
+                const data = await response.json();
+                const statusDiv = document.getElementById('model-status');
+
+                if (data.model_loaded) {
+                    statusDiv.textContent = '✅ Model loaded and ready!';
+                    statusDiv.style.color = '#28a745';
+                } else {
+                    statusDiv.textContent = '⏳ Model loading... Please wait.';
+                    statusDiv.style.color = '#ffc107';
+                    // Try to load model
+                    setTimeout(tryLoadModel, 2000);
+                }
+            } catch (error) {
+                const statusDiv = document.getElementById('model-status');
+                statusDiv.textContent = '❌ Connection error';
+                statusDiv.style.color = '#dc3545';
             }
         }

-        async function
+        async function tryLoadModel() {
             try {
-                const response = await fetch('/
+                const response = await fetch('/load_model', { method: 'POST' });
                 const data = await response.json();
-
+
+                if (data.success) {
+                    const statusDiv = document.getElementById('model-status');
+                    statusDiv.textContent = '✅ Model loaded successfully!';
+                    statusDiv.style.color = '#28a745';
+                } else {
+                    setTimeout(checkModelStatus, 5000); // Check again in 5 seconds
+                }
             } catch (error) {
-
-                document.getElementById('status').textContent = '❌ Connection error';
+                setTimeout(checkModelStatus, 5000);
             }
         }

-
+        // Call on page load
+        window.onload = checkModelStatus;

         function handleKeyPress(event) {
             if (event.key === 'Enter') {
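In the new template the page drives model loading from the browser: checkModelStatus() polls /health, and tryLoadModel() POSTs /load_model with a five-second retry. The same handshake can be scripted from Python; a rough sketch (the requests package, the local URL, and the function name are assumptions, not part of this commit):

    # Hypothetical poller mirroring the page's checkModelStatus()/tryLoadModel() loop.
    import time

    import requests

    def wait_for_model(base="http://localhost:7860", attempts=5, delay=5):
        for _ in range(attempts):
            if requests.get(f"{base}/health").json().get("model_loaded"):
                return True
            requests.post(f"{base}/load_model")  # same endpoint the page calls
            time.sleep(delay)                    # the page also retries every 5 seconds
        return False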
@@ -387,15 +115,11 @@ HTML_TEMPLATE = """
             }
         }

-        function addMessage(content,
+        function addMessage(content, isUser) {
             const chatContainer = document.getElementById('chat-container');
             const messageDiv = document.createElement('div');
-            messageDiv.className = `message ${
-
-            // Safely handle newlines without regex
-            const lines = content.split('\\n');
-            messageDiv.innerHTML = lines.join('<br>');
-
+            messageDiv.className = `message ${isUser ? 'user-message' : 'ai-message'}`;
+            messageDiv.textContent = content;
             chatContainer.appendChild(messageDiv);
             chatContainer.scrollTop = chatContainer.scrollHeight;
         }
@@ -405,29 +129,34 @@ HTML_TEMPLATE = """
             const loadingDiv = document.createElement('div');
             loadingDiv.className = 'loading';
             loadingDiv.id = 'loading';
-            loadingDiv.textContent = '
+            loadingDiv.textContent = 'AI is thinking...';
             chatContainer.appendChild(loadingDiv);
             chatContainer.scrollTop = chatContainer.scrollHeight;
         }

         function hideLoading() {
             const loading = document.getElementById('loading');
-            if (loading)
+            if (loading) {
+                loading.remove();
+            }
         }

         async function sendMessage() {
             const input = document.getElementById('user-input');
             const message = input.value.trim();
+
             if (!message) return;

-            addMessage(message,
+            addMessage(message, true);
             input.value = '';
             showLoading();

             try {
                 const response = await fetch('/chat', {
                     method: 'POST',
-                    headers: {
+                    headers: {
+                        'Content-Type': 'application/json',
+                    },
                     body: JSON.stringify({ message: message })
                 });
@@ -435,13 +164,13 @@ HTML_TEMPLATE = """
                 hideLoading();

                 if (data.response) {
-                    addMessage(data.response,
+                    addMessage(data.response, false);
                 } else {
-                    addMessage('Sorry, I encountered an error.',
+                    addMessage('Sorry, I encountered an error. Please try again.', false);
                 }
             } catch (error) {
                 hideLoading();
-                addMessage('Connection error. Please try again.',
+                addMessage('Connection error. Please try again.', false);
             }
         }
     </script>
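sendMessage() above posts JSON to /chat and expects either a response or an error key back, and addMessage() now writes replies with textContent instead of innerHTML, so model output is never interpreted as HTML. A minimal sketch of checking that contract with Flask's built-in test client (assumes this file is saved as app.py; the script itself is not part of the commit):

    # Hypothetical contract check for the /chat endpoint used by sendMessage().
    from app import app

    client = app.test_client()
    reply = client.post('/chat', json={'message': 'What drives inflation?'})
    print(reply.status_code)  # 200 on success, 400 for an empty message, 500 on errors
    print(reply.get_json())   # {'response': ...} or {'error': ...}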
@@ -449,47 +178,103 @@ HTML_TEMPLATE = """
 </html>
 """

-
-
-
-
-
-@app.route('/process_tech_scores', methods=['POST'])
-def process_tech_scores():
-    """Process technology scores"""
+def load_model():
+    """Load the Qwen2 model and tokenizer from HF repository"""
+    global model, tokenizer
+
     try:
-
+        logger.info("Loading model and tokenizer from Hugging Face...")

-
-
-
-
-
-
-
+        # Load from the deployed model repository
+        model_repo = "Gaston895/Aegisecon1"
+
+        logger.info(f"Loading tokenizer from {model_repo}...")
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_repo,
+            trust_remote_code=True,
+            use_auth_token=False
+        )
+
+        logger.info(f"Loading model from {model_repo}...")
+        model = AutoModelForCausalLM.from_pretrained(
+            model_repo,
+            torch_dtype=torch.float16,  # Changed from bfloat16 for better compatibility
+            device_map="cpu",  # Force CPU for HF Spaces compatibility
+            trust_remote_code=True,
+            use_auth_token=False,
+            low_cpu_mem_usage=True
         )

-        logger.info(
+        logger.info("Model loaded successfully from HF repository!")
+        return True

-
-
+    except Exception as e:
+        logger.error(f"Error loading model from HF: {str(e)}")
+        # Try alternative loading method
+        try:
+            logger.info("Trying alternative loading method...")
+            tokenizer = AutoTokenizer.from_pretrained(
+                "Qwen/Qwen2-1.5B",  # Fallback to base model
+                trust_remote_code=True
+            )
+            model = AutoModelForCausalLM.from_pretrained(
+                "Qwen/Qwen2-1.5B",
+                torch_dtype=torch.float16,
+                device_map="cpu",
+                trust_remote_code=True,
+                low_cpu_mem_usage=True
+            )
+            logger.info("Fallback model loaded successfully!")
+            return True
+        except Exception as e2:
+            logger.error(f"Fallback loading also failed: {str(e2)}")
+            return False
+
+def generate_response(prompt):
+    """Generate response using the loaded model"""
+    try:
+        if model is None or tokenizer is None:
+            return "Model is still loading, please wait a moment and try again..."

-
-
-        final_analysis = generate_response(result['final_prompt'], max_time=15)
+        # Economics-focused system prompt
+        system_prompt = """You are AEGIS Economics AI, an expert economic analyst and policy advisor.
+Provide clear, accurate, and insightful responses about economics, finance, markets, and policy.
+Focus on practical analysis and actionable insights."""
+
+        full_prompt = f"{system_prompt}\n\nUser: {prompt}\nAssistant:"

-
-
-
-
-
+        # Tokenize input
+        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1024)
+
+        # Generate response
+        with torch.no_grad():
+            outputs = model.generate(
+                inputs.input_ids,
+                max_new_tokens=256,  # Reduced for faster generation
+                temperature=0.7,
+                do_sample=True,
+                pad_token_id=tokenizer.eos_token_id,
+                repetition_penalty=1.1,
+                no_repeat_ngram_size=3
+            )
+
+        # Decode response
+        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Extract only the assistant's response
+        if "Assistant:" in response:
+            response = response.split("Assistant:")[-1].strip()
+
+        return response

     except Exception as e:
-        logger.error(f"Error: {e}")
-        return
+        logger.error(f"Error generating response: {str(e)}")
+        return "I apologize, but I'm having trouble processing your request right now. Please try again in a moment."
+
+@app.route('/')
+def home():
+    """Serve the main chat interface"""
+    return render_template_string(HTML_TEMPLATE)

 @app.route('/chat', methods=['POST'])
 def chat():
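The rewritten load_model()/generate_response() pair above drops the old timeout, padding, and memory-cleanup logic in favor of a single sampling call (do_sample=True, temperature 0.7). A minimal sketch of exercising it outside Flask, assuming the two functions above are in scope (hypothetical test code, not part of the commit):

    # Direct call on the same code path /chat uses.
    if load_model():
        print(generate_response("Summarize the main drivers of inflation."))
    else:
        print("Neither Gaston895/Aegisecon1 nor the Qwen/Qwen2-1.5B fallback could be loaded.")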
@@ -499,45 +284,51 @@ def chat():
         user_message = data.get('message', '')

         if not user_message:
-            return jsonify({'error': 'No message'}), 400
+            return jsonify({'error': 'No message provided'}), 400

-        # Generate
-
+        # Generate AI response
+        ai_response = generate_response(user_message)

-        return jsonify({'response':
+        return jsonify({'response': ai_response})

     except Exception as e:
-        logger.error(f"
-        return jsonify({'error': '
+        logger.error(f"Error in chat endpoint: {str(e)}")
+        return jsonify({'error': 'Internal server error'}), 500

 @app.route('/health')
 def health():
-    """Health check"""
+    """Health check endpoint"""
     return jsonify({
-        'status': '
+        'status': 'healthy',
         'model_loaded': model is not None,
-        '
+        'tokenizer_loaded': tokenizer is not None,
+        'model_info': 'Gaston895/Aegisecon1' if model is not None else 'Not loaded'
     })

-@app.route('/
-def 
-    """
-    global model, tokenizer
+@app.route('/load_model', methods=['POST'])
+def load_model_endpoint():
+    """Endpoint to trigger model loading"""
     try:
-        # Clear existing model
-        del model
-        del tokenizer
-        cleanup_memory()
-
-        # Reload
         success = load_model()
         return jsonify({
             'success': success,
-            '
+            'model_loaded': model is not None,
+            'tokenizer_loaded': tokenizer is not None
         })
     except Exception as e:
-        return jsonify({'
+        return jsonify({'error': str(e)}), 500

 if __name__ == '__main__':
-
-
+    # Load model on startup
+    logger.info("Starting AEGIS Economics AI...")
+
+    # Try to load model, but don't fail if it doesn't work
+    logger.info("Attempting to load model...")
+    model_loaded = load_model()
+
+    if model_loaded:
+        logger.info("Model loaded successfully, starting server...")
+    else:
+        logger.warning("Model failed to load, starting server anyway. Model can be loaded via /load_model endpoint.")
+
+    app.run(host='0.0.0.0', port=7860, debug=False)
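The new docstring mentions Gunicorn, but the __main__ block serves the app with Flask's built-in server on port 7860 (an external launch would typically be something like gunicorn -b 0.0.0.0:7860 app:app). A quick end-to-end check against a running instance, assuming the default host and port and the requests package (hypothetical, not part of the commit):

    # Hypothetical smoke test; the three endpoints are the ones defined in this commit.
    import requests

    base = "http://localhost:7860"
    print(requests.get(f"{base}/health").json())        # status, model_loaded, tokenizer_loaded, model_info
    print(requests.post(f"{base}/load_model").json())   # triggers load_model() on demand
    print(requests.post(f"{base}/chat", json={"message": "Hello"}).json())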