"""
Advanced System Integration Module
Connects all newly created modules with the existing app
"""
import logging
import re
import time
from typing import Dict, Any, Optional

from response_cache_engine import get_response_cache, ResponseCache
from language_system import get_language_detector, get_response_formatter, LanguageDetector, MultiLanguageResponseFormatter
from smart_response_logic import get_smart_response_generator, SmartResponseGenerator

logger = logging.getLogger(__name__)


class AdvancedSystemIntegrator:
    """
    Central integration point for all of the new AI systems
    """

    def __init__(self):
        self.cache = get_response_cache()
        self.language_detector = get_language_detector()
        self.response_formatter = get_response_formatter()
        self.smart_generator = get_smart_response_generator()
        self.stats = {
            'total_requests': 0,
            'cached_responses': 0,
            'average_response_time_ms': 0,
            'language_detected': {}
        }
        logger.info("✅ Advanced System Integrator initialized")

    def process_complete_request(self, user_message: str, task_type: str = 'general') -> Dict[str, Any]:
        """
        Complete request-processing pipeline with all features:
        1. Language detection
        2. Cache lookup
        3. Smart response generation
        4. Response formatting
        5. Caching

        Returns: dict with the full response and metadata
        """
        start_time = time.time()
        # Count every request up front (cache hits included) so the hit rate stays accurate
        self.stats['total_requests'] += 1

        # 1. LANGUAGE DETECTION
        detected_lang, lang_confidence = self.language_detector.detect_language(user_message)
        self.response_formatter.current_language = detected_lang

        lang_key = f"{detected_lang}_{task_type}"
        if lang_key not in self.stats['language_detected']:
            self.stats['language_detected'][lang_key] = 0
        self.stats['language_detected'][lang_key] += 1
        logger.debug(f"Language detected: {detected_lang} ({lang_confidence:.2f})")

        # 2. CACHE LOOKUP - only for certain task types
        if self.cache.should_use_cache(user_message, task_type):
            cached_response = self.cache.find_similar_responses(user_message, task_type)
            if cached_response:
                self.stats['cached_responses'] += 1
                logger.info(f"🔥 Cache HIT for {task_type}")
                response_time = time.time() - start_time
                self.cache.track_response_time(task_type, response_time * 1000)
                return {
                    'response': cached_response,
                    'metadata': {
                        'language': detected_lang,
                        'language_confidence': lang_confidence,
                        'from_cache': True,
                        'task_type': task_type,
                        'response_time_ms': response_time * 1000
                    }
                }

        # 3. SMART RESPONSE GENERATION
        smart_result = self.smart_generator.process_message(user_message, detected_lang)
        logger.debug(f"Smart response type: {smart_result['response_type']}")

        # 4. RESPONSE FORMATTING
        formatted_response = smart_result['response']

        # 5. CACHING
        cache_key = self.cache.cache_response(user_message, formatted_response, task_type)

        # Update statistics
        response_time = time.time() - start_time
        self.cache.track_response_time(task_type, response_time * 1000)

        result = {
            'response': formatted_response,
            'metadata': {
                'language': detected_lang,
                'language_confidence': lang_confidence,
                'from_cache': False,
                'task_type': task_type,
                'response_type': smart_result.get('response_type', 'general'),
                'cache_key': cache_key,
                'response_time_ms': response_time * 1000,
                'context_topics': smart_result.get('metadata', {}).get('context_topics', []),
                'confidence': smart_result.get('confidence', 0.7)
            }
        }
        return result

    def handle_code_generation(self, prompt: str) -> Dict[str, Any]:
        """Specialized entry point for code generation"""
        return self.process_complete_request(prompt, task_type='code_generation')

    def handle_image_generation(self, prompt: str) -> Dict[str, Any]:
        """Specialized entry point for image generation"""
        return self.process_complete_request(prompt, task_type='image_generation')

    def handle_code_analysis(self, code: str) -> Dict[str, Any]:
        """Specialized entry point for code analysis"""
        return self.process_complete_request(code, task_type='code_analysis')

    def get_system_stats(self) -> Dict[str, Any]:
        """Returns comprehensive system statistics"""
        cache_stats = self.cache.get_cache_stats()
        stats = {
            'requests': {
                'total': self.stats['total_requests'],
                'cached': self.stats['cached_responses'],
                'cache_hit_rate': (
                    (self.stats['cached_responses'] / self.stats['total_requests'] * 100)
                    if self.stats['total_requests'] > 0 else 0
                ),
            },
            'cache': cache_stats,
            'languages': self.stats['language_detected'],
            'response_generator_memory': self.smart_generator.get_memory_stats(),
        }
        return stats

    def health_check(self) -> Dict[str, Any]:
        """Health check for all components"""
        return {
            'cache_initialized': self.cache is not None,
            'language_detector_initialized': self.language_detector is not None,
            'response_formatter_initialized': self.response_formatter is not None,
            'smart_generator_initialized': self.smart_generator is not None,
            'status': '🟢 All systems operational' if all([
                self.cache,
                self.language_detector,
                self.response_formatter,
                self.smart_generator
            ]) else '🔴 Some systems are down'
        }
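

# Dispatch sketch (an assumption about how the existing app might call in; nothing
# below is defined elsewhere in this codebase): a caller that only knows the task
# type could route to the specialized entry points like this:
#
#     integrator = get_advanced_integrator()
#     dispatch = {
#         'code_generation': integrator.handle_code_generation,
#         'image_generation': integrator.handle_image_generation,
#         'code_analysis': integrator.handle_code_analysis,
#     }
#     result = dispatch.get(task_type, lambda m: integrator.process_complete_request(m))(message)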


class OptimizedResponseHandler:
    """
    Optimized handler for different request types
    with minimal latency
    """

    def __init__(self, integrator: AdvancedSystemIntegrator):
        self.integrator = integrator
        self.request_queue = []
        self.response_pool = {}

    def quick_response(self, message: str) -> Optional[str]:
        """
        Fast response without the full pipeline
        (only for very common requests); returns None if no pattern matches
        """
        # Fast pattern matches for common (German) questions
        quick_patterns = {
            r'wie\s+geht': 'Mir geht es gut, danke! Wie kann ich dir helfen?',
            r'danke': 'Gerne! 😊',
            r'hallo|hi': 'Hallo! Wie kann ich dir heute helfen?',
        }
        message_lower = message.lower()
        for pattern, response in quick_patterns.items():
            if re.search(pattern, message_lower):
                return response
        return None
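
    # Usage sketch (assumption, not wired up anywhere in this module): try the quick
    # path first and fall back to the full pipeline only on a miss, e.g.:
    #
    #     handler = OptimizedResponseHandler(get_advanced_integrator())
    #     reply = handler.quick_response(msg)
    #     if reply is None:
    #         reply = handler.integrator.process_complete_request(msg)['response']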

    def batch_process(self, messages: list) -> list:
        """
        Batch processing for multiple messages
        (performance optimization)
        """
        results = []
        for msg in messages:
            # Use the cache where possible for faster batch processing
            result = self.integrator.process_complete_request(msg)
            results.append(result)
        return results


# --- Global instances ---
_integrator = None


def get_advanced_integrator() -> AdvancedSystemIntegrator:
    """Returns the global AdvancedSystemIntegrator instance"""
    global _integrator
    if _integrator is None:
        _integrator = AdvancedSystemIntegrator()
    return _integrator


def process_message_with_all_features(message: str, task_type: str = 'general') -> Dict[str, Any]:
    """
    Convenience function: processes a message with all features enabled
    """
    integrator = get_advanced_integrator()
    return integrator.process_complete_request(message, task_type)
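

# Wiring sketch (assumption; the existing app and this file's module name are not
# shown here): a route or message handler in the app could call the convenience
# function directly, e.g.:
#
#     from advanced_system_integration import process_message_with_all_features
#     result = process_message_with_all_features(user_text, task_type='general')
#     reply, meta = result['response'], result['metadata']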


if __name__ == "__main__":
    # Smoke tests
    integrator = get_advanced_integrator()

    # Test 1: German code generation
    print("=== Test 1: German Code ===")
    result = integrator.handle_code_generation("Schreib mir einen Python code für Email Validator")
    print(f"Language: {result['metadata']['language']}")
    print(f"Cache: {result['metadata']['from_cache']}")
    print(f"Response Time: {result['metadata']['response_time_ms']:.2f}ms")

    # Test 2: English image generation
    print("\n=== Test 2: English Image ===")
    result = integrator.handle_image_generation("Create me a beautiful mountain landscape")
    print(f"Language: {result['metadata']['language']}")

    # Test 3: Health check
    print("\n=== Health Check ===")
    health = integrator.health_check()
    print(health['status'])

    # Test 4: System stats
    print("\n=== System Stats ===")
    import json
    stats = integrator.get_system_stats()
    print(json.dumps(stats, indent=2, ensure_ascii=False, default=str))