| | """
|
| | Advanced System Integration Module
|
| | Verbindet alle neu erstellten Module mit bestehender App
|
| | """
|
import logging
import re
import time
from typing import Dict, Any, Optional

from response_cache_engine import get_response_cache, ResponseCache
from language_system import get_language_detector, get_response_formatter, LanguageDetector, MultiLanguageResponseFormatter
from smart_response_logic import get_smart_response_generator, SmartResponseGenerator

logger = logging.getLogger(__name__)


class AdvancedSystemIntegrator:
    """
    Central integration point for all of the new AI systems.
    """

    def __init__(self):
        self.cache = get_response_cache()
        self.language_detector = get_language_detector()
        self.response_formatter = get_response_formatter()
        self.smart_generator = get_smart_response_generator()

        self.stats = {
            'total_requests': 0,
            'cached_responses': 0,
            'average_response_time_ms': 0,
            'language_detected': {},
        }

        logger.info("✅ Advanced System Integrator initialized")

    def process_complete_request(self, user_message: str, task_type: str = 'general') -> Dict[str, Any]:
        """
        Complete request-processing pipeline with all features:

        1. Language detection
        2. Cache lookup
        3. Smart response generation
        4. Response formatting
        5. Caching

        Returns a dict with the full response and its metadata.
        """
        start_time = time.time()
        # Count every request up front so the cache hit rate reported by
        # get_system_stats() is measured against all requests, hits included.
        self.stats['total_requests'] += 1

        # Step 1: language detection
        detected_lang, lang_confidence = self.language_detector.detect_language(user_message)
        self.response_formatter.current_language = detected_lang

        lang_key = f"{detected_lang}_{task_type}"
        if lang_key not in self.stats['language_detected']:
            self.stats['language_detected'][lang_key] = 0
        self.stats['language_detected'][lang_key] += 1

        logger.debug(f"Language detected: {detected_lang} ({lang_confidence:.2f})")

        # Step 2: cache lookup
        if self.cache.should_use_cache(user_message, task_type):
            cached_response = self.cache.find_similar_responses(user_message, task_type)

            if cached_response:
                self.stats['cached_responses'] += 1
                logger.info(f"🔥 Cache HIT for {task_type}")

                response_time = time.time() - start_time
                self.cache.track_response_time(task_type, response_time * 1000)

                return {
                    'response': cached_response,
                    'metadata': {
                        'language': detected_lang,
                        'language_confidence': lang_confidence,
                        'from_cache': True,
                        'task_type': task_type,
                        'response_time_ms': response_time * 1000,
                    },
                }

        # Step 3: smart response generation
        smart_result = self.smart_generator.process_message(user_message, detected_lang)

        logger.debug(f"Smart response type: {smart_result.get('response_type')}")

        # Step 4: response formatting (the formatter's language was already
        # set in step 1; the generator's output is passed through as-is)
        formatted_response = smart_result['response']

        # Step 5: cache the fresh response for future lookups
        cache_key = self.cache.cache_response(user_message, formatted_response, task_type)

        response_time = time.time() - start_time
        self.cache.track_response_time(task_type, response_time * 1000)

        result = {
            'response': formatted_response,
            'metadata': {
                'language': detected_lang,
                'language_confidence': lang_confidence,
                'from_cache': False,
                'task_type': task_type,
                'response_type': smart_result.get('response_type', 'general'),
                'cache_key': cache_key,
                'response_time_ms': response_time * 1000,
                'context_topics': smart_result.get('metadata', {}).get('context_topics', []),
                'confidence': smart_result.get('confidence', 0.7),
            },
        }

        return result

    def handle_code_generation(self, prompt: str) -> Dict[str, Any]:
        """Specialized entry point for code generation."""
        return self.process_complete_request(prompt, task_type='code_generation')

    def handle_image_generation(self, prompt: str) -> Dict[str, Any]:
        """Specialized entry point for image generation."""
        return self.process_complete_request(prompt, task_type='image_generation')

    def handle_code_analysis(self, code: str) -> Dict[str, Any]:
        """Specialized entry point for code analysis."""
        return self.process_complete_request(code, task_type='code_analysis')

    def get_system_stats(self) -> Dict[str, Any]:
        """Returns comprehensive system statistics."""
        cache_stats = self.cache.get_cache_stats()

        stats = {
            'requests': {
                'total': self.stats['total_requests'],
                'cached': self.stats['cached_responses'],
                'cache_hit_rate': (self.stats['cached_responses'] / self.stats['total_requests'] * 100)
                if self.stats['total_requests'] > 0 else 0,
            },
            'cache': cache_stats,
            'languages': self.stats['language_detected'],
            'response_generator_memory': self.smart_generator.get_memory_stats(),
        }

        return stats

    def health_check(self) -> Dict[str, Any]:
        """Health check across all components."""
        return {
            'cache_initialized': self.cache is not None,
            'language_detector_initialized': self.language_detector is not None,
            'response_formatter_initialized': self.response_formatter is not None,
            'smart_generator_initialized': self.smart_generator is not None,
            'status': '🟢 All systems operational' if all([
                self.cache,
                self.language_detector,
                self.response_formatter,
                self.smart_generator,
            ]) else '🔴 Some systems are down',
        }


class OptimizedResponseHandler:
    """
    Optimized handler for different request types, aiming for minimal latency.
    """

    def __init__(self, integrator: AdvancedSystemIntegrator):
        self.integrator = integrator
        self.request_queue = []
        self.response_pool = {}

    def quick_response(self, message: str) -> Optional[str]:
        """
        Fast response that skips the full pipeline
        (only for very common requests); returns None when nothing matches.
        """
        # German-language quick replies for the most frequent greetings.
        # Note: the unanchored 'hi' alternative also matches words that
        # merely contain "hi".
        quick_patterns = {
            r'wie\s+geht': 'Mir geht es gut, danke! Wie kann ich dir helfen?',
            r'danke': 'Gerne! 😊',
            r'hallo|hi': 'Hallo! Wie kann ich dir heute helfen?',
        }

        message_lower = message.lower()
        for pattern, response in quick_patterns.items():
            if re.search(pattern, message_lower):
                return response

        return None

    def batch_process(self, messages: list) -> list:
        """
        Batch processing for multiple messages
        (performance optimization); see the parallel sketch after the class.
        """
        results = []

        for msg in messages:
            result = self.integrator.process_complete_request(msg)
            results.append(result)

        return results
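

# Sketch of a parallel batch variant, not used elsewhere in this module: it
# fans messages out over a small thread pool instead of the sequential loop
# in OptimizedResponseHandler.batch_process. This assumes that
# process_complete_request and the underlying cache are thread-safe, which
# this module does not guarantee; `batch_process_parallel` is an illustrative
# helper, not an existing API.
def batch_process_parallel(integrator: AdvancedSystemIntegrator, messages: list, workers: int = 4) -> list:
    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(max_workers=workers) as pool:
        # map() preserves input order, matching the sequential variant.
        return list(pool.map(integrator.process_complete_request, messages))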


_integrator = None

def get_advanced_integrator() -> AdvancedSystemIntegrator:
    """Returns the global AdvancedSystemIntegrator instance, creating it lazily."""
    global _integrator
    if _integrator is None:
        _integrator = AdvancedSystemIntegrator()
    return _integrator


def process_message_with_all_features(message: str, task_type: str = 'general') -> Dict[str, Any]:
    """
    Convenience function: processes a message with all features enabled.
    """
    integrator = get_advanced_integrator()
    return integrator.process_complete_request(message, task_type)
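

# Minimal adapter sketch for the "existing app" the module docstring mentions:
# callers that only need the reply text can use a thin wrapper like this.
# `handle_app_message` is an illustrative name, not an existing API.
def handle_app_message(message: str, task_type: str = 'general') -> str:
    return process_message_with_all_features(message, task_type)['response']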


if __name__ == "__main__":
    integrator = get_advanced_integrator()

    print("=== Test 1: German code request ===")
    result = integrator.handle_code_generation("Schreib mir einen Python code für Email Validator")
    print(f"Language: {result['metadata']['language']}")
    print(f"Cache: {result['metadata']['from_cache']}")
    print(f"Response Time: {result['metadata']['response_time_ms']:.2f}ms")

    print("\n=== Test 2: English image request ===")
    result = integrator.handle_image_generation("Create me a beautiful mountain landscape")
    print(f"Language: {result['metadata']['language']}")

    print("\n=== Health Check ===")
    health = integrator.health_check()
    print(health['status'])

    print("\n=== System Stats ===")
    import json
    stats = integrator.get_system_stats()
    print(json.dumps(stats, indent=2, ensure_ascii=False, default=str))
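
    print("\n=== Test 3: Quick responses ===")
    # Demo of the fast path: quick_response answers very common greetings
    # directly and returns None otherwise, signalling that the message should
    # go through the full pipeline instead.
    handler = OptimizedResponseHandler(integrator)
    print(handler.quick_response("Hallo!"))
    print(handler.quick_response("Explain decorators"))  # expected: None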
|