""" app.py v4.0 FINAL - FastAPI pour Chunking Sémantique Intelligent CORRECTIONS ET AMÉLIORATIONS: ✅ Import SmartChunkerPipeline (correct) ✅ Méthodes synchronisées avec chunker_pipeline.py ✅ Gestion d'erreurs robuste ✅ Endpoints optimisés pour n8n ✅ Variables d'environnement sécurisées ✅ Monitoring et health checks complets ✅ Configuration HF Space gratuit optimisée """ import os import tempfile import logging import time import asyncio import gc from pathlib import Path from fastapi import FastAPI, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import JSONResponse from pydantic import BaseModel, Field from typing import List, Dict, Any, Optional from concurrent.futures import ThreadPoolExecutor import os #os.environ["HF_HOME"] = "/tmp/cache/huggingface" #os.environ["TRANSFORMERS_CACHE"] = "/tmp/cache/transformers" os.environ["HF_HOME"] = "/tmp/hf" os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf" # Configuration logging optimisée logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", handlers=[ logging.StreamHandler(), logging.FileHandler("/app/logs/app.log", mode="a") if os.path.exists("/app/logs") else logging.StreamHandler() ] ) logger = logging.getLogger(__name__) # ✅ IMPORTS PRINCIPAUX - Vérification de compatibilité try: from chunker_pipeline import SmartChunkerPipeline from schemas import ChunkRequest, ChunkResponse, ChunkMetadata logger.info("✅ Modules chunking v4.0 importés avec succès") except ImportError as e: logger.error(f"❌ ERREUR CRITIQUE - Import modules chunking: {e}") logger.error("Vérifiez que les fichiers chunker_pipeline.py et schemas.py existent") raise # ✅ CONFIGURATION ENVIRONNEMENT HF SPACE SÉCURISÉE def setup_environment(): """Configuration optimisée pour Hugging Face Space gratuit""" # ✅ Compatible Hugging Face Space (car /tmp est accessible en écriture) cache_base = os.path.join(tempfile.gettempdir(), "cache") os.environ["HF_HOME"] = os.path.join(cache_base, "huggingface") os.environ["TRANSFORMERS_CACHE"] = os.path.join(cache_base, "transformers") os.environ["HF_HUB_CACHE"] = os.path.join(cache_base, "hub") # Optimisations performance os.environ["TOKENIZERS_PARALLELISM"] = "false" os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1" os.environ["TRANSFORMERS_VERBOSITY"] = "error" os.environ["PYTHONUNBUFFERED"] = "1" # Création dossiers cache sécurisés cache_dirs = [ os.environ["HF_HOME"], os.environ["TRANSFORMERS_CACHE"], os.environ["HF_HUB_CACHE"], os.path.join(cache_base, "llm"), os.path.join(cache_base, "embeddings"), os.path.join(cache_base, "logs") ] for cache_dir in cache_dirs: try: os.makedirs(cache_dir, exist_ok=True) os.chmod(cache_dir, 0o755) except Exception as e: logger.warning(f"⚠️ Impossible de créer {cache_dir}: {e}") logger.info("✅ Environnement HF Space configuré") # Configuration environnement setup_environment() # ✅ INITIALISATION FASTAPI OPTIMISÉE app = FastAPI( title="🧠 Chunking Sémantique Intelligent API", description=""" **API de découpage récursif hiérarchique avec parentalité** 🚀 **Fonctionnalités:** - Chunking sémantique avec Chonkie + LlamaIndex - Relations bidirectionnelles parent/enfant - Export Obsidian format [[Titre]], id - Base connaissance pour agents IA spécialisés - 100% gratuit sur HuggingFace Space 🔧 **Optimisé pour n8n et automation** """, version="4.0.0", docs_url="/docs", redoc_url="/redoc", openapi_tags=[ {"name": "chunking", "description": "Endpoints de chunking principal"}, {"name": "monitoring", "description": "Santé et configuration"}, {"name": "test", "description": "Tests et validation"} ] ) # ✅ CORS ÉTENDU POUR N8N ET INTÉGRATIONS app.add_middleware( CORSMiddleware, allow_origins=["*"], # Nécessaire pour n8n allow_credentials=True, allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"], allow_headers=["*"], expose_headers=["*"] ) # ✅ VARIABLES GLOBALES pipeline = None executor = ThreadPoolExecutor(max_workers=1) # HF Space gratuit = 1 worker max startup_time = time.time() request_count = 0 # ✅ MIDDLEWARE MONITORING ET SÉCURITÉ @app.middleware("http") async def monitoring_middleware(request: Request, call_next): """Middleware pour monitoring et gestion erreurs globales""" global request_count start_time = time.time() request_count += 1 # Headers sécurité response = None try: response = await call_next(request) response.headers["X-API-Version"] = "4.0.0" response.headers["X-Powered-By"] = "Chunking-Semantic-AI" # Log performance process_time = time.time() - start_time if process_time > 5.0: # Log requêtes lentes logger.warning(f"⚠️ Requête lente: {request.url.path} - {process_time:.2f}s") return response except Exception as e: logger.error(f"❌ Erreur middleware {request.url.path}: {str(e)}") # Réponse d'erreur structurée return JSONResponse( status_code=500, content={ "error": "Erreur interne du serveur", "detail": str(e), "path": str(request.url.path), "timestamp": time.time(), "request_id": request_count, "version": "4.0.0" } ) # ✅ ÉVÉNEMENTS LIFECYCLE @app.on_event("startup") async def startup_event(): """Initialisation complète au démarrage""" global pipeline try: logger.info("🚀 === DÉMARRAGE API CHUNKING SÉMANTIQUE v4.0 ===") # Vérification espace disque import shutil total, used, free = shutil.disk_usage("/app") free_gb = free / (1024**3) logger.info(f"💾 Espace libre: {free_gb:.1f}GB") if free_gb < 1.0: logger.warning("⚠️ Espace disque faible (<1GB)") # Initialisation pipeline principal logger.info("🔧 Initialisation SmartChunkerPipeline...") pipeline = SmartChunkerPipeline() await pipeline.initialize() # Vérification santé health = await pipeline.health_check_v4() logger.info(f"🏥 Status santé: {health['status']}") if health['status'] != 'healthy': logger.warning(f"⚠️ Pipeline en mode dégradé: {health['status']}") # Configuration système config_info = await pipeline.get_config_info_v4() logger.info(f"🧠 LLM: {config_info['models']['llm_model']}") logger.info(f"🔤 Embedding: {config_info['models']['embedding_model']}") logger.info(f"🦛 Chonkie: {'✅' if config_info['models']['chonkie_available'] else '❌'}") # Test rapide de fonctionnement test_request = ChunkRequest( text="Test d'initialisation du système de chunking.", titre="Test Init", source_id="init_test" ) test_result = await pipeline.process_text(test_request) logger.info(f"✅ Test init: {test_result.total_chunks} chunks générés") logger.info("🎉 API Chunking Sémantique v4.0 prête !") except Exception as e: logger.error(f"❌ ERREUR CRITIQUE lors du démarrage: {e}") logger.error("Le service ne pourra pas fonctionner correctement") raise @app.on_event("shutdown") async def shutdown_event(): """Nettoyage propre à l'arrêt""" global pipeline, executor try: logger.info("🛑 Arrêt du service en cours...") # Nettoyage pipeline if pipeline: await pipeline.cleanup() logger.info("✅ Pipeline nettoyé") # Nettoyage executor if executor: executor.shutdown(wait=True, timeout=10) logger.info("✅ Executor fermé") # Nettoyage mémoire final gc.collect() # Statistiques finales uptime = time.time() - startup_time logger.info(f"📊 Statistiques finales:") logger.info(f" - Temps de fonctionnement: {uptime:.1f}s") logger.info(f" - Requêtes traitées: {request_count}") logger.info(f" - Moyenne: {request_count/uptime:.2f} req/s") logger.info("✅ Arrêt propre terminé") except Exception as e: logger.error(f"⚠️ Erreur lors de l'arrêt: {e}") # ✅ ENDPOINTS PRINCIPAUX @app.get("/", tags=["monitoring"]) async def root(): """Page d'accueil avec informations complètes du service""" uptime = time.time() - startup_time return { "service": "🧠 Chunking Sémantique Intelligent API", "version": "4.0.0", "status": "🟢 Opérationnel" if pipeline else "🔴 Non initialisé", "uptime_seconds": round(uptime, 1), "requests_processed": request_count, "features": [ "🧩 Chunking sémantique avec Chonkie", "🏗️ Hiérarchie récursive intelligente", "🔗 Relations bidirectionnelles parent/enfant", "📝 Export Obsidian format [[Titre]], id", "🤖 Base connaissance pour agents IA spécialisés", "💰 100% gratuit sur HuggingFace Space", "🔄 Optimisé pour n8n et automation" ], "endpoints": { "chunking": [ "POST /chunk - Chunking principal", "POST /chunk-batch - Traitement par lots" ], "monitoring": [ "GET /health - Vérification santé détaillée", "GET /config - Configuration système", "GET /stats - Statistiques d'usage" ], "test": [ "POST /test - Test de validation", "GET /ping - Test connectivité simple" ] }, "documentation": { "interactive": "/docs", "redoc": "/redoc" }, "support": { "n8n_compatible": True, "max_text_length": "500,000 caractères", "max_batch_size": 3, "response_format": "JSON structuré" } } @app.get("/health", tags=["monitoring"]) async def health_check(): """Vérification santé complète et détaillée""" try: if pipeline is None: return { "status": "🔴 error", "message": "Pipeline non initialisé", "version": "4.0.0", "timestamp": time.time(), "uptime": time.time() - startup_time, "critical": True } # Health check pipeline health_result = await pipeline.health_check_v4() # Informations mémoire memory_info = pipeline.get_memory_usage_v4() # Statistiques système import psutil try: cpu_percent = psutil.cpu_percent(interval=1) memory_percent = psutil.virtual_memory().percent except: cpu_percent = 0 memory_percent = 0 # Status coloré status_map = { "healthy": "🟢 healthy", "degraded": "🟡 degraded", "unhealthy": "🔴 unhealthy", "error": "🔴 error" } return { **health_result, "status": status_map.get(health_result['status'], health_result['status']), "memory_info": memory_info, "system_info": { "cpu_percent": cpu_percent, "memory_percent": memory_percent, "uptime": time.time() - startup_time, "requests_processed": request_count }, "version": "4.0.0" } except Exception as e: logger.error(f"❌ Erreur health check: {e}") return { "status": "🔴 error", "message": f"Erreur health check: {str(e)}", "version": "4.0.0", "timestamp": time.time(), "critical": True } @app.get("/config", tags=["monitoring"]) async def get_config(): """Configuration système détaillée""" try: if pipeline is None: raise HTTPException(status_code=503, detail="Pipeline non initialisé") config_info = await pipeline.get_config_info_v4() # Ajout informations runtime runtime_info = { "python_version": f"{os.sys.version_info.major}.{os.sys.version_info.minor}.{os.sys.version_info.micro}", "platform": os.name, "workers": 1, "max_request_size": "500KB", "cache_enabled": True, "environment": "HuggingFace Space" } return { **config_info, "runtime_info": runtime_info, "api_version": "4.0.0", "timestamp": time.time() } except Exception as e: logger.error(f"❌ Erreur récupération config: {e}") raise HTTPException(status_code=500, detail=f"Erreur config: {str(e)}") @app.get("/stats", tags=["monitoring"]) async def get_stats(): """Statistiques d'usage détaillées""" uptime = time.time() - startup_time avg_requests_per_minute = (request_count / uptime) * 60 if uptime > 0 else 0 return { "service_stats": { "uptime_seconds": round(uptime, 1), "uptime_formatted": f"{int(uptime//3600)}h {int((uptime%3600)//60)}m {int(uptime%60)}s", "total_requests": request_count, "avg_requests_per_minute": round(avg_requests_per_minute, 2) }, "system_health": { "pipeline_initialized": pipeline is not None, "memory_usage": pipeline.get_memory_usage_v4() if pipeline else "N/A" }, "version": "4.0.0", "timestamp": time.time() } @app.post("/chunk", response_model=ChunkResponse, tags=["chunking"]) async def chunk_text(request: ChunkRequest): """ 🧠 ENDPOINT PRINCIPAL - Chunking sémantique intelligent **Fonctionnalités:** - Chunking sémantique avec Chonkie + LlamaIndex - Relations hiérarchiques bidirectionnelles - Export Obsidian format [[Titre]], id - Base connaissance pour agents IA **Optimisé pour n8n et automation** """ if pipeline is None: raise HTTPException( status_code=503, detail="❌ Pipeline non initialisé - Redémarrez le service" ) start_time = time.time() try: logger.info(f"📝 Début chunking: {request.titre or 'Sans titre'} ({len(request.text)} chars)") # Validation entrées renforcée if not request.text or len(request.text.strip()) < 10: raise HTTPException( status_code=400, detail="❌ Le texte doit contenir au moins 10 caractères" ) # Limite HF Space gratuit max_length = 500000 if len(request.text) > max_length: raise HTTPException( status_code=400, detail=f"❌ Texte trop long ({len(request.text)} chars). Maximum: {max_length:,} caractères" ) # Traitement principal result = await pipeline.process_text(request) processing_time = time.time() - start_time # Log succès logger.info( f"✅ Chunking terminé: {result.total_chunks} chunks, " f"{result.total_tokens} tokens en {processing_time:.2f}s" ) return result except HTTPException: raise except Exception as e: logger.error(f"❌ Erreur chunking: {str(e)}") # Nettoyage mémoire d'urgence try: await pipeline._cleanup_memory_v4() gc.collect() except: pass raise HTTPException( status_code=500, detail=f"❌ Erreur traitement: {str(e)}" ) @app.post("/chunk-batch", tags=["chunking"]) async def chunk_batch(requests: List[ChunkRequest]): """ 📦 Traitement par lots optimisé pour HF Space gratuit **Limites:** - Maximum 3 textes par lot - Traitement séquentiel pour économiser la mémoire """ # Validation limite batch pour Space gratuit max_batch_size = 3 if len(requests) > max_batch_size: raise HTTPException( status_code=400, detail=f"❌ Maximum {max_batch_size} textes par lot sur HF Space gratuit" ) if pipeline is None: raise HTTPException(status_code=503, detail="❌ Pipeline non initialisé") start_time = time.time() results = [] try: logger.info(f"📦 Début batch: {len(requests)} textes") for idx, request in enumerate(requests): try: logger.info(f" 📝 Traitement {idx+1}/{len(requests)}: {request.titre or 'Sans titre'}") result = await pipeline.process_text(request) results.append({ "success": True, "index": idx, "source_id": request.source_id, "result": result }) # Nettoyage entre chaque traitement if idx < len(requests) - 1: # Pas pour le dernier await pipeline._cleanup_memory_v4() except Exception as e: logger.error(f"❌ Erreur batch item {idx}: {e}") results.append({ "success": False, "index": idx, "source_id": request.source_id or f"item_{idx}", "error": str(e) }) total_time = time.time() - start_time successful_results = [r for r in results if r["success"]] # Nettoyage final try: await pipeline._cleanup_memory_v4() except: pass logger.info( f"✅ Batch terminé: {len(successful_results)}/{len(requests)} succès " f"en {total_time:.2f}s" ) return { "results": results, "summary": { "total_processed": len(requests), "successful": len(successful_results), "failed": len(requests) - len(successful_results), "success_rate": f"{(len(successful_results)/len(requests)*100):.1f}%", "total_processing_time": round(total_time, 2), "avg_time_per_item": round(total_time / len(requests), 2) }, "version": "4.0.0", "timestamp": time.time() } except Exception as e: logger.error(f"❌ Erreur batch global: {e}") gc.collect() raise HTTPException( status_code=500, detail=f"❌ Erreur traitement batch: {str(e)}" ) @app.post("/test", tags=["test"]) async def test_chunking(): """🧪 Test de validation du déploiement""" if pipeline is None: raise HTTPException(status_code=503, detail="❌ Pipeline non initialisé") try: test_request = ChunkRequest( text=""" Ceci est un test complet de chunking sémantique intelligent v4.0. Le système utilise Chonkie pour le découpage sémantique avancé. Il génère des relations hiérarchiques bidirectionnelles entre les chunks. L'export Obsidian utilise le format [[Titre]], id pour les liens. Les agents IA reçoivent une base de connaissance parfaitement structurée. Ce test valide toutes les fonctionnalités principales du système. """, titre="Test Validation v4.0", source_id="validation_test_v4", include_metadata=True, export_obsidian=True, export_agents=True ) start_time = time.time() result = await pipeline.process_text(test_request) test_time = time.time() - start_time # Vérifications détaillées checks = { "chunking_functional": result.total_chunks > 0, "metadata_extracted": len(result.chunks[0].metadata.keywords) > 0 if result.chunks else False, "hierarchy_built": len(result.hierarchy) > 0, "obsidian_export": result.obsidian_export is not None, "agent_knowledge": result.agent_knowledge is not None, "processing_time_ok": test_time < 30 # Moins de 30s } success_rate = sum(checks.values()) / len(checks) * 100 return { "test_status": "✅ SUCCESS" if success_rate == 100 else "⚠️ PARTIAL", "success_rate": f"{success_rate:.1f}%", "results": { "chunks_generated": result.total_chunks, "tokens_processed": result.total_tokens, "processing_time": round(test_time, 2), "hierarchy_levels": len(result.hierarchy) }, "checks": checks, "features_validated": [ "✅ Chunking sémantique Chonkie" if checks["chunking_functional"] else "❌ Chunking failed", "✅ Extraction métadonnées" if checks["metadata_extracted"] else "❌ Metadata failed", "✅ Relations hiérarchiques" if checks["hierarchy_built"] else "❌ Hierarchy failed", "✅ Export Obsidian" if checks["obsidian_export"] else "❌ Obsidian failed", "✅ Base agents IA" if checks["agent_knowledge"] else "❌ Agents failed" ], "version": "4.0.0", "timestamp": time.time() } except Exception as e: logger.error(f"❌ Test validation échoué: {e}") raise HTTPException( status_code=500, detail=f"❌ Test échoué: {str(e)}" ) @app.get("/ping", tags=["test"]) async def ping(): """🏓 Test de connectivité simple""" return { "ping": "pong", "timestamp": time.time(), "version": "4.0.0", "status": "🟢 Opérationnel" if pipeline else "🔴 Non initialisé" } # ✅ GESTION D'ERREURS PERSONNALISÉE @app.exception_handler(404) async def not_found_handler(request: Request, exc): """Gestionnaire 404 personnalisé""" return JSONResponse( status_code=404, content={ "error": "❌ Endpoint non trouvé", "message": f"L'endpoint {request.url.path} n'existe pas", "available_endpoints": { "chunking": ["/chunk", "/chunk-batch"], "monitoring": ["/health", "/config", "/stats"], "test": ["/test", "/ping"], "docs": ["/docs", "/redoc"] }, "suggestion": "Consultez /docs pour la documentation complète", "version": "4.0.0" } ) @app.exception_handler(422) async def validation_exception_handler(request: Request, exc): """Gestionnaire erreurs de validation Pydantic""" return JSONResponse( status_code=422, content={ "error": "❌ Erreur de validation", "message": "Les données envoyées ne respectent pas le format attendu", "detail": str(exc), "hint": "Vérifiez la structure de votre requête JSON", "documentation": "/docs", "version": "4.0.0" } ) # ✅ POINT D'ENTRÉE PRINCIPAL if __name__ == "__main__": import uvicorn logger.info("🚀 Démarrage direct du serveur...") # Configuration optimisée pour HF Space gratuit uvicorn.run( "app:app", host="0.0.0.0", port=7860, # Port standard HF Space reload=False, # Mode production access_log=False, # Économie ressources log_level="info", workers=1, # HF Space gratuit = 1 worker timeout_keep_alive=30, limit_concurrency=10, # Limite connexions simultanées timeout_graceful_shutdown=30 )