"""Service that initializes the MCP server with FastMCP."""

import logging
from typing import Any, Dict, Optional

from fastapi import FastAPI
from mcp.server.fastmcp import FastMCP

from services.stance_model_manager import stance_model_manager
from services.label_model_manager import kpa_model_manager
from services.stt_service import speech_to_text
from services.tts_service import text_to_speech
from services.chat_service import generate_chat_response

logger = logging.getLogger(__name__)

mcp_server = FastMCP("NLP-Debater-MCP", json_response=True, stateless_http=False)

@mcp_server.tool()
def detect_stance(topic: str, argument: str) -> Dict[str, Any]:
    """
    Detect the stance (PRO/CON) of an argument with respect to a topic.

    Args:
        topic: The debate topic (e.g. "Assisted suicide should be a criminal offence")
        argument: The argument to classify (e.g. "People have the right to choose...")

    Returns:
        Dict with predicted_stance, confidence, and probabilities.
    """
    if not stance_model_manager.model_loaded:
        raise ValueError("Stance model not loaded")

    result = stance_model_manager.predict(topic, argument)
    return {
        "predicted_stance": result["predicted_stance"],
        "confidence": result["confidence"],
        "probability_con": result["probability_con"],
        "probability_pro": result["probability_pro"],
    }
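
# Illustrative output shape for detect_stance (the values below are made up,
# not taken from the real model):
#
#     {"predicted_stance": "PRO", "confidence": 0.91,
#      "probability_con": 0.09, "probability_pro": 0.91}
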
@mcp_server.tool()
def match_keypoint_argument(argument: str, key_point: str) -> Dict[str, Any]:
    """
    Predict whether an argument matches a key point (apparie/non_apparie).

    Args:
        argument: Text of the argument
        key_point: The reference key point

    Returns:
        Dict with prediction (0/1), label, confidence, and probabilities.
    """
    if not kpa_model_manager.model_loaded:
        raise ValueError("KPA model not loaded")

    result = kpa_model_manager.predict(argument, key_point)
    return {
        "prediction": result["prediction"],
        "label": result["label"],
        "confidence": result["confidence"],
        "probabilities": result["probabilities"],
    }

@mcp_server.tool()
def transcribe_audio(audio_path: str) -> str:
    """
    Transcribe an audio file to text (via Groq Whisper).

    Args:
        audio_path: Path to the audio file (e.g. a temp file)

    Returns:
        The transcribed text.
    """
    return speech_to_text(audio_path)

@mcp_server.tool()
def generate_speech(text: str, voice: str = "Aaliyah-PlayAI", format: str = "wav") -> str:
    """
    Generate an audio file from text (via Groq TTS).

    Args:
        text: Text to synthesize
        voice: Voice (default: Aaliyah-PlayAI)
        format: wav or mp3

    Returns:
        Path to the generated audio file.
    """
    return text_to_speech(text, voice, format)

@mcp_server.tool()
def generate_argument(user_input: str, conversation_id: Optional[str] = None) -> str:
    """
    Generate an argumentative chatbot response (via Groq Llama).

    Args:
        user_input: User input
        conversation_id: Session ID (optional)

    Returns:
        The generated response.
    """
    return generate_chat_response(user_input, conversation_id)

@mcp_server.resource("debate://prompt/{topic}")
def get_debate_prompt(topic: str) -> str:
    """Return a prompt template for generating arguments on a topic."""
    # FastMCP requires the URI template parameters to match the function
    # parameters, hence the {topic} placeholder in the URI.
    return (
        f"You are a debate expert. Generate 3 PRO arguments for the topic: "
        f"{topic}. Be concise and persuasive."
    )
def init_mcp_server(app: FastAPI) -> None:
    """
    Initialize and mount the MCP server on the FastAPI app.

    The mounted sub-app serves the MCP streamable HTTP transport; with the
    default streamable_http_path of "/mcp", the endpoint is /api/v1/mcp/mcp.
    """
    # streamable_http_app() takes no path argument; the endpoint path comes
    # from the FastMCP streamable_http_path setting (default "/mcp").
    mcp_app = mcp_server.streamable_http_app()

    app.mount("/api/v1/mcp", mcp_app)

    logger.info("✓ MCP server initialized and mounted at /api/v1/mcp with NLP/STT/TTS tools")
@mcp_server.tool()
async def health_check() -> Dict[str, Any]:
    """Report server status and the names of the registered tools."""
    # FastMCP exposes no public `tools` attribute; use the async list_tools() API.
    tools = await mcp_server.list_tools()
    return {"status": "healthy", "tools": [tool.name for tool in tools]}