|
|
import sys |
|
|
from pathlib import Path |
|
|
import os |
|
|
import subprocess |
|
|
import logging |
|
|
from contextlib import asynccontextmanager |
|
|
|
|
|
|
|
|
# Configure root logging once at import time so every module using
# logging.getLogger(...) inherits the same INFO threshold and format.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Module-level logger for this entry-point module.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
# Make the application directory importable so sibling modules
# (config, routes, services) resolve regardless of the current working
# directory the process was launched from.
app_dir = Path(__file__).parent

if str(app_dir) not in sys.path:
    sys.path.insert(0, str(app_dir))
|
|
|
|
|
def _ffmpeg_version():
    """Return the installed ffmpeg version string, or None if unavailable.

    Runs ``ffmpeg -version`` and parses the third whitespace token of its
    output (e.g. "ffmpeg version 4.4.2 ..." -> "4.4.2"). Returns "unknown"
    if ffmpeg runs but prints an unexpected format, and None if the binary
    is missing or exits non-zero.
    """
    try:
        result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
    except FileNotFoundError:
        return None
    if result.returncode != 0:
        return None
    tokens = result.stdout.split()
    # Guard against unexpected output shapes instead of raising IndexError.
    return tokens[2] if len(tokens) >= 3 else "unknown"


def install_ffmpeg():
    """Install ffmpeg on system (required for audio processing).

    Returns True if ffmpeg is (or becomes) available, False otherwise.
    Installation uses apt-get, so it only succeeds on Debian-based systems
    with sufficient privileges; all failures are logged as warnings rather
    than raised, keeping startup best-effort.
    """
    try:
        # shutil.which is portable and avoids spawning a `which` subprocess.
        import shutil
        if shutil.which("ffmpeg"):
            version = _ffmpeg_version()
            if version is not None:
                logger.info(f"β ffmpeg already installed: {version}")
                return True

        logger.info("Installing ffmpeg...")
        subprocess.run(["apt-get", "update"], check=True, capture_output=True)
        subprocess.run(["apt-get", "install", "-y", "ffmpeg"], check=True, capture_output=True)

        # Verify the install actually produced a runnable binary.
        version = _ffmpeg_version()
        if version is not None:
            logger.info(f"β ffmpeg installed successfully: {version}")
            return True
        return False
    except Exception as e:
        # Best-effort: the API can still start without audio support.
        logger.warning(f"β οΈ ffmpeg installation warning: {e}")
        return False
|
|
|
|
|
|
|
|
logger.info("="*60) |
|
|
logger.info("Checking system dependencies...") |
|
|
logger.info("="*60) |
|
|
install_ffmpeg() |
|
|
|
|
|
from fastapi import FastAPI |
|
|
from fastapi.middleware.cors import CORSMiddleware |
|
|
import uvicorn |
|
|
|
|
|
from config import ( |
|
|
API_TITLE, API_DESCRIPTION, API_VERSION, |
|
|
HUGGINGFACE_API_KEY, HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_LABEL_MODEL_ID, |
|
|
HOST, PORT, RELOAD, |
|
|
CORS_ORIGINS, CORS_CREDENTIALS, CORS_METHODS, CORS_HEADERS, |
|
|
PRELOAD_MODELS_ON_STARTUP, LOAD_STANCE_MODEL, LOAD_KPA_MODEL, |
|
|
LOAD_STT_MODEL, LOAD_CHATBOT_MODEL |
|
|
) |
|
|
|
|
|
def _load_optional_model(intro_message, model_name, loader):
    """Attempt a single optional model load, logging the outcome without raising.

    Args:
        intro_message: log line emitted before the load attempt.
        model_name: human-readable name used in the success/failure log lines.
        loader: zero-argument callable performing the import and the load; the
            import happens inside the callable so an ImportError is reported
            the same way as a load failure.
    """
    try:
        logger.info(intro_message)
        loader()
        logger.info(f"β {model_name} loaded successfully")
    except Exception as e:
        logger.error(f"β {model_name} loading failed: {str(e)}")


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load models on startup and cleanup on shutdown.

    Each model load is independent and best-effort: a single failure is
    logged but never prevents the API from starting.
    """
    logger.info("="*60)
    logger.info("π STARTING API - Loading Models...")
    logger.info("="*60)

    if PRELOAD_MODELS_ON_STARTUP:

        if LOAD_STANCE_MODEL:
            def _stance():
                from services.stance_model_manager import load_model as load_stance
                load_stance(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
            _load_optional_model(
                f"Loading Stance Model: {HUGGINGFACE_STANCE_MODEL_ID}",
                "Stance model",
                _stance,
            )

        if LOAD_KPA_MODEL:
            def _kpa():
                from services.label_model_manager import load_model as load_kpa
                load_kpa(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
            _load_optional_model(
                f"Loading KPA Model: {HUGGINGFACE_LABEL_MODEL_ID}",
                "KPA model",
                _kpa,
            )

        if LOAD_STT_MODEL:
            def _stt():
                from services.stt_service import load_stt_model
                load_stt_model()
            _load_optional_model("Loading STT Model (Whisper)...", "STT model", _stt)

        if LOAD_CHATBOT_MODEL:
            def _chatbot():
                from services.chatbot_service import load_chatbot_model
                load_chatbot_model()
            _load_optional_model(
                "Loading Chatbot Model (DialoGPT)...", "Chatbot model", _chatbot
            )

    logger.info("="*60)
    logger.info("β API startup complete - Ready to serve requests")
    logger.info(f"π API Docs: http://{HOST}:{PORT}/docs")
    logger.info("="*60)

    yield

    # Everything after the yield runs on application shutdown.
    logger.info("Shutting down API...")
|
|
|
|
|
|
|
|
# FastAPI application instance. `lifespan` handles model pre-loading on
# startup and shutdown logging; metadata comes from config so the docs
# pages stay in sync with deployment settings.
app = FastAPI(
    title=API_TITLE,
    description=API_DESCRIPTION,
    version=API_VERSION,
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan,
)
|
|
|
|
|
|
|
|
# CORS policy is fully config-driven so deployments can restrict origins,
# methods, and headers without code changes.
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ORIGINS,
    allow_credentials=CORS_CREDENTIALS,
    allow_methods=CORS_METHODS,
    allow_headers=CORS_HEADERS,
)
|
|
|
|
|
|
|
|
# Audio route registration is best-effort: if the audio stack (or one of
# its heavy dependencies) fails to import, the core API still serves and
# the failure is only logged as a warning.
try:
    from routes.audio import router as audio_router
    app.include_router(audio_router, prefix="/audio", tags=["Audio - Voice Chatbot"])
    logger.info("β Audio routes registered")
except Exception as e:
    logger.warning(f"β οΈ Audio routes failed to load: {e}")
|
|
|
|
|
# Main API route registration, also best-effort so a broken route module
# degrades the service instead of preventing startup entirely.
try:
    from routes import api_router
    app.include_router(api_router)
    logger.info("β API routes registered")
except Exception as e:
    logger.warning(f"β οΈ API routes failed to load: {e}")
|
|
|
|
|
|
|
|
@app.get("/") |
|
|
async def root(): |
|
|
"""Root endpoint""" |
|
|
return { |
|
|
"message": "NLP Debater API with Voice Chatbot", |
|
|
"status": "healthy", |
|
|
"version": API_VERSION, |
|
|
"docs": "/docs", |
|
|
"endpoints": { |
|
|
"audio": "/docs#/Audio%20-%20Voice%20Chatbot", |
|
|
"health": "/health", |
|
|
"models-status": "/models-status" |
|
|
} |
|
|
} |
|
|
|
|
|
@app.get("/health") |
|
|
async def health_check(): |
|
|
"""Simple health check""" |
|
|
return {"status": "healthy", "message": "API is running"} |
|
|
|
|
|
@app.get("/models-status") |
|
|
async def models_status(): |
|
|
"""Check which models are loaded""" |
|
|
status = { |
|
|
"stt_model": "unknown", |
|
|
"tts_engine": "gtts (free)", |
|
|
"chatbot_model": "unknown" |
|
|
} |
|
|
|
|
|
try: |
|
|
from services.stt_service import stt_pipeline |
|
|
status["stt_model"] = "loaded" if stt_pipeline is not None else "not loaded" |
|
|
except: |
|
|
status["stt_model"] = "error" |
|
|
|
|
|
try: |
|
|
from services.chatbot_service import chatbot_pipeline |
|
|
status["chatbot_model"] = "loaded" if chatbot_pipeline is not None else "not loaded" |
|
|
except: |
|
|
status["chatbot_model"] = "error" |
|
|
|
|
|
return status |
|
|
|
|
|
@app.get("/check-ffmpeg") |
|
|
async def check_ffmpeg(): |
|
|
"""Check if ffmpeg is installed""" |
|
|
try: |
|
|
result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True) |
|
|
if result.returncode == 0: |
|
|
version = result.stdout.split('\n')[0] |
|
|
return {"status": "available", "version": version} |
|
|
else: |
|
|
return {"status": "error", "error": result.stderr} |
|
|
except FileNotFoundError: |
|
|
return {"status": "not found", "error": "ffmpeg is not installed"} |
|
|
|
|
|
if __name__ == "__main__": |
|
|
logger.info(f"π Starting server on {HOST}:{PORT}") |
|
|
logger.info(f"π Documentation: http://{HOST}:{PORT}/docs") |
|
|
|
|
|
uvicorn.run( |
|
|
"main:app", |
|
|
host=HOST, |
|
|
port=PORT, |
|
|
reload=RELOAD, |
|
|
log_level="info" |
|
|
) |
|
|
|