# MentorMe AI recommendation server — FastAPI application entry point.
| from fastapi import FastAPI, Request | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from contextlib import asynccontextmanager | |
| import os | |
| import logging | |
| from config.settings import get_settings | |
| from routers import mentors, recommend, health | |
| from services.embedding_service import EmbeddingService | |
# Root logging configuration: timestamped, module-qualified records at INFO level.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Application settings loaded once at import time (see config.settings.get_settings).
settings = get_settings()
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the embedding service on startup, release it on shutdown.

    FastAPI's ``lifespan=`` parameter expects an async context manager, so the
    ``@asynccontextmanager`` decorator is required here — it was imported at the
    top of the file but never applied, leaving the lifespan hook broken.
    """
    logger.info("Starting application...")
    try:
        logger.info("Loading embedding service...")
        # Heavy model load happens once at startup rather than per-request.
        embedding_service = EmbeddingService()
        app.state.embedding_service = embedding_service
        logger.info("Application started successfully")
    except Exception as e:
        # Startup failure is fatal: log with traceback and re-raise so the
        # server refuses to come up in a broken state.
        logger.error(f"Failed to start application: {str(e)}", exc_info=True)
        raise

    yield  # application serves requests while suspended here

    logger.info("Shutting down application...")
    # Drop the model reference so its memory can be reclaimed on shutdown.
    if hasattr(app.state, 'embedding_service'):
        del app.state.embedding_service
# ASGI application instance; `lifespan` wires model loading/teardown to
# server startup/shutdown.
app = FastAPI(
    title="MentorMe AI Recommendation Server",
    description="AI-powered mentor-mentee recommendation using Vietnamese_Embedding and Pinecone",
    version="1.0.0",
    lifespan=lifespan
)
# Allowed browser origins; an empty/unset setting falls back to allowing all.
cors_origins = settings.CORS_ORIGINS or ["*"]
# NOTE(review): the CORS spec forbids the "*" wildcard together with
# credentials — browsers will reject credentialed cross-origin requests when
# the fallback is in effect. Confirm CORS_ORIGINS is set explicitly in any
# deployment that relies on cookies or the Authorization header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=cors_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount the feature routers under a shared versioned API prefix.
app.include_router(health.router, prefix="/api/v1", tags=["Health"])
app.include_router(mentors.router, prefix="/api/v1", tags=["Mentors"])
app.include_router(recommend.router, prefix="/api/v1", tags=["Recommendations"])
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """HTTP middleware logging every request and its response status.

    NOTE(review): without the ``@app.middleware("http")`` registration this
    function was dead code — the decorator appears to have been lost and is
    restored here; confirm against the original deployment.
    """
    logger.info(f"Incoming request: {request.method} {request.url}")
    # Full header dump only at DEBUG to avoid leaking tokens in normal logs.
    logger.debug(f"Headers: {dict(request.headers)}")
    response = await call_next(request)
    logger.info(f"Response status: {response.status_code}")
    return response
@app.get("/")
async def root():
    """Landing endpoint: service name, version, and a map of the main API routes.

    NOTE(review): this handler was never registered on ``app``; ``@app.get("/")``
    is restored here (the root path is implied by the endpoint-listing payload) —
    confirm the intended route.
    """
    return {
        "message": "MentorMe AI Recommendation Server",
        "version": "1.0.0",
        "status": "running",
        "endpoints": {
            "health": "/api/v1/health",
            "upsert_mentor": "/api/v1/mentors/upsert",
            "recommend": "/api/v1/recommend"
        }
    }
# NOTE(review): this handler is never registered on `app` — a route decorator
# (e.g. `@app.post("/...")`) appears to have been lost, and the intended path
# cannot be inferred from the visible code. As written this is dead code;
# confirm the route before relying on it.
async def test_post(request: Request):
    """Debug endpoint: log and echo back the method, URL and headers of a POST.

    Reads the raw request body for logging only; the body is not returned.
    """
    logger.info(f"Test POST received: {request.method} {request.url}")
    logger.info(f"Headers: {dict(request.headers)}")
    body = await request.body()
    logger.info(f"Body: {body}")
    return {
        "message": "POST request received successfully",
        "method": request.method,
        "url": str(request.url),
        "headers": dict(request.headers)
    }