Spaces:
Sleeping
Sleeping
| # routers/ideas.py | |
| from fastapi import APIRouter, Depends, HTTPException, status | |
| from starlette.concurrency import run_in_threadpool | |
| import models | |
| import config | |
| from services import generation, security | |
| import logging | |
| logger = logging.getLogger(__name__) | |
# Router for content-idea endpoints. The prefix means every route here is
# mounted under /ideas, and the router-level dependency enforces API-key
# auth on all of them.
router = APIRouter(
    prefix="/ideas",
    tags=["Content Ideas"],
    dependencies=[Depends(security.get_api_key)]  # Apply API key security to all routes here
)
# NOTE(review): no route decorator was present on this handler in the original
# source, so FastAPI never registered it and the endpoint was unreachable.
# POST on the router root (-> POST /ideas) is the conventional creation path —
# confirm against the API spec / client code.
@router.post("", response_model=models.IdeaResponse)
async def create_content_ideas(request: models.IdeaRequest) -> models.IdeaResponse:
    """Generate content ideas from a prompt using the text generation model.

    The synchronous, CPU/GPU-bound model call is offloaded to a thread pool so
    the asyncio event loop stays responsive while inference runs.

    Args:
        request: Validated body carrying ``prompt``, ``max_length`` and
            ``num_ideas``.

    Returns:
        models.IdeaResponse: The generated ideas plus the serving model's name.

    Raises:
        HTTPException: 503 when the model raises ``RuntimeError`` during
            inference; 500 for any other unexpected failure.
    """
    try:
        # Lazy %-args: the message is only formatted if INFO is enabled.
        logger.info("Received idea generation request: prompt=%r", request.prompt)
        # Run the synchronous, CPU/GPU-bound ML model in a thread pool.
        ideas = await run_in_threadpool(
            generation.generate_ideas_sync,  # pass the function itself, not a call
            prompt=request.prompt,           # pass arguments by name
            max_length=request.max_length,
            num_ideas=request.num_ideas,
        )
        logger.info("Successfully generated %d idea(s).", len(ideas))
        return models.IdeaResponse(ideas=ideas, model_name=config.TEXT_MODEL_NAME)
    except RuntimeError as e:
        # Model-level failure: surface as 503 so clients know inference is
        # temporarily unavailable. logger.exception records the traceback.
        logger.exception("Model runtime error during idea generation: %s", e)
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=f"Model inference failed: {e}",
        )
    except Exception as e:
        # Catch-all boundary for this endpoint: log with traceback, return 500.
        logger.exception("Unexpected error during idea generation: %s", e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"An unexpected error occurred: {e}",
        )