File size: 1,727 Bytes
90cadf0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
# routers/ideas.py
from fastapi import APIRouter, Depends, HTTPException, status
from starlette.concurrency import run_in_threadpool
import models
import config
from services import generation, security
import logging

# Module-level logger named after this module, per stdlib logging convention.
logger = logging.getLogger(__name__)
# Router for the content-idea endpoints. Every route registered here is
# served under the /ideas prefix and is guarded by the API-key dependency,
# so individual handlers do not need to re-declare it.
router = APIRouter(
    prefix="/ideas",
    tags=["Content Ideas"],
    dependencies=[Depends(security.get_api_key)] # Apply API key security to all routes here
)

@router.post("/", response_model=models.IdeaResponse, status_code=status.HTTP_201_CREATED)
async def create_content_ideas(request: models.IdeaRequest):
    """
    Generate content ideas from a prompt using a text generation model.

    The synchronous, CPU/GPU-bound model call is offloaded to a thread
    pool so the asyncio event loop stays responsive during inference.

    Args:
        request: Validated request body carrying ``prompt``,
            ``max_length``, and ``num_ideas``.

    Returns:
        models.IdeaResponse: The generated ideas and the serving model name.

    Raises:
        HTTPException: 503 when the model raises ``RuntimeError``
            (inference failure); 500 for any other unexpected error.
    """
    # Lazy %-style args: the message is only formatted if the level is enabled.
    logger.info("Received idea generation request: prompt='%s'", request.prompt)
    try:
        # Run the synchronous, CPU/GPU-bound ML model in a thread pool.
        ideas = await run_in_threadpool(
            generation.generate_ideas_sync,  # Pass the function itself
            prompt=request.prompt,           # Pass arguments by name
            max_length=request.max_length,
            num_ideas=request.num_ideas,
        )
    except RuntimeError as e:
        # Model-level failure: report as 503 so clients know to retry later.
        # logger.exception records the traceback automatically.
        logger.exception("Model runtime error during idea generation: %s", e)
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=f"Model inference failed: {e}",
        ) from e
    except Exception as e:
        logger.exception("Unexpected error during idea generation: %s", e)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"An unexpected error occurred: {e}",
        ) from e
    # Success path kept outside the try so its own errors are not misreported.
    logger.info("Successfully generated %d idea(s).", len(ideas))
    return models.IdeaResponse(ideas=ideas, model_name=config.TEXT_MODEL_NAME)