from fastapi import APIRouter, HTTPException
from app.models.request_models import StoryRequest, StoryResponse
from app.services.story_generator import StoryGenerator
from app.services.story_refinement import clean_text, refine_story
import logging
# Create the router instance; the endpoints below register themselves on it
# via the @router.post / @router.get decorators.
router = APIRouter()
# Module-level logger namespaced to this module (standard logging convention).
logger = logging.getLogger(__name__)
@router.post("/generate_story", response_model=StoryResponse)
async def generate_story(request: StoryRequest):
    """Generate a story from the request prompt, clean it, and refine it.

    Refinement is best-effort: if ``refine_story`` raises, the cleaned but
    unrefined story is returned instead. Any other failure is logged and
    surfaced to the client as an HTTP 500.

    Args:
        request: StoryRequest carrying ``prompt``, ``max_length`` and
            ``temperature``.

    Returns:
        StoryResponse wrapping the final story text.

    Raises:
        HTTPException: 500 when generation itself fails.
    """
    try:
        # Initialize generator and produce the raw story.
        generator = StoryGenerator()
        initial_story = generator.generate_story(
            request.prompt,
            request.max_length,
            request.temperature,
        )
        # Clean the raw output before refinement.
        cleaned_story = clean_text(initial_story)
        try:
            final_story = await refine_story(cleaned_story)
        except Exception:
            # Best-effort: keep the cleaned story if refinement fails.
            # logger.exception records the full traceback, which
            # logger.error(f"...") did not.
            logger.exception("Refinement failed")
            final_story = cleaned_story
        return StoryResponse(story=final_story)
    except Exception as e:
        logger.exception("Story generation failed")
        # Chain the original exception so the cause survives in tracebacks.
        raise HTTPException(
            status_code=500,
            detail=f"Story generation failed: {str(e)}",
        ) from e
@router.get("/health")
@router.head("/health")  # HEAD support lets uptime pingers probe without a body
async def health_check():
    """Liveness probe used to keep the hosting space awake.

    Responds 200 OK with a static status payload.
    """
    payload = {"status": "alive", "message": "Story Generation Service is Running"}
    return payload