# flow2api — src/api/routes.py
# Synced from GitHub: ed4fdc688e67158a952c221a48279fe821e96030 (791d9c1, verified)
"""API routes - OpenAI compatible endpoints"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from typing import List
import base64
import re
import json
from ..core.auth import verify_api_key_header
from ..core.models import ChatCompletionRequest
from ..services.generation_handler import GenerationHandler, MODEL_CONFIG
router = APIRouter()
# Dependency injection will be set up in main.py
# Module-level singleton handler; remains None until main.py calls
# set_generation_handler() during startup, before any request is served.
generation_handler: GenerationHandler = None
def set_generation_handler(handler: GenerationHandler):
    """Install the module-level GenerationHandler used by the route functions.

    Called once from main.py during application startup (manual dependency
    injection); route handlers read the ``generation_handler`` global.
    """
    global generation_handler
    generation_handler = handler
@router.get("/v1/models")
async def list_models(api_key: str = Depends(verify_api_key_header)):
    """Return the model catalog in the OpenAI ``GET /v1/models`` list shape."""
    entries = []
    for model_id, cfg in MODEL_CONFIG.items():
        # Image configs carry a display 'model_name'; every other type
        # (e.g. video) carries a 'model_key' instead.
        detail = cfg['model_name'] if cfg['type'] == 'image' else cfg['model_key']
        entries.append({
            "id": model_id,
            "object": "model",
            "owned_by": "flow2api",
            "description": f"{cfg['type'].capitalize()} generation - {detail}"
        })
    return {"object": "list", "data": entries}
def _decode_data_url_image(image_url: str):
    """Decode a base64 ``data:image/...;base64,...`` URL into raw bytes.

    Returns None when the URL is not a data-image URL or lacks a base64
    payload. Raises HTTPException(400) when the payload is not valid base64,
    so callers surface a client error instead of a generic 500.
    """
    if not image_url.startswith("data:image"):
        return None
    match = re.search(r"base64,(.+)", image_url)
    if not match:
        return None
    try:
        # binascii.Error is a ValueError subclass, so this covers bad padding.
        return base64.b64decode(match.group(1))
    except ValueError:
        raise HTTPException(status_code=400, detail="Invalid base64 image data")


def _extract_prompt_and_images(content):
    """Extract (prompt, images) from an OpenAI-style message ``content``.

    Accepts either a plain string or the multimodal list-of-parts format.
    All ``text`` parts are concatenated (newline-joined) — the previous
    implementation kept only the last text part, silently dropping earlier
    segments. Returns ("", []) for unrecognized content shapes.
    """
    images: List[bytes] = []
    if isinstance(content, str):
        return content, images
    prompt_parts: List[str] = []
    if isinstance(content, list):
        for item in content:
            if item.get("type") == "text":
                prompt_parts.append(item.get("text", ""))
            elif item.get("type") == "image_url":
                url = item.get("image_url", {}).get("url", "")
                image_bytes = _decode_data_url_image(url)
                if image_bytes is not None:
                    images.append(image_bytes)
    return "\n".join(prompt_parts), images


@router.post("/v1/chat/completions")
async def create_chat_completion(
    request: ChatCompletionRequest,
    api_key: str = Depends(verify_api_key_header)
):
    """Create chat completion (unified endpoint for image and video generation).

    Reads the prompt (and any base64 data-URL images) from the last message,
    then delegates to the injected GenerationHandler. Streams SSE chunks when
    ``request.stream`` is set; otherwise returns the handler's final JSON.

    Raises:
        HTTPException 400: empty messages, empty prompt, or malformed base64.
        HTTPException 503: handler not yet injected via set_generation_handler.
        HTTPException 500: any unexpected generation failure.
    """
    try:
        if generation_handler is None:
            # Startup wiring (main.py) has not run yet — fail loudly rather
            # than with an opaque AttributeError-backed 500.
            raise HTTPException(status_code=503, detail="Generation handler not initialized")
        if not request.messages:
            raise HTTPException(status_code=400, detail="Messages cannot be empty")

        prompt, images = _extract_prompt_and_images(request.messages[-1].content)

        # Fallback to the deprecated top-level `image` parameter, but only
        # when the message content supplied no images of its own.
        if request.image and not images:
            image_bytes = _decode_data_url_image(request.image)
            if image_bytes is not None:
                images.append(image_bytes)

        if not prompt:
            raise HTTPException(status_code=400, detail="Prompt cannot be empty")

        if request.stream:
            # Streaming response: relay handler chunks as-is, then terminate
            # the SSE stream with the OpenAI [DONE] sentinel.
            async def generate():
                async for chunk in generation_handler.handle_generation(
                    model=request.model,
                    prompt=prompt,
                    images=images if images else None,
                    stream=True
                ):
                    yield chunk
                yield "data: [DONE]\n\n"

            return StreamingResponse(
                generate(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    # Disable nginx proxy buffering so chunks flush immediately.
                    "X-Accel-Buffering": "no"
                }
            )
        else:
            # Non-streaming: drain the generator, keeping the final chunk.
            result = None
            async for chunk in generation_handler.handle_generation(
                model=request.model,
                prompt=prompt,
                images=images if images else None,
                stream=False
            ):
                result = chunk
            if result:
                try:
                    return JSONResponse(content=json.loads(result))
                except json.JSONDecodeError:
                    # Handler emitted a non-JSON payload; wrap it verbatim.
                    return JSONResponse(content={"result": result})
            raise HTTPException(status_code=500, detail="Generation failed: No response from handler")
    except HTTPException:
        # Preserve deliberate status codes (400/503) raised above.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))