| """API routes - OpenAI compatible endpoints""" |
| from fastapi import APIRouter, Depends, HTTPException |
| from fastapi.responses import StreamingResponse, JSONResponse |
| from typing import List |
| import base64 |
| import re |
| import json |
| from ..core.auth import verify_api_key_header |
| from ..core.models import ChatCompletionRequest |
| from ..services.generation_handler import GenerationHandler, MODEL_CONFIG |
|
|
router = APIRouter()


# Module-level handler singleton. It is injected once at application startup
# via set_generation_handler() below; until then it is None, so the app must
# call the setter before serving any generation requests.
generation_handler: GenerationHandler = None
|
|
|
|
def set_generation_handler(handler: GenerationHandler):
    """Install the module-level GenerationHandler used by all routes.

    Called once during application startup, before any request is served.
    """
    global generation_handler
    generation_handler = handler
|
|
|
|
@router.get("/v1/models")
async def list_models(api_key: str = Depends(verify_api_key_header)):
    """List available models in OpenAI-compatible ``/v1/models`` format."""
    # Image models are described by their model_name, everything else by model_key.
    data = [
        {
            "id": model_id,
            "object": "model",
            "owned_by": "flow2api",
            "description": (
                f"{cfg['type'].capitalize()} generation - "
                + (cfg['model_name'] if cfg['type'] == 'image' else cfg['model_key'])
            ),
        }
        for model_id, cfg in MODEL_CONFIG.items()
    ]
    return {"object": "list", "data": data}
|
|
|
|
def _decode_data_url_image(data_url: str):
    """Decode a base64 ``data:image/...;base64,...`` URL into raw bytes.

    Returns None when the value is not an image data URL or carries no
    base64 payload.
    """
    if not data_url.startswith("data:image"):
        return None
    match = re.search(r"base64,(.+)", data_url)
    if not match:
        return None
    return base64.b64decode(match.group(1))


@router.post("/v1/chat/completions")
async def create_chat_completion(
    request: ChatCompletionRequest,
    api_key: str = Depends(verify_api_key_header)
):
    """Create chat completion (unified endpoint for image and video generation).

    Extracts the prompt and any inline base64 images from the last message,
    then delegates to the module-level generation handler, either streaming
    SSE chunks or returning a single JSON response.

    Raises:
        HTTPException: 503 if the handler is not initialized, 400 on empty
            messages/prompt, 500 on handler failure or unexpected errors.
    """
    try:
        if generation_handler is None:
            # set_generation_handler() must run at startup; without this guard
            # a missing handler surfaced as an opaque 500 AttributeError.
            raise HTTPException(status_code=503, detail="Generation handler not initialized")

        if not request.messages:
            raise HTTPException(status_code=400, detail="Messages cannot be empty")

        # Only the last message drives generation; earlier history is ignored.
        content = request.messages[-1].content

        text_parts: List[str] = []
        images: List[bytes] = []

        if isinstance(content, str):
            # Plain-text message body.
            text_parts.append(content)
        elif isinstance(content, list):
            # OpenAI-style multimodal content: a mix of text and image_url parts.
            for item in content:
                if item.get("type") == "text":
                    text_parts.append(item.get("text", ""))
                elif item.get("type") == "image_url":
                    image_bytes = _decode_data_url_image(
                        item.get("image_url", {}).get("url", "")
                    )
                    if image_bytes is not None:
                        images.append(image_bytes)

        # Join every text part (previously only the last part survived,
        # silently dropping earlier text in multi-part messages).
        prompt = "\n".join(part for part in text_parts if part)

        # Legacy top-level `image` field, honored only when the message
        # itself carried no images.
        if request.image and not images:
            image_bytes = _decode_data_url_image(request.image)
            if image_bytes is not None:
                images.append(image_bytes)

        if not prompt:
            raise HTTPException(status_code=400, detail="Prompt cannot be empty")

        if request.stream:
            async def generate():
                # Forward SSE chunks from the handler, then the OpenAI
                # stream terminator.
                async for chunk in generation_handler.handle_generation(
                    model=request.model,
                    prompt=prompt,
                    images=images if images else None,
                    stream=True
                ):
                    yield chunk
                yield "data: [DONE]\n\n"

            return StreamingResponse(
                generate(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no"  # disable proxy buffering for SSE
                }
            )

        # Non-streaming: drain the generator, keeping only the final chunk.
        result = None
        async for chunk in generation_handler.handle_generation(
            model=request.model,
            prompt=prompt,
            images=images if images else None,
            stream=False
        ):
            result = chunk

        if not result:
            raise HTTPException(status_code=500, detail="Generation failed: No response from handler")

        try:
            return JSONResponse(content=json.loads(result))
        except json.JSONDecodeError:
            # Handler produced plain text; wrap it so the response stays JSON.
            return JSONResponse(content={"result": result})

    except HTTPException:
        # Re-raise intentional HTTP errors untouched.
        raise
    except Exception as e:
        # Boundary handler: convert unexpected failures into a 500.
        raise HTTPException(status_code=500, detail=str(e))
|
|