import os
import httpx
import json
import time
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional, Union, Literal
from dotenv import load_dotenv
from sse_starlette.sse import EventSourceResponse

# Load environment variables from .env file
load_dotenv()

# --- Configuration ---
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
if not REPLICATE_API_TOKEN:
    raise ValueError("REPLICATE_API_TOKEN environment variable not set.")

# --- FastAPI App Initialization ---
app = FastAPI(
    title="Replicate to OpenAI Compatibility Layer",
    version="2.1.0 (Model Input Fixed)",
)

# --- Pydantic Models ---
class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "replicate"

class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = []

class ChatMessage(BaseModel):
    role: Literal["system", "user", "assistant", "tool"]
    content: Union[str, List[Dict[str, Any]]]

class OpenAIChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False

# --- Model Mapping ---
SUPPORTED_MODELS = {
    "llama3-8b-instruct": "meta/meta-llama-3-8b-instruct",
    "claude-4.5-haiku": "anthropic/claude-4.5-haiku",
}

# --- Helper Functions ---
def prepare_replicate_input(request: OpenAIChatCompletionRequest) -> Dict[str, Any]:
    """Prepares the input payload for Replicate, handling model-specific formats."""
    payload = {}
    # *** THIS IS THE CRITICAL FIX ***
    # Claude models on Replicate require a single 'prompt' string.
    # We must convert the 'messages' array into a formatted string.
    if "claude" in request.model:
        prompt_parts = []
        system_prompt = None
        image_url = None
        for msg in request.messages:
            if msg.role == "system":
                system_prompt = str(msg.content)
            elif msg.role == "user":
                if isinstance(msg.content, list):  # Vision case
                    for item in msg.content:
                        if item.get("type") == "text":
                            prompt_parts.append(f"User: {item.get('text', '')}")
                        elif item.get("type") == "image_url":
                            image_url = item.get("image_url", {}).get("url")
                else:  # Text-only case
                    prompt_parts.append(f"User: {msg.content}")
            elif msg.role == "assistant":
                prompt_parts.append(f"Assistant: {msg.content}")
        payload["prompt"] = "\n".join(prompt_parts)
        if system_prompt:
            payload["system_prompt"] = system_prompt
        if image_url:
            payload["image"] = image_url
    # Other models like Llama-3 accept the 'messages' array directly.
    else:
        payload["messages"] = [msg.dict() for msg in request.messages]

    # Add common parameters
    if request.max_tokens is not None:
        payload["max_new_tokens"] = request.max_tokens
    if request.temperature is not None:
        payload["temperature"] = request.temperature
    if request.top_p is not None:
        payload["top_p"] = request.top_p
    return payload
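
# Illustration of the conversion above (hypothetical request, not taken from
# the codebase): an OpenAI-style body such as
#
#   {"model": "claude-4.5-haiku",
#    "messages": [{"role": "system", "content": "Be concise."},
#                 {"role": "user", "content": "What is Replicate?"}]}
#
# would be flattened by prepare_replicate_input() into roughly
#
#   {"prompt": "User: What is Replicate?",
#    "system_prompt": "Be concise.",
#    "temperature": 0.7, "top_p": 1.0}
#
# since the default temperature/top_p values are not None and are passed through.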

async def stream_replicate_native_sse(model_id: str, payload: dict):
    """Connects to Replicate's native SSE stream for token-by-token streaming."""
    url = f"https://api.replicate.com/v1/models/{model_id}/predictions"
    headers = {
        "Authorization": f"Bearer {REPLICATE_API_TOKEN}",
        "Content-Type": "application/json",
    }
    async with httpx.AsyncClient(timeout=300) as client:
        try:
            response = await client.post(url, headers=headers, json={"input": payload, "stream": True})
            response.raise_for_status()
            prediction = response.json()
            stream_url = prediction.get("urls", {}).get("stream")
            if not stream_url:
                error_detail = prediction.get("detail", "Failed to get stream URL.")
                yield json.dumps({"error": {"message": error_detail}})
                return
        except httpx.HTTPStatusError as e:
            try:
                error_body = e.response.json()
                yield json.dumps({"error": {"message": json.dumps(error_body)}})
            except json.JSONDecodeError:
                yield json.dumps({"error": {"message": e.response.text}})
            return

        try:
            async with client.stream("GET", stream_url, headers={"Accept": "text/event-stream"}) as sse:
                sse.raise_for_status()
                current_event = ""
                async for line in sse.aiter_lines():
                    if line.startswith("event:"):
                        current_event = line[len("event:"):].strip()
                    elif line.startswith("data:"):
                        data = line[len("data:"):].strip()
                        if current_event == "output":
                            chunk = {
                                "id": prediction["id"],
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": model_id,
                                "choices": [{"index": 0, "delta": {"content": json.loads(data)}, "finish_reason": None}],
                            }
                            yield json.dumps(chunk)
                        elif current_event == "done":
                            break
        except Exception as e:
            yield json.dumps({"error": {"message": f"Streaming error: {str(e)}"}})

        done_chunk = {
            "id": prediction["id"],
            "object": "chat.completion.chunk",
            "created": int(time.time()),
            "model": model_id,
            "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
        }
        yield json.dumps(done_chunk)
        yield "[DONE]"
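
# Over the wire, EventSourceResponse wraps each string this generator yields
# in an SSE "data:" frame, so a client sees OpenAI-style chunks. A sketch of
# the resulting stream (token text and prediction id are illustrative):
#
#   data: {"id": "p1", "object": "chat.completion.chunk", ...,
#          "choices": [{"index": 0, "delta": {"content": "Hello"}, "finish_reason": null}]}
#   data: {"id": "p1", ..., "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]}
#   data: [DONE]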

# --- API Endpoints ---
@app.get("/v1/models", response_model=ModelList)
async def list_models():
    return ModelList(data=[ModelCard(id=model_name) for model_name in SUPPORTED_MODELS.keys()])

@app.post("/v1/chat/completions")
async def create_chat_completion(request: OpenAIChatCompletionRequest):
    model_key = request.model
    if model_key not in SUPPORTED_MODELS:
        raise HTTPException(
            status_code=404,
            detail=f"Model not found. Supported models: {list(SUPPORTED_MODELS.keys())}",
        )
    replicate_model_id = SUPPORTED_MODELS[model_key]
    replicate_input = prepare_replicate_input(request)

    if request.stream:
        return EventSourceResponse(stream_replicate_native_sse(replicate_model_id, replicate_input))

    # Synchronous request
    url = f"https://api.replicate.com/v1/models/{replicate_model_id}/predictions"
    headers = {
        "Authorization": f"Bearer {REPLICATE_API_TOKEN}",
        "Content-Type": "application/json",
        "Prefer": "wait=120",
    }
    async with httpx.AsyncClient(timeout=150) as client:
        try:
            response = await client.post(url, headers=headers, json={"input": replicate_input})
            response.raise_for_status()
            prediction = response.json()
            # 'output' can be null if the prediction hasn't produced anything
            # yet, so guard before joining.
            output = "".join(prediction.get("output") or [])
            return JSONResponse(content={
                "id": prediction["id"],
                "object": "chat.completion",
                "created": int(time.time()),
                "model": model_key,
                "choices": [{"index": 0, "message": {"role": "assistant", "content": output}, "finish_reason": "stop"}],
                "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
            })
        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=e.response.text)
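
# A minimal way to run this server locally (a sketch: assumes `uvicorn` is
# installed and this file is the entry point; neither is stated in the
# original code):
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)

# Example request against the running server (illustrative):
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama3-8b-instruct", "messages": [{"role": "user", "content": "Hi"}]}'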