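"""Replicate-to-OpenAI compatibility layer.

Exposes OpenAI-style /v1/models and /v1/chat/completions endpoints and
proxies them to models hosted on Replicate.
"""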
import json
import os
import time
from typing import Any, Dict, List, Literal, Optional, Union

import httpx
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
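
# The third-party imports above come from these PyPI packages (uvicorn is
# additionally needed to serve the app):
#   pip install fastapi httpx pydantic python-dotenv sse-starlette uvicorn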

load_dotenv()
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
if not REPLICATE_API_TOKEN:
    # Fail fast at startup rather than on the first request.
    raise ValueError("REPLICATE_API_TOKEN environment variable not set.")

app = FastAPI(title="Replicate to OpenAI Compatibility Layer", version="4.0.0 (Docs Compliant)")


class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "replicate"


class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = []


class ChatMessage(BaseModel):
    role: Literal["system", "user", "assistant", "tool"]
    content: Union[str, List[Dict[str, Any]]]


class OpenAIChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False


# Maps the client-facing model names to Replicate model identifiers.
SUPPORTED_MODELS = {
    "llama3-8b-instruct": "meta/meta-llama-3-8b-instruct",
    "claude-4.5-haiku": "anthropic/claude-4.5-haiku",
}


def prepare_replicate_input(request: OpenAIChatCompletionRequest, replicate_model_id: str) -> Dict[str, Any]:
    """Formats the input specifically for the requested Replicate model."""
    payload: Dict[str, Any] = {}

    if "anthropic/claude" in replicate_model_id:
        prompt_parts = []
        system_prompt = None
        for msg in request.messages:
            if msg.role == "system":
                system_prompt = str(msg.content)
            elif msg.role == "user":
                content = msg.content
                # OpenAI allows content to be a list of typed parts; keep only the text parts.
                if isinstance(content, list):
                    text_parts = [item.get("text", "") for item in content if item.get("type") == "text"]
                    content = " ".join(text_parts)
                prompt_parts.append(f"User: {content}")
            elif msg.role == "assistant":
                prompt_parts.append(f"Assistant: {msg.content}")
        # A trailing "Assistant:" cues the model to produce the next reply.
        prompt_parts.append("Assistant:")
        payload["prompt"] = "\n\n".join(prompt_parts)
        if system_prompt:
            payload["system_prompt"] = system_prompt
    else:
        # Most Replicate text models expect a plain string prompt rather than
        # a list of message dicts, so flatten the conversation.
        payload["prompt"] = "\n\n".join(f"{msg.role.capitalize()}: {msg.content}" for msg in request.messages)

    # Explicit None checks so legitimate zero values (e.g. temperature=0) are not dropped.
    if request.max_tokens is not None:
        payload["max_new_tokens"] = request.max_tokens
    if request.temperature is not None:
        payload["temperature"] = request.temperature
    if request.top_p is not None:
        payload["top_p"] = request.top_p

    return payload
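

# Illustrative example (not executed): for a Claude-routed request with messages
#   [{"role": "system", "content": "Be brief."}, {"role": "user", "content": "Hi"}]
# the function above produces an input payload like
#   {"prompt": "User: Hi\n\nAssistant:", "system_prompt": "Be brief.",
#    "temperature": 0.7, "top_p": 1.0}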


async def stream_replicate_sse(replicate_model_id: str, input_payload: dict):
    """Handles the full streaming lifecycle using standard Replicate endpoints."""
    url = f"https://api.replicate.com/v1/models/{replicate_model_id}/predictions"
    headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json"}

    async with httpx.AsyncClient(timeout=60.0) as client:
        # Create the prediction first; Replicate responds with a URL for the
        # server-sent-event stream of output tokens.
        try:
            response = await client.post(url, headers=headers, json={"input": input_payload, "stream": True})
            response.raise_for_status()
            prediction = response.json()
            stream_url = prediction.get("urls", {}).get("stream")
            prediction_id = prediction.get("id")

            if not stream_url:
                yield json.dumps({"error": {"message": "Model did not return a stream URL."}})
                return
        except httpx.HTTPStatusError as e:
            yield json.dumps({"error": {"message": e.response.text, "type": "upstream_error"}})
            return

        async with client.stream("GET", stream_url, headers={"Accept": "text/event-stream"}, timeout=None) as sse:
            current_event = None
            async for line in sse.aiter_lines():
                if line.startswith("event:"):
                    current_event = line[len("event:"):].strip()
                elif line.startswith("data:"):
                    data = line[len("data:"):].strip()
                    if current_event == "output":
                        # Output tokens are usually raw text; fall back to the
                        # raw string when the data is not valid JSON.
                        try:
                            content = json.loads(data)
                        except json.JSONDecodeError:
                            content = data
                        if content:
                            chunk = {
                                "id": prediction_id,
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": replicate_model_id,
                                "choices": [{"index": 0, "delta": {"content": str(content)}, "finish_reason": None}],
                            }
                            yield json.dumps(chunk)
                    elif current_event == "done":
                        break

    # Emit the terminal chunk and OpenAI's stream sentinel.
    yield json.dumps({
        "id": prediction_id,
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": replicate_model_id,
        "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
    })
    yield "[DONE]"


@app.get("/v1/models")
async def list_models():
    return ModelList(data=[ModelCard(id=k) for k in SUPPORTED_MODELS.keys()])


@app.post("/v1/chat/completions")
async def create_chat_completion(request: OpenAIChatCompletionRequest):
    if request.model not in SUPPORTED_MODELS:
        raise HTTPException(404, f"Model not found. Available: {list(SUPPORTED_MODELS.keys())}")

    replicate_id = SUPPORTED_MODELS[request.model]
    replicate_input = prepare_replicate_input(request, replicate_id)

    if request.stream:
        return EventSourceResponse(stream_replicate_sse(replicate_id, replicate_input))

    # Non-streaming: Replicate's "Prefer: wait=60" sync mode holds the
    # connection open for up to 60 seconds while the prediction runs, so the
    # client timeout must exceed that window.
    url = f"https://api.replicate.com/v1/models/{replicate_id}/predictions"
    headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json", "Prefer": "wait=60"}
    async with httpx.AsyncClient(timeout=90.0) as client:
        resp = await client.post(url, headers=headers, json={"input": replicate_input})
        if resp.is_error:
            raise HTTPException(resp.status_code, resp.text)
        pred = resp.json()
        # Language models return output as a list of text chunks; guard
        # against a null output if the prediction has not finished.
        output = "".join(pred.get("output") or [])
        return {
            "id": pred["id"],
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [{"index": 0, "message": {"role": "assistant", "content": output}, "finish_reason": "stop"}],
        }