import os
import httpx
import json
import time
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional, Union, Literal
from dotenv import load_dotenv
from sse_starlette.sse import EventSourceResponse
# Load environment variables
load_dotenv()
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
if not REPLICATE_API_TOKEN:
    raise ValueError("REPLICATE_API_TOKEN environment variable not set.")
# FastAPI Init
app = FastAPI(title="Replicate to OpenAI Compatibility Layer", version="4.0.0 (Docs Compliant)")
# --- Pydantic Models ---
class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "replicate"

class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = []

class ChatMessage(BaseModel):
    role: Literal["system", "user", "assistant", "tool"]
    content: Union[str, List[Dict[str, Any]]]

class OpenAIChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False
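
# Illustrative only: an example request body that the models above accept,
# following the OpenAI chat-completions shape (all values here are made up):
#   {
#     "model": "llama3-8b-instruct",
#     "messages": [{"role": "user", "content": "Hello"}],
#     "temperature": 0.7,
#     "stream": true
#   }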
# --- Supported Models ---
# Maps OpenAI-friendly names to Replicate model paths
SUPPORTED_MODELS = {
    "llama3-8b-instruct": "meta/meta-llama-3-8b-instruct",
    "claude-4.5-haiku": "anthropic/claude-4.5-haiku",
}
# --- Core Logic ---
def prepare_replicate_input(request: OpenAIChatCompletionRequest, replicate_model_id: str) -> Dict[str, Any]:
    """Formats the input specifically for the requested Replicate model."""
    payload = {}
    # Claude on Replicate strictly requires a 'prompt' string, not a 'messages' array.
    if "anthropic/claude" in replicate_model_id:
        prompt_parts = []
        system_prompt = None
        for msg in request.messages:
            if msg.role == "system":
                # Extract the system prompt if present
                system_prompt = str(msg.content)
            elif msg.role == "user":
                # Handle both simple string content and list content (for potential future vision support)
                content = msg.content
                if isinstance(content, list):
                    text_parts = [item.get("text", "") for item in content if item.get("type") == "text"]
                    content = " ".join(text_parts)
                prompt_parts.append(f"User: {content}")
            elif msg.role == "assistant":
                prompt_parts.append(f"Assistant: {msg.content}")
        # Standard Claude prompting convention: end with an open "Assistant:" turn
        prompt_parts.append("Assistant:")
        payload["prompt"] = "\n\n".join(prompt_parts)
        if system_prompt:
            payload["system_prompt"] = system_prompt
    # Llama 3 and others often support the 'messages' array natively.
    else:
        # Convert Pydantic models to plain dicts
        payload["messages"] = [msg.dict() for msg in request.messages]
    # Map common OpenAI parameters to Replicate equivalents.
    # Compare against None so legitimate zero values (e.g. temperature=0) are not dropped.
    if request.max_tokens is not None:
        payload["max_new_tokens"] = request.max_tokens
    if request.temperature is not None:
        payload["temperature"] = request.temperature
    if request.top_p is not None:
        payload["top_p"] = request.top_p
    return payload
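
# Worked example for the function above: given
#   [{"role": "system", "content": "Be terse."}, {"role": "user", "content": "Hi"}]
# the Claude branch builds
#   {"prompt": "User: Hi\n\nAssistant:", "system_prompt": "Be terse.", "temperature": 0.7, "top_p": 1.0}
# while the messages-native branch places the same message dicts under a "messages" key.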

async def stream_replicate_sse(replicate_model_id: str, input_payload: dict):
    """Handles the full streaming lifecycle using standard Replicate endpoints."""
    # 1. Start the prediction at the named-model endpoint
    url = f"https://api.replicate.com/v1/models/{replicate_model_id}/predictions"
    headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json"}
    async with httpx.AsyncClient(timeout=60.0) as client:
        try:
            # Explicitly request stream=True in the body, though it is often implicit
            response = await client.post(url, headers=headers, json={"input": input_payload, "stream": True})
            response.raise_for_status()
            prediction = response.json()
            stream_url = prediction.get("urls", {}).get("stream")
            prediction_id = prediction.get("id")
            if not stream_url:
                yield json.dumps({"error": {"message": "Model did not return a stream URL."}})
                return
        except httpx.HTTPStatusError as e:
            yield json.dumps({"error": {"message": e.response.text, "type": "upstream_error"}})
            return
        # 2. Connect to the provided stream URL
        async with client.stream("GET", stream_url, headers={"Accept": "text/event-stream"}, timeout=None) as sse:
            current_event = None
            async for line in sse.aiter_lines():
                if line.startswith("event:"):
                    current_event = line[len("event:"):].strip()
                elif line.startswith("data:"):
                    data = line[len("data:"):].strip()
                    if current_event == "output":
                        # CRITICAL: wrap in try/except to ignore empty keep-alive lines that crash standard parsers
                        try:
                            # Replicate sometimes sends raw strings, sometimes JSON.
                            # For chat models it is usually a raw string token.
                            # Try to load as JSON first; if that fails, use the raw data.
                            try:
                                content = json.loads(data)
                            except json.JSONDecodeError:
                                content = data
                            if content:  # Ensure we don't send empty chunks
                                chunk = {
                                    "id": prediction_id,
                                    "object": "chat.completion.chunk",
                                    "created": int(time.time()),
                                    "model": replicate_model_id,
                                    "choices": [{"index": 0, "delta": {"content": content}, "finish_reason": None}],
                                }
                                yield json.dumps(chunk)
                        except Exception:
                            pass  # Safely ignore malformed lines
                    elif current_event == "done":
                        break
    # 3. Send the final stop chunk and the [DONE] sentinel
    yield json.dumps({
        "id": prediction_id,
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": replicate_model_id,
        "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
    })
    yield "[DONE]"
# --- Endpoints ---
@app.get("/v1/models")
async def list_models():
    return ModelList(data=[ModelCard(id=k) for k in SUPPORTED_MODELS.keys()])

@app.post("/v1/chat/completions")
async def create_chat_completion(request: OpenAIChatCompletionRequest):
    if request.model not in SUPPORTED_MODELS:
        raise HTTPException(404, f"Model not found. Available: {list(SUPPORTED_MODELS.keys())}")
    replicate_id = SUPPORTED_MODELS[request.model]
    replicate_input = prepare_replicate_input(request, replicate_id)
    if request.stream:
        return EventSourceResponse(stream_replicate_sse(replicate_id, replicate_input))
    # Non-streaming fallback: 'Prefer: wait=60' asks Replicate to hold the request
    # open until the prediction finishes (up to 60s), so the client timeout must exceed it.
    url = f"https://api.replicate.com/v1/models/{replicate_id}/predictions"
    headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json", "Prefer": "wait=60"}
    async with httpx.AsyncClient(timeout=90.0) as client:
        resp = await client.post(url, headers=headers, json={"input": replicate_input})
        if resp.is_error:
            raise HTTPException(resp.status_code, resp.text)
        pred = resp.json()
        # Chat models return output as a list of string tokens; guard against a missing field
        output = "".join(pred.get("output") or [])
        return {
            "id": pred["id"],
            "object": "chat.completion",
            "created": int(time.time()),
            "model": request.model,
            "choices": [{"index": 0, "message": {"role": "assistant", "content": output}, "finish_reason": "stop"}],
        }