import os
import httpx
import json
import time
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional, Union, Literal
from dotenv import load_dotenv
from sse_starlette.sse import EventSourceResponse
# Load environment variables
load_dotenv()
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")
if not REPLICATE_API_TOKEN:
    raise ValueError("REPLICATE_API_TOKEN environment variable not set.")
# FastAPI Init
app = FastAPI(title="Replicate to OpenAI Compatibility Layer", version="4.2.0 (Prompt Format Fixed)")
# --- Pydantic Models ---
class ModelCard(BaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "replicate"

class ModelList(BaseModel):
    object: str = "list"
    data: List[ModelCard] = []

class ChatMessage(BaseModel):
    role: Literal["system", "user", "assistant", "tool"]
    content: Union[str, List[Dict[str, Any]]]

class OpenAIChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False
# --- Supported Models ---
SUPPORTED_MODELS = {
    "llama3-8b-instruct": "meta/meta-llama-3-8b-instruct",
    "claude-4.5-haiku": "anthropic/claude-4.5-haiku",
}
# --- Core Logic ---
def prepare_replicate_input(request: OpenAIChatCompletionRequest) -> Dict[str, Any]:
"""
Formats the input for Replicate API. This function now correctly builds a
single prompt string from the message history, which is required by
Replicate's endpoints for models like Claude and Llama 3.
"""
    payload = {}
    # --- PROMPT FORMAT FIX START ---
    prompt_parts = []
    system_prompt = None
    for msg in request.messages:
        if msg.role == "system":
            # Extract the system prompt, since it is a separate parameter for many models
            system_prompt = str(msg.content)
        elif msg.role == "user":
            # Format user messages
            content = msg.content
            if isinstance(content, list):  # Handle potential future vision models
                text_parts = [item.get("text", "") for item in content if item.get("type") == "text"]
                content = " ".join(text_parts)
            prompt_parts.append(f"User: {content}")
        elif msg.role == "assistant":
            # Format assistant messages
            prompt_parts.append(f"Assistant: {msg.content}")
    # Add the final "Assistant:" turn to prompt the model for a response.
    # This is a standard convention for many chat models when using a single prompt string.
    prompt_parts.append("Assistant:")
    # The main input is a single 'prompt' string with turns separated by blank lines.
    payload["prompt"] = "\n\n".join(prompt_parts)
    if system_prompt:
        payload["system_prompt"] = system_prompt
    # --- PROMPT FORMAT FIX END ---
    # Map common OpenAI parameters to Replicate equivalents.
    # Compare against None explicitly so legitimate falsy values
    # (e.g. temperature=0) are not silently dropped.
    if request.max_tokens is not None:
        payload["max_new_tokens"] = request.max_tokens
    if request.temperature is not None:
        payload["temperature"] = request.temperature
    if request.top_p is not None:
        payload["top_p"] = request.top_p
    return payload
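
# Illustrative sketch of what prepare_replicate_input produces for a simple
# request (hypothetical values, shown only for documentation):
#
#   messages = [{"role": "system", "content": "Be terse."},
#               {"role": "user", "content": "Hi"}]
#   ->
#   {"prompt": "User: Hi\n\nAssistant:",
#    "system_prompt": "Be terse.",
#    "temperature": 0.7, "top_p": 1.0}   # Pydantic defaults, unless overridden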
async def stream_replicate_sse(replicate_model_id: str, input_payload: dict):
    """Handles the full streaming lifecycle using standard Replicate endpoints."""
    url = f"https://api.replicate.com/v1/models/{replicate_model_id}/predictions"
    headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json"}
    async with httpx.AsyncClient(timeout=60.0) as client:
        try:
            response = await client.post(url, headers=headers, json={"input": input_payload, "stream": True})
            response.raise_for_status()
            prediction = response.json()
            stream_url = prediction.get("urls", {}).get("stream")
            prediction_id = prediction.get("id", "stream-unknown")
            if not stream_url:
                yield json.dumps({"error": {"message": "Model did not return a stream URL."}})
                return
        except httpx.HTTPStatusError as e:
            error_details = e.response.text
            try:
                error_json = e.response.json()
                error_details = error_json.get("detail", error_details)
            except json.JSONDecodeError:
                pass
            yield json.dumps({"error": {"message": f"Upstream Error: {error_details}", "type": "replicate_error"}})
            return
        try:
            async with client.stream("GET", stream_url, headers={"Accept": "text/event-stream"}, timeout=None) as sse:
                current_event = None
                async for line in sse.aiter_lines():
                    if line.startswith("event:"):
                        current_event = line[len("event:"):].strip()
                    elif line.startswith("data:"):
                        # Per the SSE spec, strip at most one leading space after
                        # "data:". A full strip() would corrupt tokens whose
                        # leading or trailing whitespace is significant.
                        data = line[len("data:"):]
                        if data.startswith(" "):
                            data = data[1:]
                        if current_event == "output":
                            if data:
                                chunk = {
                                    "id": prediction_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": replicate_model_id,
                                    "choices": [{"index": 0, "delta": {"content": data}, "finish_reason": None}]
                                }
                                yield json.dumps(chunk)
                        elif current_event == "done":
                            break
        except httpx.ReadTimeout:
            yield json.dumps({"error": {"message": "Stream timed out.", "type": "timeout_error"}})
            return
    final_chunk = {
        "id": prediction_id, "object": "chat.completion.chunk", "created": int(time.time()), "model": replicate_model_id,
        "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]
    }
    yield json.dumps(final_chunk)
    yield "[DONE]"
# --- Endpoints ---
@app.get("/v1/models")
async def list_models():
"""Lists the currently supported models."""
return ModelList(data=[ModelCard(id=k) for k in SUPPORTED_MODELS.keys()])
@app.post("/v1/chat/completions")
async def create_chat_completion(request: OpenAIChatCompletionRequest):
"""Handles chat completion requests, streaming or non-streaming."""
if request.model not in SUPPORTED_MODELS:
raise HTTPException(status_code=404, detail=f"Model not found. Available models: {list(SUPPORTED_MODELS.keys())}")
replicate_id = SUPPORTED_MODELS[request.model]
replicate_input = prepare_replicate_input(request)
if request.stream:
return EventSourceResponse(stream_replicate_sse(replicate_id, replicate_input), media_type="text/event-stream")
# Non-streaming fallback
url = f"https://api.replicate.com/v1/models/{replicate_id}/predictions"
headers = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}", "Content-Type": "application/json", "Prefer": "wait=120"}
async with httpx.AsyncClient() as client:
try:
resp = await client.post(url, headers=headers, json={"input": replicate_input}, timeout=130.0)
resp.raise_for_status()
pred = resp.json()
output = "".join(pred.get("output", []))
            return {
                "id": pred.get("id"), "object": "chat.completion", "created": int(time.time()), "model": request.model,
                "choices": [{"index": 0, "message": {"role": "assistant", "content": output}, "finish_reason": "stop"}],
                "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
            }
        except httpx.HTTPStatusError as e:
            raise HTTPException(status_code=e.response.status_code, detail=f"Error from Replicate API: {e.response.text}")
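
# Quick local smoke test (a sketch; assumes this module is saved as main.py and
# that uvicorn is installed -- both are assumptions, not part of this file):
#
#   uvicorn main:app --port 8000
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "llama3-8b-instruct", "messages": [{"role": "user", "content": "Hello"}]}'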