"""
OpenAI-compatible API server for the Elizabeth model.

Exposes an OpenAI-style /v1/chat/completions endpoint, plus /health and
/metrics, backed by a locally loaded Hugging Face causal-LM checkpoint.
"""
| |
|
import hmac
import json
import logging
import os
import time
import uuid
from typing import Any, Dict, List

import torch
from fastapi import FastAPI, HTTPException, Depends, Header
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
| |
|
| | |
# Module-level logging: INFO and above via the root handler.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Server configuration.
MODEL_PATH = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple/"  # local HF checkpoint dir
PORT = 8000
HOST = "0.0.0.0"  # bind on all interfaces
# SECURITY NOTE(review): the fallback is a hardcoded secret committed to
# source control; require API_KEY to be set in the environment (no default)
# for any real deployment.
API_KEY = os.getenv("API_KEY", "elizabeth-secret-key-2025")
| |
|
| | |
class ChatMessage(BaseModel):
    """A single chat turn, mirroring OpenAI's message object."""

    role: str = Field(..., description="Role of the message sender")
    content: str = Field(..., description="Content of the message")
| |
|
class ChatCompletionRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-compatible subset).

    NOTE(review): `stream` is accepted for client compatibility but the
    handler never reads it — responses are always returned whole.
    """

    model: str = Field(..., description="Model to use for completion")
    messages: List[ChatMessage] = Field(..., description="List of messages")
    temperature: float = Field(0.7, ge=0.0, le=2.0, description="Sampling temperature")
    max_tokens: int = Field(1024, ge=1, le=4096, description="Maximum tokens to generate")
    stream: bool = Field(False, description="Whether to stream the response")
    top_p: float = Field(1.0, ge=0.0, le=1.0, description="Nucleus sampling parameter")
| |
|
class ChatCompletionChoice(BaseModel):
    """One completion candidate in the response's `choices` array."""

    index: int
    message: ChatMessage
    finish_reason: str
| |
|
class ChatCompletionUsage(BaseModel):
    """Token accounting for a completion (prompt + generated = total)."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
| |
|
class ChatCompletionResponse(BaseModel):
    """Top-level response object matching OpenAI's chat.completion schema."""

    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: ChatCompletionUsage
| |
|
| | |
app = FastAPI(title="Elizabeth API", version="1.0.0")

# CORS: wide open, suitable for development only.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard origins on
# credentialed requests) — pin explicit origins if credentials are needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
| |
|
| | |
# Lazily populated module globals; filled by load_model() at startup.
model = None
tokenizer = None


def load_model():
    """Load the tokenizer and model from MODEL_PATH into the module globals.

    Uses bfloat16 weights with device_map="auto" so device placement is
    delegated to transformers/accelerate. Blocks until loading completes
    and logs the elapsed time, device, and dtype.
    """
    global model, tokenizer

    logger.info("Loading model and tokenizer...")
    start_time = time.time()

    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True
    )

    load_time = time.time() - start_time
    logger.info(f"Model loaded in {load_time:.2f} seconds")
    logger.info(f"Model device: {model.device}")
    logger.info(f"Model dtype: {model.dtype}")
| |
|
def authenticate_token(authorization: str = Header(None)):
    """Validate the Bearer token on an incoming request.

    Args:
        authorization: Raw ``Authorization`` header value (or None if absent).

    Returns:
        The presented token when it matches ``API_KEY``.

    Raises:
        HTTPException: 401 when the header is missing, not of the form
            ``Bearer <token>``, or the token does not match.
    """
    if authorization is None:
        raise HTTPException(status_code=401, detail="Authorization header required")

    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid authorization format")

    token = authorization[7:]  # strip the "Bearer " prefix
    # Constant-time comparison: a plain `!=` on a secret leaks matching-prefix
    # length through response timing.
    if not hmac.compare_digest(token, API_KEY):
        raise HTTPException(status_code=401, detail="Invalid API key")

    return token
| |
|
@app.on_event("startup")
async def startup_event():
    """Load the model once when the server process starts.

    NOTE(review): @app.on_event is deprecated in recent FastAPI releases in
    favor of lifespan handlers — confirm against the pinned version.
    """
    load_model()
| |
|
@app.get("/health")
async def health_check():
    """Liveness probe: reports whether the model has finished loading."""
    payload = {
        "status": "healthy",
        "model_loaded": model is not None,
        "model_device": None,
        "timestamp": time.time(),
    }
    if model:
        payload["model_device"] = str(model.device)
    return payload
| |
|
@app.get("/metrics")
async def metrics():
    """Metrics endpoint.

    NOTE(review): these are hardcoded placeholder values, and a Prometheus
    scraper expects the text exposition format, not JSON — wire up real
    counters (e.g. prometheus_client) before relying on this endpoint.
    """
    return {
        "requests_processed": 0,
        "average_latency": 0.0,
        "error_rate": 0.0
    }
| |
|
@app.post("/v1/chat/completions")
async def chat_completions(
    request: ChatCompletionRequest,
    token: str = Depends(authenticate_token)
):
    """OpenAI-compatible chat completions endpoint.

    Flattens the message list into a plain-text transcript, generates with
    the local model, and returns an OpenAI-style response object.

    NOTE(review): `request.stream` is accepted for compatibility but
    streaming is not implemented; responses are always returned whole.

    Raises:
        HTTPException: 503 if the model is not loaded, 500 on generation
            failure (401s come from the auth dependency).
    """
    if model is None or tokenizer is None:
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        # Build a simple "role: content" transcript ending with the
        # assistant cue.
        # NOTE(review): consider tokenizer.apply_chat_template so the prompt
        # matches the model's training format — verify against training setup.
        prompt = ""
        for msg in request.messages:
            prompt += f"{msg.role}: {msg.content}\n"
        prompt += "Assistant:"

        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

        start_time = time.time()
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=request.max_tokens,
                do_sample=True,
                temperature=request.temperature,
                top_p=request.top_p,
                pad_token_id=tokenizer.eos_token_id
            )
        generation_time = time.time() - start_time

        prompt_tokens = len(inputs.input_ids[0])
        completion_tokens = len(outputs[0]) - prompt_tokens

        # Decode only the newly generated token ids. The previous approach
        # (decoding the full sequence, then slicing off len(prompt)
        # characters) corrupts the reply whenever skip_special_tokens or
        # tokenizer normalization changes the decoded prompt's length.
        assistant_response = tokenizer.decode(
            outputs[0][prompt_tokens:], skip_special_tokens=True
        ).strip()

        # Report truncation honestly: "length" when the token budget was
        # exhausted, "stop" otherwise (generate() stops early at EOS).
        finish_reason = (
            "length" if completion_tokens >= request.max_tokens else "stop"
        )

        response = ChatCompletionResponse(
            # uuid4 instead of int(time.time()): second-resolution timestamps
            # collide across concurrent requests.
            id=f"chatcmpl-{uuid.uuid4().hex}",
            created=int(time.time()),
            model=request.model,
            choices=[
                ChatCompletionChoice(
                    index=0,
                    message=ChatMessage(
                        role="assistant",
                        content=assistant_response
                    ),
                    finish_reason=finish_reason
                )
            ],
            usage=ChatCompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=prompt_tokens + completion_tokens
            )
        )

        logger.info(f"Generated response in {generation_time:.2f}s, tokens: {completion_tokens}")

        return response

    except Exception as e:
        logger.error(f"Error generating response: {e}")
        raise HTTPException(status_code=500, detail=f"Generation error: {str(e)}")
| |
|
@app.get("/")
async def root():
    """Landing endpoint: service name, version, and available routes."""
    endpoints = {
        "chat": "/v1/chat/completions",
        "health": "/health",
        "metrics": "/metrics"
    }
    return {
        "message": "Elizabeth API Server",
        "version": "1.0.0",
        "model": "qwen3-8b-elizabeth-simple",
        "endpoints": endpoints
    }
| |
|
if __name__ == "__main__":
    # Imported here so the module can be served by an external runner
    # (e.g. `uvicorn module:app`) without requiring uvicorn at import time.
    import uvicorn

    logger.info(f"Starting Elizabeth API server on {HOST}:{PORT}")
    uvicorn.run(app, host=HOST, port=PORT)