"""OpenAI-compatible API server for Ministral 14B with streaming support."""

import subprocess
import sys


def install_deps():
    """Install runtime dependencies; torch is (re)installed only if missing or CUDA-less."""
    try:
        import torch
        need_torch = not torch.cuda.is_available()
    except ImportError:
        need_torch = True

    print("=== Installing dependencies ===")

    if need_torch:
        subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "torch"])

    # Transformers from source, for the latest model classes.
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                           "git+https://github.com/huggingface/transformers.git"])

    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
                           "accelerate", "fastapi", "uvicorn", "pydantic",
                           "sentencepiece", "protobuf"])

    print("=== Dependencies installed ===")


install_deps()

import torch
from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List, Optional
import uvicorn
import time
import traceback
import json
import asyncio
from threading import Thread

app = FastAPI()


def fix_bpe_tokens(text):
    """Fix byte-level BPE tokenization artifacts (GPT-2-style byte encoder leakage)."""
    text = text.replace("Ġ", " ")
    text = text.replace("Ċ", "\n")
    text = text.replace("ĉ", "\t")
    text = text.replace("âĢĻ", "'")
    text = text.replace("âĢľ", '"')
    text = text.replace("âĢĿ", '"')
    text = text.replace("âĢĶ", "—")  # em dash
    text = text.replace("âĢĵ", "–")  # en dash
    text = text.replace("âĢ¦", "…")  # ellipsis
    text = text.replace("âĢĺ", "'")
    return text
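
# For example, raw streamed text like "HelloĠworldâĢ¦" comes back as "Hello world…".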


# Populated at startup by load_model().
model = None
processor = None


class Message(BaseModel):
    role: str
    content: str


class ChatRequest(BaseModel):
    model: str = "ministral-14b"
    messages: List[Message]
    max_tokens: Optional[int] = 2048
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 0.9
    top_k: Optional[int] = None
    min_p: Optional[float] = None
    typical_p: Optional[float] = None
    repetition_penalty: Optional[float] = None
    no_repeat_ngram_size: Optional[int] = None
    stream: Optional[bool] = False
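
# An example request body this schema accepts (values are illustrative):
#   {"model": "ministral-14b",
#    "messages": [{"role": "user", "content": "Name three primes."}],
#    "temperature": 0.7, "max_tokens": 256, "stream": false}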


@app.on_event("startup")
async def load_model():
    global model, processor
    print("Loading Ministral 14B...")

    model_id = "RoleModel/ministral-14b-merged-official"

    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

    model = AutoModelForImageTextToText.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
    )
    model.eval()
    print("Model loaded successfully!")
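
# Rough memory math (assuming weights dominate): a 14B-parameter model in
# bfloat16 needs about 14e9 * 2 bytes ≈ 28 GB, so device_map="auto" may shard
# the weights across multiple GPUs or offload layers to CPU.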


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    global model, processor

    try:
        messages = [{"role": m.role, "content": m.content} for m in request.messages]
        print(f"Processing {len(messages)} messages, stream={request.stream}")

        try:
            chat_text = processor.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
        except Exception as e:
            # Fall back to a hand-rolled Mistral-style template.
            print(f"Chat template error: {e}")
            chat_text = "<s>"
            for m in messages:
                if m["role"] == "system":
                    chat_text += f"[SYSTEM_PROMPT]{m['content']}[/SYSTEM_PROMPT]"
                elif m["role"] == "user":
                    chat_text += f"[INST]{m['content']}[/INST]"
                elif m["role"] == "assistant":
                    chat_text += f"{m['content']}</s>"

        inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
        input_len = inputs["input_ids"].shape[1]
        print(f"Input tokens: {input_len}")

        if request.stream:

            async def generate_stream():
                # Token-by-token streaming: generation runs on a background
                # thread while this coroutine drains the streamer.
                streamer = TextIteratorStreamer(
                    processor.tokenizer,
                    skip_prompt=True,
                    skip_special_tokens=True
                )

                generation_kwargs = {
                    **inputs,
                    "max_new_tokens": request.max_tokens,
                    "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
                    "top_p": request.top_p if request.top_p else 0.9,
                    "do_sample": request.temperature is not None and request.temperature > 0,
                    "pad_token_id": processor.tokenizer.eos_token_id,
                    "streamer": streamer,
                }

                # Optional sampling knobs, forwarded only when set.
                if request.top_k is not None:
                    generation_kwargs["top_k"] = request.top_k
                if request.min_p is not None:
                    generation_kwargs["min_p"] = request.min_p
                if request.typical_p is not None:
                    generation_kwargs["typical_p"] = request.typical_p
                if request.repetition_penalty is not None:
                    generation_kwargs["repetition_penalty"] = request.repetition_penalty
                if request.no_repeat_ngram_size is not None:
                    generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size

                thread = Thread(target=model.generate, kwargs=generation_kwargs)
                thread.start()

                response_id = f"chatcmpl-{int(time.time())}"

                for text in streamer:
                    if text:
                        text = fix_bpe_tokens(text)
                        chunk = {
                            "id": response_id,
                            "object": "chat.completion.chunk",
                            "created": int(time.time()),
                            "model": request.model,
                            "choices": [{
                                "index": 0,
                                "delta": {"content": text},
                                "finish_reason": None
                            }]
                        }
                        yield f"data: {json.dumps(chunk)}\n\n"
                        # Yield control to the event loop so chunks flush immediately.
                        await asyncio.sleep(0)

                final_chunk = {
                    "id": response_id,
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": request.model,
                    "choices": [{
                        "index": 0,
                        "delta": {},
                        "finish_reason": "stop"
                    }]
                }
                yield f"data: {json.dumps(final_chunk)}\n\n"
                yield "data: [DONE]\n\n"

                thread.join()

            return StreamingResponse(
                generate_stream(),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache, no-store, must-revalidate",
                    "Connection": "keep-alive",
                    "X-Accel-Buffering": "no",
                    "Transfer-Encoding": "chunked",
                }
            )
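
        # On the wire, each event is one SSE line (illustrative payload):
        #   data: {"id": "chatcmpl-...", "object": "chat.completion.chunk",
        #          "choices": [{"index": 0, "delta": {"content": "Hel"}, "finish_reason": null}]}
        # and the stream terminates with: data: [DONE]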

        else:
            generation_kwargs = {
                **inputs,
                "max_new_tokens": request.max_tokens,
                "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
                "top_p": request.top_p if request.top_p else 0.9,
                "do_sample": request.temperature is not None and request.temperature > 0,
                "pad_token_id": processor.tokenizer.eos_token_id,
            }

            if request.top_k is not None:
                generation_kwargs["top_k"] = request.top_k
            if request.min_p is not None:
                generation_kwargs["min_p"] = request.min_p
            if request.typical_p is not None:
                generation_kwargs["typical_p"] = request.typical_p
            if request.repetition_penalty is not None:
                generation_kwargs["repetition_penalty"] = request.repetition_penalty
            if request.no_repeat_ngram_size is not None:
                generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size

            with torch.no_grad():
                outputs = model.generate(**generation_kwargs)

            # Decode only the newly generated tokens, not the prompt.
            new_tokens = outputs[0][input_len:]
            response_text = processor.tokenizer.decode(
                new_tokens,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=True
            )
            response_text = fix_bpe_tokens(response_text)
            print(f"Generated {len(new_tokens)} tokens")

            return {
                "id": f"chatcmpl-{int(time.time())}",
                "object": "chat.completion",
                "created": int(time.time()),
                "model": request.model,
                "choices": [{
                    "index": 0,
                    "message": {"role": "assistant", "content": response_text},
                    "finish_reason": "stop"
                }],
                "usage": {
                    "prompt_tokens": input_len,
                    "completion_tokens": len(new_tokens),
                    "total_tokens": input_len + len(new_tokens)
                }
            }
    except Exception as e:
        print(f"Error: {e}")
        traceback.print_exc()
        raise


@app.get("/v1/models")
async def list_models():
    return {
        "object": "list",
        "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
    }


@app.get("/health")
async def health():
    return {"status": "ok"}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
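
# A minimal client sketch for the streaming endpoint (assumes the host/port
# from the uvicorn call above; uses the `requests` library, which this script
# does not install):
#
#   import json, requests
#
#   with requests.post(
#       "http://localhost:8000/v1/chat/completions",
#       json={"messages": [{"role": "user", "content": "Hello"}], "stream": True},
#       stream=True,
#   ) as r:
#       for line in r.iter_lines():
#           if line.startswith(b"data: ") and line != b"data: [DONE]":
#               chunk = json.loads(line[6:])
#               print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)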