NemoVonNirgend committed
Commit 78b9da8 · verified
1 Parent(s): 0afa77b

Upload serve_ministral.py with huggingface_hub

Files changed (1)
  1. serve_ministral.py +114 -0
serve_ministral.py ADDED
@@ -0,0 +1,114 @@
+ #!/usr/bin/env python3
+ """
+ Simple OpenAI-compatible API server for Ministral 14B using transformers
+ Usage: python serve_ministral.py
+ """
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from typing import List, Optional
+ import uvicorn
+ import time
+
+ app = FastAPI()
+
+ # Global model and tokenizer
+ model = None
+ tokenizer = None
+
+ class Message(BaseModel):
+     role: str
+     content: str
+
+ class ChatRequest(BaseModel):
+     model: str = "ministral-14b"
+     messages: List[Message]
+     max_tokens: Optional[int] = 2048
+     temperature: Optional[float] = 0.7
+     top_p: Optional[float] = 0.9
+
+ class ChatResponse(BaseModel):
+     id: str
+     object: str = "chat.completion"
+     created: int
+     model: str
+     choices: List[dict]
+     usage: dict
+
+ @app.on_event("startup")
+ async def load_model():
+     global model, tokenizer
+     print("Loading Ministral 14B...")
+
+     model_id = "RoleModel/ministral-14b-merged-official"
+
+     tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+
+     # Load just the text model weights
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         trust_remote_code=True,
+     )
+     model.eval()
+     print("Model loaded successfully!")
+
+ @app.post("/v1/chat/completions")
+ async def chat_completions(request: ChatRequest):
+     global model, tokenizer
+
+     # Format messages using chat template
+     chat_text = tokenizer.apply_chat_template(
+         [{"role": m.role, "content": m.content} for m in request.messages],
+         tokenize=False,
+         add_generation_prompt=True
+     )
+
+     inputs = tokenizer(chat_text, return_tensors="pt").to(model.device)
+
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=request.max_tokens,
+             temperature=request.temperature,
+             top_p=request.top_p,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+
+     # Decode only the new tokens
+     new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
+     response_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
+
+     return ChatResponse(
+         id=f"chatcmpl-{int(time.time())}",
+         created=int(time.time()),
+         model=request.model,
+         choices=[{
+             "index": 0,
+             "message": {"role": "assistant", "content": response_text},
+             "finish_reason": "stop"
+         }],
+         usage={
+             "prompt_tokens": inputs["input_ids"].shape[1],
+             "completion_tokens": len(new_tokens),
+             "total_tokens": inputs["input_ids"].shape[1] + len(new_tokens)
+         }
+     )
+
+ @app.get("/v1/models")
+ async def list_models():
+     return {
+         "object": "list",
+         "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
+     }
+
+ @app.get("/health")
+ async def health():
+     return {"status": "ok"}
+
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=8000)
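
Since the route and payload mirror the OpenAI chat-completions schema, a plain HTTP POST is enough to exercise the server. Below is a minimal smoke test, assuming the server above is already running on localhost:8000; `requests` is a dependency of the test script only, not of the uploaded file.

import requests

# POST a chat request to the server started by serve_ministral.py
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "ministral-14b",
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 128,
    },
    timeout=600,  # generation is synchronous; a 14B model can be slow
)
resp.raise_for_status()
body = resp.json()
print(body["choices"][0]["message"]["content"])
print(body["usage"])  # prompt/completion/total token counts from the handler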
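
The same compatibility means the official `openai` Python client (v1+) can be pointed at this server. A hedged sketch under that assumption: the `api_key` string is a placeholder, since the server does no authentication, and `stream=True` would not work because the handler never implements streaming.

from openai import OpenAI

# Any api_key value works here; the server above ignores authentication.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

reply = client.chat.completions.create(
    model="ministral-14b",  # matches the id reported by /v1/models
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(reply.choices[0].message.content)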