NemoVonNirgend committed
Commit eba6b86 · verified · 1 Parent(s): 5e4c01b

Upload serve_ministral_fixed.py with huggingface_hub

Files changed (1)
  1. serve_ministral_fixed.py +318 -0
serve_ministral_fixed.py ADDED
@@ -0,0 +1,318 @@
+ #!/usr/bin/env python3
+ """
+ OpenAI-compatible API server for Ministral 14B with streaming support
+ Fixed chat template for base models
+ """
+
+ import subprocess
+ import sys
+
+ def install_deps():
+     try:
+         import torch
+         need_torch = not torch.cuda.is_available()
+     except ImportError:
+         need_torch = True
+
+     print("=== Installing dependencies ===")
+
+     if need_torch:
+         subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "torch"])
+
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
+                            "git+https://github.com/huggingface/transformers.git"])
+
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "-q",
+                            "accelerate", "fastapi", "uvicorn", "pydantic", "sentencepiece", "protobuf"])
+
+     print("=== Dependencies installed ===")
+
+ install_deps()
+
+ import torch
+ from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer
+ from fastapi import FastAPI
+ from fastapi.responses import StreamingResponse
+ from pydantic import BaseModel
+ from typing import List, Optional
+ import uvicorn
+ import time
+ import traceback
+ import json
+ import asyncio
+ from threading import Thread
+
+ app = FastAPI()
+
+ # Mistral chat template
+ MISTRAL_CHAT_TEMPLATE = """{{- bos_token }}
+ {%- for message in messages %}
+ {%- if message['role'] == 'system' %}
+ {{- '[INST] ' + message['content'] + '\n\n' }}
+ {%- elif message['role'] == 'user' %}
+ {%- if loop.index0 == 0 and messages[0]['role'] != 'system' %}
+ {{- '[INST] ' + message['content'] + ' [/INST]' }}
+ {%- elif messages[0]['role'] == 'system' and loop.index0 == 1 %}
+ {{- message['content'] + ' [/INST]' }}
+ {%- else %}
+ {{- '[INST] ' + message['content'] + ' [/INST]' }}
+ {%- endif %}
+ {%- elif message['role'] == 'assistant' %}
+ {{- message['content'] + eos_token }}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {%- if messages[-1]['role'] != 'assistant' %}
+ {%- endif %}
+ {%- endif %}"""
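+ # With the template above, a single user turn such as
+ # [{"role": "user", "content": "Hi"}] renders as "<s>[INST] Hi [/INST]"
+ # (assuming bos_token is "<s>" and eos_token is "</s>", as in Mistral tokenizers)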
+
+ def fix_bpe_tokens(text):
+     """Fix BPE tokenization artifacts"""
+     text = text.replace("Ġ", " ")
+     text = text.replace("Ċ", "\n")
+     text = text.replace("ĉ", "\t")
+     text = text.replace("âĢĻ", "'")
+     text = text.replace("âĢľ", '"')
+     text = text.replace("âĢĿ", '"')
+     text = text.replace("âĢĶ", "—")
+     text = text.replace("âĢĵ", "–")
+     text = text.replace("âĢ¦", "…")
+     text = text.replace("âĢĺ", "'")
+     return text
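+ # Example: fix_bpe_tokens("HelloĠworld,ĠitâĢĻsĠfine") returns "Hello world, it's fine"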
+
+ def format_messages_mistral(messages):
+     """Format messages using Mistral format manually"""
+     text = "<s>"
+
+     for i, m in enumerate(messages):
+         role = m["role"]
+         content = m["content"]
+
+         if role == "system":
+             # System prompt gets wrapped in first INST
+             text += f"[INST] {content}\n\n"
+         elif role == "user":
+             if i == 0:
+                 # First user message
+                 text += f"[INST] {content} [/INST]"
+             elif i > 0 and messages[i-1]["role"] == "system":
+                 # User message right after system
+                 text += f"{content} [/INST]"
+             else:
+                 # Subsequent user messages
+                 text += f"[INST] {content} [/INST]"
+         elif role == "assistant":
+             text += f"{content}</s>"
+
+     return text
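+ # Example: [{"role": "system", "content": "S"}, {"role": "user", "content": "U"},
+ #          {"role": "assistant", "content": "A"}, {"role": "user", "content": "U2"}]
+ # formats to "<s>[INST] S\n\nU [/INST]A</s>[INST] U2 [/INST]"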
+
+ model = None
+ processor = None
+
+ class Message(BaseModel):
+     role: str
+     content: str
+
+ class ChatRequest(BaseModel):
+     model: str = "ministral-14b"
+     messages: List[Message]
+     max_tokens: Optional[int] = 2048
+     temperature: Optional[float] = 0.7
+     top_p: Optional[float] = 0.9
+     top_k: Optional[int] = None
+     min_p: Optional[float] = None
+     typical_p: Optional[float] = None
+     repetition_penalty: Optional[float] = None
+     no_repeat_ngram_size: Optional[int] = None
+     stream: Optional[bool] = False
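+ # Mirrors the OpenAI chat completions request shape, e.g.:
+ # {"model": "ministral-14b", "messages": [{"role": "user", "content": "Hi"}], "stream": true}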
+
+ @app.on_event("startup")
+ async def load_model():
+     global model, processor
+     print("Loading Ministral 14B...")
+
+     model_id = "RoleModel/ministral-14b-merged-official"
+
+     processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+
+     # Set chat template if missing
+     if processor.tokenizer.chat_template is None:
+         print("Setting Mistral chat template...")
+         processor.tokenizer.chat_template = MISTRAL_CHAT_TEMPLATE
+
+     model = AutoModelForImageTextToText.from_pretrained(
+         model_id,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         trust_remote_code=True,
+     )
+     model.eval()
+     print("Model loaded successfully!")
+
+ @app.post("/v1/chat/completions")
+ async def chat_completions(request: ChatRequest):
+     global model, processor
+
+     try:
+         messages = [{"role": m.role, "content": m.content} for m in request.messages]
+         print(f"Processing {len(messages)} messages, stream={request.stream}")
+
+         # Try chat template, fall back to manual formatting
+         try:
+             chat_text = processor.apply_chat_template(
+                 messages,
+                 tokenize=False,
+                 add_generation_prompt=True
+             )
+         except Exception as e:
+             print(f"Chat template error: {e}, using manual format")
+             chat_text = format_messages_mistral(messages)
+
+         print(f"Formatted prompt:\n{chat_text[:500]}...")
+
+         inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
+         input_len = inputs["input_ids"].shape[1]
+         print(f"Input tokens: {input_len}")
+
+         if request.stream:
+             async def generate_stream():
+                 streamer = TextIteratorStreamer(
+                     processor.tokenizer,
+                     skip_prompt=True,
+                     skip_special_tokens=True
+                 )
+
+                 generation_kwargs = {
+                     **inputs,
+                     "max_new_tokens": request.max_tokens,
+                     "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                     "top_p": request.top_p if request.top_p else 0.9,
+                     "do_sample": request.temperature is not None and request.temperature > 0,
+                     "pad_token_id": processor.tokenizer.eos_token_id,
+                     "streamer": streamer,
+                 }
+                 if request.top_k is not None:
+                     generation_kwargs["top_k"] = request.top_k
+                 if request.min_p is not None:
+                     generation_kwargs["min_p"] = request.min_p
+                 if request.typical_p is not None:
+                     generation_kwargs["typical_p"] = request.typical_p
+                 if request.repetition_penalty is not None:
+                     generation_kwargs["repetition_penalty"] = request.repetition_penalty
+                 if request.no_repeat_ngram_size is not None:
+                     generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size
+
+                 thread = Thread(target=model.generate, kwargs=generation_kwargs)
+                 thread.start()
+
+                 response_id = f"chatcmpl-{int(time.time())}"
+
+                 for text in streamer:
+                     if text:
+                         text = fix_bpe_tokens(text)
+                         chunk = {
+                             "id": response_id,
+                             "object": "chat.completion.chunk",
+                             "created": int(time.time()),
+                             "model": request.model,
+                             "choices": [{
+                                 "index": 0,
+                                 "delta": {"content": text},
+                                 "finish_reason": None
+                             }]
+                         }
+                         yield f"data: {json.dumps(chunk)}\n\n"
+                         await asyncio.sleep(0)
+
+                 final_chunk = {
+                     "id": response_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request.model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {},
+                         "finish_reason": "stop"
+                     }]
+                 }
+                 yield f"data: {json.dumps(final_chunk)}\n\n"
+                 yield "data: [DONE]\n\n"
+
+                 thread.join()
+
+             return StreamingResponse(
+                 generate_stream(),
+                 media_type="text/event-stream",
+                 headers={
+                     "Cache-Control": "no-cache, no-store, must-revalidate",
+                     "Connection": "keep-alive",
+                     "X-Accel-Buffering": "no",
+                     "Transfer-Encoding": "chunked",
+                 }
+             )
+         else:
+             generation_kwargs = {
+                 **inputs,
+                 "max_new_tokens": request.max_tokens,
+                 "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                 "top_p": request.top_p if request.top_p else 0.9,
+                 "do_sample": request.temperature is not None and request.temperature > 0,
+                 "pad_token_id": processor.tokenizer.eos_token_id,
+             }
+             if request.top_k is not None:
+                 generation_kwargs["top_k"] = request.top_k
+             if request.min_p is not None:
+                 generation_kwargs["min_p"] = request.min_p
+             if request.typical_p is not None:
+                 generation_kwargs["typical_p"] = request.typical_p
+             if request.repetition_penalty is not None:
+                 generation_kwargs["repetition_penalty"] = request.repetition_penalty
+             if request.no_repeat_ngram_size is not None:
+                 generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size
+
+             with torch.no_grad():
+                 outputs = model.generate(**generation_kwargs)
+
+             new_tokens = outputs[0][input_len:]
+             response_text = processor.tokenizer.decode(
+                 new_tokens,
+                 skip_special_tokens=True,
+                 clean_up_tokenization_spaces=True
+             )
+             response_text = fix_bpe_tokens(response_text)
+             print(f"Generated {len(new_tokens)} tokens")
+
+             return {
+                 "id": f"chatcmpl-{int(time.time())}",
+                 "object": "chat.completion",
+                 "created": int(time.time()),
+                 "model": request.model,
+                 "choices": [{
+                     "index": 0,
+                     "message": {"role": "assistant", "content": response_text},
+                     "finish_reason": "stop"
+                 }],
+                 "usage": {
+                     "prompt_tokens": input_len,
+                     "completion_tokens": len(new_tokens),
+                     "total_tokens": input_len + len(new_tokens)
+                 }
+             }
+     except Exception as e:
+         print(f"Error: {e}")
+         traceback.print_exc()
+         raise
+
+ @app.get("/v1/models")
+ async def list_models():
+     return {
+         "object": "list",
+         "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
+     }
+
+ @app.get("/health")
+ async def health():
+     return {"status": "ok"}
+
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=8000)
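+
+ # Example client call once the server is up (port 8000 and routes as defined above):
+ #   curl -N http://localhost:8000/v1/chat/completions \
+ #     -H "Content-Type: application/json" \
+ #     -d '{"model": "ministral-14b", "messages": [{"role": "user", "content": "Hello"}], "stream": true}'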