NemoVonNirgend committed · verified · Commit ee74ec9 · Parent(s): 3203e84

Upload serve_ministral.py with huggingface_hub

Files changed (1):
  1. serve_ministral.py  +243 -218
serve_ministral.py CHANGED
@@ -1,218 +1,243 @@
- #!/usr/bin/env python3
- """
- OpenAI-compatible API server for Ministral 14B with streaming support
- """
-
- import torch
- from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer
- from fastapi import FastAPI
- from fastapi.responses import StreamingResponse
- from pydantic import BaseModel
- from typing import List, Optional
- import uvicorn
- import time
- import traceback
- import json
- import asyncio
- from threading import Thread
-
- app = FastAPI()
-
- def fix_bpe_tokens(text):
-     """Fix BPE tokenization artifacts"""
-     text = text.replace("Ġ", " ")
-     text = text.replace("Ċ", "\n")
-     text = text.replace("ĉ", "\t")
-     text = text.replace("âĢĻ", "'")
-     text = text.replace("âĢľ", '"')
-     text = text.replace("âĢĿ", '"')
-     text = text.replace("âĢĶ", "—")
-     text = text.replace("âĢĵ", "–")
-     text = text.replace("âĢ¦", "…")
-     text = text.replace("âĢĺ", "'")
-     return text
-
- # Global model and tokenizer
- model = None
- processor = None
-
- class Message(BaseModel):
-     role: str
-     content: str
-
- class ChatRequest(BaseModel):
-     model: str = "ministral-14b"
-     messages: List[Message]
-     max_tokens: Optional[int] = 2048
-     temperature: Optional[float] = 0.7
-     top_p: Optional[float] = 0.9
-     stream: Optional[bool] = False
-
- @app.on_event("startup")
- async def load_model():
-     global model, processor
-     print("Loading Ministral 14B...")
-
-     model_id = "RoleModel/ministral-14b-merged-official"
-
-     processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
-
-     model = AutoModelForImageTextToText.from_pretrained(
-         model_id,
-         torch_dtype=torch.bfloat16,
-         device_map="auto",
-         trust_remote_code=True,
-     )
-     model.eval()
-     print("Model loaded successfully!")
-
- @app.post("/v1/chat/completions")
- async def chat_completions(request: ChatRequest):
-     global model, processor
-
-     try:
-         # Format messages
-         messages = [{"role": m.role, "content": m.content} for m in request.messages]
-         print(f"Processing {len(messages)} messages, stream={request.stream}")
-
-         # Try to apply chat template
-         try:
-             chat_text = processor.apply_chat_template(
-                 messages,
-                 tokenize=False,
-                 add_generation_prompt=True
-             )
-         except Exception as e:
-             print(f"Chat template error: {e}")
-             chat_text = "<s>"
-             for m in messages:
-                 if m["role"] == "system":
-                     chat_text += f"[SYSTEM_PROMPT]{m['content']}[/SYSTEM_PROMPT]"
-                 elif m["role"] == "user":
-                     chat_text += f"[INST]{m['content']}[/INST]"
-                 elif m["role"] == "assistant":
-                     chat_text += f"{m['content']}</s>"
-
-         # Tokenize
-         inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
-         input_len = inputs["input_ids"].shape[1]
-         print(f"Input tokens: {input_len}")
-
-         if request.stream:
-             # Streaming response
-             async def generate_stream():
-                 streamer = TextIteratorStreamer(
-                     processor.tokenizer,
-                     skip_prompt=True,
-                     skip_special_tokens=True
-                 )
-
-                 generation_kwargs = {
-                     **inputs,
-                     "max_new_tokens": request.max_tokens,
-                     "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
-                     "top_p": request.top_p if request.top_p else 0.9,
-                     "do_sample": request.temperature is not None and request.temperature > 0,
-                     "pad_token_id": processor.tokenizer.eos_token_id,
-                     "streamer": streamer,
-                 }
-
-                 thread = Thread(target=model.generate, kwargs=generation_kwargs)
-                 thread.start()
-
-                 response_id = f"chatcmpl-{int(time.time())}"
-
-                 for text in streamer:
-                     if text:
-                         text = fix_bpe_tokens(text)
-                         chunk = {
-                             "id": response_id,
-                             "object": "chat.completion.chunk",
-                             "created": int(time.time()),
-                             "model": request.model,
-                             "choices": [{
-                                 "index": 0,
-                                 "delta": {"content": text},
-                                 "finish_reason": None
-                             }]
-                         }
-                         yield f"data: {json.dumps(chunk)}\n\n"
-                         await asyncio.sleep(0)
-
-                 # Send final chunk
-                 final_chunk = {
-                     "id": response_id,
-                     "object": "chat.completion.chunk",
-                     "created": int(time.time()),
-                     "model": request.model,
-                     "choices": [{
-                         "index": 0,
-                         "delta": {},
-                         "finish_reason": "stop"
-                     }]
-                 }
-                 yield f"data: {json.dumps(final_chunk)}\n\n"
-                 yield "data: [DONE]\n\n"
-
-                 thread.join()
-
-             return StreamingResponse(
-                 generate_stream(),
-                 media_type="text/event-stream",
-                 headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
-             )
-         else:
-             # Non-streaming response
-             with torch.no_grad():
-                 outputs = model.generate(
-                     **inputs,
-                     max_new_tokens=request.max_tokens,
-                     temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
-                     top_p=request.top_p if request.top_p else 0.9,
-                     do_sample=request.temperature is not None and request.temperature > 0,
-                     pad_token_id=processor.tokenizer.eos_token_id,
-                 )
-
-             new_tokens = outputs[0][input_len:]
-             response_text = processor.tokenizer.decode(
-                 new_tokens,
-                 skip_special_tokens=True,
-                 clean_up_tokenization_spaces=True
-             )
-             response_text = fix_bpe_tokens(response_text)
-             print(f"Generated {len(new_tokens)} tokens")
-
-             return {
-                 "id": f"chatcmpl-{int(time.time())}",
-                 "object": "chat.completion",
-                 "created": int(time.time()),
-                 "model": request.model,
-                 "choices": [{
-                     "index": 0,
-                     "message": {"role": "assistant", "content": response_text},
-                     "finish_reason": "stop"
-                 }],
-                 "usage": {
-                     "prompt_tokens": input_len,
-                     "completion_tokens": len(new_tokens),
-                     "total_tokens": input_len + len(new_tokens)
-                 }
-             }
-     except Exception as e:
-         print(f"Error: {e}")
-         traceback.print_exc()
-         raise
-
- @app.get("/v1/models")
- async def list_models():
-     return {
-         "object": "list",
-         "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
-     }
-
- @app.get("/health")
- async def health():
-     return {"status": "ok"}
-
- if __name__ == "__main__":
-     uvicorn.run(app, host="0.0.0.0", port=8000)
+ #!/usr/bin/env python3
+ """
+ OpenAI-compatible API server for Ministral 14B with streaming support
+ """
+
+ # Install dependencies first
+ import subprocess
+ import sys
+
+ def install_deps():
+     deps = ["transformers", "accelerate", "fastapi", "uvicorn", "pydantic", "sentencepiece", "protobuf"]
+     # Check if torch with CUDA exists, only install if missing
+     try:
+         import torch
+         if not torch.cuda.is_available():
+             deps.insert(0, "torch")
+     except ImportError:
+         deps.insert(0, "torch")
+
+     print("=== Installing dependencies ===")
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "-U", "-q"] + deps)
+     print("=== Dependencies installed ===")
+
+ install_deps()
+
+ import torch
+ from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer
+ from fastapi import FastAPI
+ from fastapi.responses import StreamingResponse
+ from pydantic import BaseModel
+ from typing import List, Optional
+ import uvicorn
+ import time
+ import traceback
+ import json
+ import asyncio
+ from threading import Thread
+
+ app = FastAPI()
+
+ def fix_bpe_tokens(text):
+     """Fix BPE tokenization artifacts"""
+     text = text.replace("Ġ", " ")
+     text = text.replace("Ċ", "\n")
+     text = text.replace("ĉ", "\t")
+     text = text.replace("âĢĻ", "'")
+     text = text.replace("âĢľ", '"')
+     text = text.replace("âĢĿ", '"')
+     text = text.replace("âĢĶ", "—")
+     text = text.replace("âĢĵ", "–")
+     text = text.replace("âĢ¦", "…")
+     text = text.replace("âĢĺ", "'")
+     return text
+
+ # Global model and tokenizer
+ model = None
+ processor = None
+
+ class Message(BaseModel):
+     role: str
+     content: str
+
+ class ChatRequest(BaseModel):
+     model: str = "ministral-14b"
+     messages: List[Message]
+     max_tokens: Optional[int] = 2048
+     temperature: Optional[float] = 0.7
+     top_p: Optional[float] = 0.9
+     stream: Optional[bool] = False
+
+ @app.on_event("startup")
+ async def load_model():
+     global model, processor
+     print("Loading Ministral 14B...")
+
+     model_id = "RoleModel/ministral-14b-merged-official"
+
+     processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
+
+     model = AutoModelForImageTextToText.from_pretrained(
+         model_id,
+         torch_dtype=torch.bfloat16,
+         device_map="auto",
+         trust_remote_code=True,
+     )
+     model.eval()
+     print("Model loaded successfully!")
+
+ @app.post("/v1/chat/completions")
+ async def chat_completions(request: ChatRequest):
+     global model, processor
+
+     try:
+         # Format messages
+         messages = [{"role": m.role, "content": m.content} for m in request.messages]
+         print(f"Processing {len(messages)} messages, stream={request.stream}")
+
+         # Try to apply chat template
+         try:
+             chat_text = processor.apply_chat_template(
+                 messages,
+                 tokenize=False,
+                 add_generation_prompt=True
+             )
+         except Exception as e:
+             print(f"Chat template error: {e}")
+             chat_text = "<s>"
+             for m in messages:
+                 if m["role"] == "system":
+                     chat_text += f"[SYSTEM_PROMPT]{m['content']}[/SYSTEM_PROMPT]"
+                 elif m["role"] == "user":
+                     chat_text += f"[INST]{m['content']}[/INST]"
+                 elif m["role"] == "assistant":
+                     chat_text += f"{m['content']}</s>"
+
+         # Tokenize
+         inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
+         input_len = inputs["input_ids"].shape[1]
+         print(f"Input tokens: {input_len}")
+
+         if request.stream:
+             # Streaming response
+             async def generate_stream():
+                 streamer = TextIteratorStreamer(
+                     processor.tokenizer,
+                     skip_prompt=True,
+                     skip_special_tokens=True
+                 )
+
+                 generation_kwargs = {
+                     **inputs,
+                     "max_new_tokens": request.max_tokens,
+                     "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                     "top_p": request.top_p if request.top_p else 0.9,
+                     "do_sample": request.temperature is not None and request.temperature > 0,
+                     "pad_token_id": processor.tokenizer.eos_token_id,
+                     "streamer": streamer,
+                 }
+
+                 thread = Thread(target=model.generate, kwargs=generation_kwargs)
+                 thread.start()
+
+                 response_id = f"chatcmpl-{int(time.time())}"
+
+                 for text in streamer:
+                     if text:
+                         text = fix_bpe_tokens(text)
+                         chunk = {
+                             "id": response_id,
+                             "object": "chat.completion.chunk",
+                             "created": int(time.time()),
+                             "model": request.model,
+                             "choices": [{
+                                 "index": 0,
+                                 "delta": {"content": text},
+                                 "finish_reason": None
+                             }]
+                         }
+                         yield f"data: {json.dumps(chunk)}\n\n"
+                         await asyncio.sleep(0)
+
+                 # Send final chunk
+                 final_chunk = {
+                     "id": response_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request.model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {},
+                         "finish_reason": "stop"
+                     }]
+                 }
+                 yield f"data: {json.dumps(final_chunk)}\n\n"
+                 yield "data: [DONE]\n\n"
+
+                 thread.join()
+
+             return StreamingResponse(
+                 generate_stream(),
+                 media_type="text/event-stream",
+                 headers={
+                     "Cache-Control": "no-cache, no-store, must-revalidate",
+                     "Connection": "keep-alive",
+                     "X-Accel-Buffering": "no",  # Disable nginx buffering
+                     "Transfer-Encoding": "chunked",
+                 }
+             )
+         else:
+             # Non-streaming response
+             with torch.no_grad():
+                 outputs = model.generate(
+                     **inputs,
+                     max_new_tokens=request.max_tokens,
+                     temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                     top_p=request.top_p if request.top_p else 0.9,
+                     do_sample=request.temperature is not None and request.temperature > 0,
+                     pad_token_id=processor.tokenizer.eos_token_id,
+                 )
+
+             new_tokens = outputs[0][input_len:]
+             response_text = processor.tokenizer.decode(
+                 new_tokens,
+                 skip_special_tokens=True,
+                 clean_up_tokenization_spaces=True
+             )
+             response_text = fix_bpe_tokens(response_text)
+             print(f"Generated {len(new_tokens)} tokens")
+
+             return {
+                 "id": f"chatcmpl-{int(time.time())}",
+                 "object": "chat.completion",
+                 "created": int(time.time()),
+                 "model": request.model,
+                 "choices": [{
+                     "index": 0,
+                     "message": {"role": "assistant", "content": response_text},
+                     "finish_reason": "stop"
+                 }],
+                 "usage": {
+                     "prompt_tokens": input_len,
+                     "completion_tokens": len(new_tokens),
+                     "total_tokens": input_len + len(new_tokens)
+                 }
+             }
+     except Exception as e:
+         print(f"Error: {e}")
+         traceback.print_exc()
+         raise
+
+ @app.get("/v1/models")
+ async def list_models():
+     return {
+         "object": "list",
+         "data": [{"id": "ministral-14b", "object": "model", "owned_by": "rolemodel"}]
+     }
+
+ @app.get("/health")
+ async def health():
+     return {"status": "ok"}
+
+ if __name__ == "__main__":
+     uvicorn.run(app, host="0.0.0.0", port=8000)
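
A minimal client-side sketch of how the endpoints above can be exercised once the server is running, assuming the defaults from __main__ (http://localhost:8000); the requests library used here is an assumption and is not among the packages installed by install_deps():

#!/usr/bin/env python3
"""Client sketch for the OpenAI-compatible endpoints served above (assumes localhost:8000)."""
import json
import requests  # assumption: install separately, e.g. pip install requests

BASE = "http://localhost:8000"

# Liveness and model listing
print(requests.get(f"{BASE}/health").json())      # {"status": "ok"}
print(requests.get(f"{BASE}/v1/models").json())   # lists "ministral-14b"

payload = {
    "model": "ministral-14b",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    "max_tokens": 64,
    "temperature": 0.7,
}

# Non-streaming: a single chat.completion object comes back
resp = requests.post(f"{BASE}/v1/chat/completions", json=payload)
print(resp.json()["choices"][0]["message"]["content"])

# Streaming: server-sent events, one "data: {...}" line per chunk, terminated by [DONE]
with requests.post(f"{BASE}/v1/chat/completions", json={**payload, "stream": True}, stream=True) as r:
    for line in r.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        delta = json.loads(data)["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
print()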