NemoVonNirgend committed
Commit 3203e84 · verified · 1 Parent(s): ead8f66

Upload serve_ministral.py with huggingface_hub

Files changed (1)
  1. serve_ministral.py +106 -49
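
The commit message says the file was uploaded with huggingface_hub. For context, a minimal sketch of that kind of upload is below; the repo id and token handling are placeholders, not details taken from this commit.

# Hypothetical upload call using the huggingface_hub client library.
# The repo_id is a placeholder; the actual target repo is not shown on this page.
from huggingface_hub import HfApi

api = HfApi()  # reads the token from huggingface-cli login or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="serve_ministral.py",
    path_in_repo="serve_ministral.py",
    repo_id="your-username/your-repo",
    commit_message="Upload serve_ministral.py with huggingface_hub",
)
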
serve_ministral.py CHANGED
@@ -1,17 +1,20 @@
  #!/usr/bin/env python3
  """
- Simple OpenAI-compatible API server for Ministral 14B using transformers
- Usage: python serve_ministral.py
+ OpenAI-compatible API server for Ministral 14B with streaming support
  """

  import torch
- from transformers import AutoProcessor, AutoModelForImageTextToText
+ from transformers import AutoProcessor, AutoModelForImageTextToText, TextIteratorStreamer
  from fastapi import FastAPI
+ from fastapi.responses import StreamingResponse
  from pydantic import BaseModel
  from typing import List, Optional
  import uvicorn
  import time
  import traceback
+ import json
+ import asyncio
+ from threading import Thread

  app = FastAPI()

@@ -43,14 +46,7 @@ class ChatRequest(BaseModel):
      max_tokens: Optional[int] = 2048
      temperature: Optional[float] = 0.7
      top_p: Optional[float] = 0.9
-
- class ChatResponse(BaseModel):
-     id: str
-     object: str = "chat.completion"
-     created: int
-     model: str
-     choices: List[dict]
-     usage: dict
+     stream: Optional[bool] = False

  @app.on_event("startup")
  async def load_model():
@@ -77,7 +73,7 @@ async def chat_completions(request: ChatRequest):
      try:
          # Format messages
          messages = [{"role": m.role, "content": m.content} for m in request.messages]
-         print(f"Processing {len(messages)} messages...")
+         print(f"Processing {len(messages)} messages, stream={request.stream}")

          # Try to apply chat template
          try:
@@ -88,7 +84,6 @@ async def chat_completions(request: ChatRequest):
              )
          except Exception as e:
              print(f"Chat template error: {e}")
-             # Fallback: ShareGPT format with [INST] tags
              chat_text = "<s>"
              for m in messages:
                  if m["role"] == "system":
@@ -98,49 +93,111 @@ async def chat_completions(request: ChatRequest):
              elif m["role"] == "assistant":
                  chat_text += f"{m['content']}</s>"

-         print(f"Input length: {len(chat_text)} chars")
-
          # Tokenize
          inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
          input_len = inputs["input_ids"].shape[1]
          print(f"Input tokens: {input_len}")

-         # Generate
-         with torch.no_grad():
-             outputs = model.generate(
-                 **inputs,
-                 max_new_tokens=request.max_tokens,
-                 temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
-                 top_p=request.top_p if request.top_p else 0.9,
-                 do_sample=request.temperature is not None and request.temperature > 0,
-                 pad_token_id=processor.tokenizer.eos_token_id,
+         if request.stream:
+             # Streaming response
+             async def generate_stream():
+                 streamer = TextIteratorStreamer(
+                     processor.tokenizer,
+                     skip_prompt=True,
+                     skip_special_tokens=True
+                 )
+
+                 generation_kwargs = {
+                     **inputs,
+                     "max_new_tokens": request.max_tokens,
+                     "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                     "top_p": request.top_p if request.top_p else 0.9,
+                     "do_sample": request.temperature is not None and request.temperature > 0,
+                     "pad_token_id": processor.tokenizer.eos_token_id,
+                     "streamer": streamer,
+                 }
+
+                 thread = Thread(target=model.generate, kwargs=generation_kwargs)
+                 thread.start()
+
+                 response_id = f"chatcmpl-{int(time.time())}"
+
+                 for text in streamer:
+                     if text:
+                         text = fix_bpe_tokens(text)
+                         chunk = {
+                             "id": response_id,
+                             "object": "chat.completion.chunk",
+                             "created": int(time.time()),
+                             "model": request.model,
+                             "choices": [{
+                                 "index": 0,
+                                 "delta": {"content": text},
+                                 "finish_reason": None
+                             }]
+                         }
+                         yield f"data: {json.dumps(chunk)}\n\n"
+                         await asyncio.sleep(0)
+
+                 # Send final chunk
+                 final_chunk = {
+                     "id": response_id,
+                     "object": "chat.completion.chunk",
+                     "created": int(time.time()),
+                     "model": request.model,
+                     "choices": [{
+                         "index": 0,
+                         "delta": {},
+                         "finish_reason": "stop"
+                     }]
+                 }
+                 yield f"data: {json.dumps(final_chunk)}\n\n"
+                 yield "data: [DONE]\n\n"
+
+                 thread.join()
+
+             return StreamingResponse(
+                 generate_stream(),
+                 media_type="text/event-stream",
+                 headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}
              )
-
-         # Decode only the new tokens
-         new_tokens = outputs[0][input_len:]
-         response_text = processor.tokenizer.decode(
-             new_tokens,
-             skip_special_tokens=True,
-             clean_up_tokenization_spaces=True
-         )
-         response_text = fix_bpe_tokens(response_text)
-         print(f"Generated {len(new_tokens)} tokens")
-
-         return ChatResponse(
-             id=f"chatcmpl-{int(time.time())}",
-             created=int(time.time()),
-             model=request.model,
-             choices=[{
-                 "index": 0,
-                 "message": {"role": "assistant", "content": response_text},
-                 "finish_reason": "stop"
-             }],
-             usage={
-                 "prompt_tokens": input_len,
-                 "completion_tokens": len(new_tokens),
-                 "total_tokens": input_len + len(new_tokens)
+         else:
+             # Non-streaming response
+             with torch.no_grad():
+                 outputs = model.generate(
+                     **inputs,
+                     max_new_tokens=request.max_tokens,
+                     temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                     top_p=request.top_p if request.top_p else 0.9,
+                     do_sample=request.temperature is not None and request.temperature > 0,
+                     pad_token_id=processor.tokenizer.eos_token_id,
+                 )
+
+             new_tokens = outputs[0][input_len:]
+             response_text = processor.tokenizer.decode(
+                 new_tokens,
+                 skip_special_tokens=True,
+                 clean_up_tokenization_spaces=True
+             )
+             response_text = fix_bpe_tokens(response_text)
+             print(f"Generated {len(new_tokens)} tokens")
+
+             return {
+                 "id": f"chatcmpl-{int(time.time())}",
+                 "object": "chat.completion",
+                 "created": int(time.time()),
+                 "model": request.model,
+                 "choices": [{
+                     "index": 0,
+                     "message": {"role": "assistant", "content": response_text},
+                     "finish_reason": "stop"
+                 }],
+                 "usage": {
+                     "prompt_tokens": input_len,
+                     "completion_tokens": len(new_tokens),
+                     "total_tokens": input_len + len(new_tokens)
+                 }
              }
-         )
      except Exception as e:
          print(f"Error: {e}")
          traceback.print_exc()
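
The main change in this diff is the new stream flag on ChatRequest and the SSE chunks emitted via TextIteratorStreamer. A rough client sketch against the updated server follows; the route, port, and model name are assumptions (the @app.post decorator and the uvicorn.run call are outside the hunks shown), based on the usual OpenAI-compatible convention of POST /v1/chat/completions served on port 8000.

# Hypothetical streaming client for the updated endpoint. Route, port, and
# model name are assumptions; adjust them to match the actual server setup.
import json
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "ministral-14b",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": True,
    },
    stream=True,
)
for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    payload = line[len(b"data: "):]
    if payload == b"[DONE]":
        break
    chunk = json.loads(payload)
    # Each SSE chunk carries an OpenAI-style delta with the next text fragment.
    print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)

With "stream": false (or the field omitted) the same request returns a single chat.completion object with the usage block shown in the non-streaming branch above.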