NemoVonNirgend committed
Commit 5e4c01b · verified · 1 Parent(s): 1498531

Upload serve_ministral.py with huggingface_hub

Files changed (1):
  1. serve_ministral.py +37 -8
serve_ministral.py CHANGED
@@ -75,6 +75,11 @@ class ChatRequest(BaseModel):
     max_tokens: Optional[int] = 2048
     temperature: Optional[float] = 0.7
     top_p: Optional[float] = 0.9
+    top_k: Optional[int] = None
+    min_p: Optional[float] = None  # "bottom_p" - minimum probability threshold
+    typical_p: Optional[float] = None  # Typical decoding
+    repetition_penalty: Optional[float] = None  # 1.0 = no penalty, >1.0 = penalize repeats
+    no_repeat_ngram_size: Optional[int] = None  # Prevent n-gram repetition
     stream: Optional[bool] = False
 
 @app.on_event("startup")
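
With these fields in place, a client can opt into the new sampling controls per request; anything left unset stays None and is omitted downstream, so existing clients are unaffected. A minimal sketch of such a call (the host, port, route, and the messages field are assumptions; only the sampling fields above appear in this diff):

import requests

# Hypothetical client call: the route and the rest of the ChatRequest
# schema are not shown in this commit.
response = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 256,
        "temperature": 0.7,
        "top_k": 40,
        "min_p": 0.05,
        "repetition_penalty": 1.1,
        "no_repeat_ngram_size": 3,
    },
)
print(response.json())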
@@ -145,6 +150,17 @@ async def chat_completions(request: ChatRequest):
         "pad_token_id": processor.tokenizer.eos_token_id,
         "streamer": streamer,
     }
+    # Add optional parameters if provided
+    if request.top_k is not None:
+        generation_kwargs["top_k"] = request.top_k
+    if request.min_p is not None:
+        generation_kwargs["min_p"] = request.min_p
+    if request.typical_p is not None:
+        generation_kwargs["typical_p"] = request.typical_p
+    if request.repetition_penalty is not None:
+        generation_kwargs["repetition_penalty"] = request.repetition_penalty
+    if request.no_repeat_ngram_size is not None:
+        generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size
 
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
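
The same None-check chain reappears in the non-streaming branch below. A possible refactor, not part of this commit, would centralize it so the two branches cannot drift apart:

OPTIONAL_SAMPLING_FIELDS = (
    "top_k", "min_p", "typical_p",
    "repetition_penalty", "no_repeat_ngram_size",
)

# Hypothetical helper: copy any sampling fields the client actually set
# into the kwargs passed to model.generate().
def apply_optional_sampling(generation_kwargs: dict, request: ChatRequest) -> None:
    for name in OPTIONAL_SAMPLING_FIELDS:
        value = getattr(request, name)
        if value is not None:
            generation_kwargs[name] = value

Both branches would then call apply_optional_sampling(generation_kwargs, request) after building their base kwargs.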
@@ -197,15 +213,28 @@ async def chat_completions(request: ChatRequest):
         )
     else:
         # Non-streaming response
+        generation_kwargs = {
+            **inputs,
+            "max_new_tokens": request.max_tokens,
+            "temperature": request.temperature if request.temperature and request.temperature > 0 else 1.0,
+            "top_p": request.top_p if request.top_p else 0.9,
+            "do_sample": request.temperature is not None and request.temperature > 0,
+            "pad_token_id": processor.tokenizer.eos_token_id,
+        }
+        # Add optional parameters if provided
+        if request.top_k is not None:
+            generation_kwargs["top_k"] = request.top_k
+        if request.min_p is not None:
+            generation_kwargs["min_p"] = request.min_p
+        if request.typical_p is not None:
+            generation_kwargs["typical_p"] = request.typical_p
+        if request.repetition_penalty is not None:
+            generation_kwargs["repetition_penalty"] = request.repetition_penalty
+        if request.no_repeat_ngram_size is not None:
+            generation_kwargs["no_repeat_ngram_size"] = request.no_repeat_ngram_size
+
         with torch.no_grad():
-            outputs = model.generate(
-                **inputs,
-                max_new_tokens=request.max_tokens,
-                temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
-                top_p=request.top_p if request.top_p else 0.9,
-                do_sample=request.temperature is not None and request.temperature > 0,
-                pad_token_id=processor.tokenizer.eos_token_id,
-            )
+            outputs = model.generate(**generation_kwargs)
 
         new_tokens = outputs[0][input_len:]
         response_text = processor.tokenizer.decode(
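
The non-streaming branch now builds the same kwargs dict as the streaming path and unpacks it into a single generate() call. As a worked example (request values assumed for illustration): with temperature=0.7, repetition_penalty=1.1, and everything else at its defaults, the effective call is roughly:

outputs = model.generate(
    **inputs,
    max_new_tokens=2048,   # default max_tokens
    temperature=0.7,
    top_p=0.9,             # default top_p
    do_sample=True,        # temperature > 0
    pad_token_id=processor.tokenizer.eos_token_id,
    repetition_penalty=1.1,
)

When temperature is unset or 0, do_sample is False and generate() falls back to greedy decoding; transformers then ignores the sampling knobs (typically with a warning).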
 