NemoVonNirgend committed
Commit f58f56a · verified · 1 Parent(s): 6201452

Upload serve_ministral.py with huggingface_hub

Files changed (1):
  1. serve_ministral.py +66 -40
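
The commit message says the file was pushed with the huggingface_hub client. For context, an upload of this kind typically looks roughly like the sketch below; the repo id and repo type are placeholders and are not taken from this page.

from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default

# Push the local script into the repository root.
# repo_id and repo_type are placeholders; the actual target repo is not shown here.
api.upload_file(
    path_or_fileobj="serve_ministral.py",
    path_in_repo="serve_ministral.py",
    repo_id="NemoVonNirgend/your-repo",
    repo_type="model",
    commit_message="Upload serve_ministral.py with huggingface_hub",
)
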
serve_ministral.py CHANGED
@@ -11,6 +11,7 @@ from pydantic import BaseModel
 from typing import List, Optional
 import uvicorn
 import time
+import traceback
 
 app = FastAPI()
 
@@ -59,47 +60,72 @@ async def load_model():
 async def chat_completions(request: ChatRequest):
     global model, processor
 
-    # Format messages using chat template
-    messages = [{"role": m.role, "content": m.content} for m in request.messages]
-
-    chat_text = processor.apply_chat_template(
-        messages,
-        tokenize=False,
-        add_generation_prompt=True
-    )
-
-    inputs = processor(text=chat_text, return_tensors="pt").to(model.device)
-
-    with torch.no_grad():
-        outputs = model.generate(
-            **inputs,
-            max_new_tokens=request.max_tokens,
-            temperature=request.temperature if request.temperature > 0 else None,
-            top_p=request.top_p,
-            do_sample=request.temperature > 0,
-            pad_token_id=processor.tokenizer.eos_token_id,
+    try:
+        # Format messages
+        messages = [{"role": m.role, "content": m.content} for m in request.messages]
+        print(f"Processing {len(messages)} messages...")
+
+        # Try to apply chat template
+        try:
+            chat_text = processor.apply_chat_template(
+                messages,
+                tokenize=False,
+                add_generation_prompt=True
+            )
+        except Exception as e:
+            print(f"Chat template error: {e}")
+            # Fallback: manual formatting
+            chat_text = ""
+            for m in messages:
+                if m["role"] == "user":
+                    chat_text += f"[INST] {m['content']} [/INST]"
+                elif m["role"] == "assistant":
+                    chat_text += f" {m['content']}</s>"
+                elif m["role"] == "system":
+                    chat_text = f"<<SYS>>\n{m['content']}\n<</SYS>>\n\n" + chat_text
+
+        print(f"Input length: {len(chat_text)} chars")
+
+        # Tokenize
+        inputs = processor.tokenizer(chat_text, return_tensors="pt").to(model.device)
+        input_len = inputs["input_ids"].shape[1]
+        print(f"Input tokens: {input_len}")
+
+        # Generate
+        with torch.no_grad():
+            outputs = model.generate(
+                **inputs,
+                max_new_tokens=request.max_tokens,
+                temperature=request.temperature if request.temperature and request.temperature > 0 else 1.0,
+                top_p=request.top_p if request.top_p else 0.9,
+                do_sample=request.temperature is not None and request.temperature > 0,
+                pad_token_id=processor.tokenizer.eos_token_id,
+            )
+
+        # Decode only the new tokens
+        new_tokens = outputs[0][input_len:]
+        response_text = processor.tokenizer.decode(new_tokens, skip_special_tokens=True)
+        print(f"Generated {len(new_tokens)} tokens")
+
+        return ChatResponse(
+            id=f"chatcmpl-{int(time.time())}",
+            created=int(time.time()),
+            model=request.model,
+            choices=[{
+                "index": 0,
+                "message": {"role": "assistant", "content": response_text},
+                "finish_reason": "stop"
+            }],
+            usage={
+                "prompt_tokens": input_len,
+                "completion_tokens": len(new_tokens),
+                "total_tokens": input_len + len(new_tokens)
+            }
         )
-
-    # Decode only the new tokens
-    input_len = inputs["input_ids"].shape[1]
-    new_tokens = outputs[0][input_len:]
-    response_text = processor.decode(new_tokens, skip_special_tokens=True)
-
-    return ChatResponse(
-        id=f"chatcmpl-{int(time.time())}",
-        created=int(time.time()),
-        model=request.model,
-        choices=[{
-            "index": 0,
-            "message": {"role": "assistant", "content": response_text},
-            "finish_reason": "stop"
-        }],
-        usage={
-            "prompt_tokens": input_len,
-            "completion_tokens": len(new_tokens),
-            "total_tokens": input_len + len(new_tokens)
-        }
-    )
+    except Exception as e:
+        print(f"Error: {e}")
+        traceback.print_exc()
+        raise
 
 @app.get("/v1/models")
 async def list_models():
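
For reference, the patched handler serves an OpenAI-style /v1/chat/completions route, so it can be exercised with any HTTP client. A minimal smoke test might look like the sketch below; the host, port, and model name are assumptions, since they are defined elsewhere in serve_ministral.py and do not appear in this diff.

import requests

# Assumed server address; the actual host/port are configured outside this hunk.
BASE_URL = "http://localhost:8000"

payload = {
    "model": "ministral",  # placeholder; query GET /v1/models for the real id
    "messages": [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    "max_tokens": 64,
    "temperature": 0.7,
    "top_p": 0.9,
}

resp = requests.post(f"{BASE_URL}/v1/chat/completions", json=payload, timeout=300)
resp.raise_for_status()
data = resp.json()

# The handler returns a single choice with finish_reason "stop"
# plus prompt/completion token counts in "usage".
print(data["choices"][0]["message"]["content"])
print(data["usage"])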