NemoVonNirgend committed (verified)
Commit 6201452 · 1 Parent(s): 78b9da8

Upload serve_ministral.py with huggingface_hub

Files changed (1): serve_ministral.py (+19 -17)
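The commit message says the file was pushed with huggingface_hub. A minimal sketch of that flow, assuming the uploader is already authenticated and that the target repo is the one named inside the script (the actual destination repo is not shown on this page):

import os
from huggingface_hub import HfApi

# Hypothetical upload flow matching the commit message; repo_id is an
# assumption -- this page does not name the repository the commit lives in.
api = HfApi()
api.upload_file(
    path_or_fileobj="serve_ministral.py",   # local file to push
    path_in_repo="serve_ministral.py",      # destination path inside the repo
    repo_id="RoleModel/ministral-14b-merged-official",  # assumed target repo
    commit_message="Upload serve_ministral.py with huggingface_hub",
)

Each upload_file call produces exactly one commit on the Hub, which is why the change appears here as a single commit.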
serve_ministral.py CHANGED

@@ -5,7 +5,7 @@ Usage: python serve_ministral.py
 """
 
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoProcessor, AutoModelForImageTextToText
 from fastapi import FastAPI
 from pydantic import BaseModel
 from typing import List, Optional
@@ -16,7 +16,7 @@ app = FastAPI()
 
 # Global model and tokenizer
 model = None
-tokenizer = None
+processor = None
 
 class Message(BaseModel):
     role: str
@@ -39,15 +39,14 @@ class ChatResponse(BaseModel):
 
 @app.on_event("startup")
 async def load_model():
-    global model, tokenizer
+    global model, processor
     print("Loading Ministral 14B...")
 
     model_id = "RoleModel/ministral-14b-merged-official"
 
-    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
 
-    # Load just the text model weights
-    model = AutoModelForCausalLM.from_pretrained(
+    model = AutoModelForImageTextToText.from_pretrained(
         model_id,
         torch_dtype=torch.bfloat16,
         device_map="auto",
@@ -58,30 +57,33 @@ async def load_model():
 
 @app.post("/v1/chat/completions")
 async def chat_completions(request: ChatRequest):
-    global model, tokenizer
+    global model, processor
 
     # Format messages using chat template
-    chat_text = tokenizer.apply_chat_template(
-        [{"role": m.role, "content": m.content} for m in request.messages],
+    messages = [{"role": m.role, "content": m.content} for m in request.messages]
+
+    chat_text = processor.apply_chat_template(
+        messages,
         tokenize=False,
         add_generation_prompt=True
    )
 
-    inputs = tokenizer(chat_text, return_tensors="pt").to(model.device)
+    inputs = processor(text=chat_text, return_tensors="pt").to(model.device)
 
     with torch.no_grad():
         outputs = model.generate(
             **inputs,
             max_new_tokens=request.max_tokens,
-            temperature=request.temperature,
+            temperature=request.temperature if request.temperature > 0 else None,
             top_p=request.top_p,
-            do_sample=True,
-            pad_token_id=tokenizer.eos_token_id,
+            do_sample=request.temperature > 0,
+            pad_token_id=processor.tokenizer.eos_token_id,
         )
 
     # Decode only the new tokens
-    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
-    response_text = tokenizer.decode(new_tokens, skip_special_tokens=True)
+    input_len = inputs["input_ids"].shape[1]
+    new_tokens = outputs[0][input_len:]
+    response_text = processor.decode(new_tokens, skip_special_tokens=True)
 
     return ChatResponse(
         id=f"chatcmpl-{int(time.time())}",
@@ -93,9 +95,9 @@ async def chat_completions(request: ChatRequest):
             "finish_reason": "stop"
         }],
         usage={
-            "prompt_tokens": inputs["input_ids"].shape[1],
+            "prompt_tokens": input_len,
             "completion_tokens": len(new_tokens),
-            "total_tokens": inputs["input_ids"].shape[1] + len(new_tokens)
+            "total_tokens": input_len + len(new_tokens)
         }
     )
 
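Since the handler now routes text through AutoProcessor but the HTTP surface is unchanged, any OpenAI-style client should still work. A hedged usage sketch: the host and port are assumptions (uvicorn's default of 8000), since the launch command sits outside this diff, and the JSON fields mirror what the handler reads from ChatRequest:

import requests

# Minimal client sketch for the OpenAI-style endpoint above.
# Assumes the server is running locally on uvicorn's default port 8000.
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "messages": [{"role": "user", "content": "Hello!"}],
        "max_tokens": 128,    # forwarded to max_new_tokens
        "temperature": 0.7,   # > 0 enables sampling; 0 means greedy decoding
        "top_p": 0.9,
    },
)
print(resp.json()["usage"])   # prompt/completion/total token counts, per the diff

The temperature guard added in this commit is what makes the zero case safe: recent transformers releases reject temperature=0 combined with do_sample=True, so the handler only samples when temperature > 0 and otherwise falls back to greedy decoding.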
103