update app
app.py CHANGED

@@ -17,6 +17,9 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
     trust_remote_code=True,
 )
+if not tokenizer.chat_template:
+    tokenizer.chat_template = """{% for message in messages %}{{ message['role'] }}: {{ message['content'] }}
+{% endfor %}Assistant:"""
 
 # Request schema
 class ChatRequest(BaseModel):
@@ -26,13 +29,19 @@ class ChatRequest(BaseModel):
 @app.post("/chat")
 def chat(req: ChatRequest):
     # Format input according to the Qwen template
-    text = tokenizer.apply_chat_template(
-        req.messages,
-        tokenize=False,
-        add_generation_prompt=True
-    )
+    # text = tokenizer.apply_chat_template(
+    #     req.messages,
+    #     tokenize=False,
+    #     add_generation_prompt=True
+    # )
+    # inputs = tokenizer(text, return_tensors="pt").to(model.device)
+
+    prompt = ""
+    for msg in req.messages:
+        prompt += f"{msg['role']}: {msg['content']}\n"
+    prompt += "assistant:"
 
-    inputs = tokenizer(text, return_tensors="pt").to(model.device)
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
     # Generate
     outputs = model.generate(
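
The fallback template added in the first hunk and the hand-rolled loop in the handler produce essentially the same prompt text. A minimal sketch of what that template renders to, using plain jinja2 with an illustrative two-message conversation (the message list is an assumption, not from the commit):

from jinja2 import Template

# The fallback chat template added in this commit, rendered standalone.
chat_template = """{% for message in messages %}{{ message['role'] }}: {{ message['content'] }}
{% endfor %}Assistant:"""

# Illustrative conversation (assumption, not part of the commit).
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

print(Template(chat_template).render(messages=messages))
# system: You are a helpful assistant.
# user: Hello!
# Assistant:

Note that after this change the handler never actually calls apply_chat_template: the manual loop builds the same role-prefixed lines itself, though it closes with a lowercase "assistant:" while the fallback template ends with "Assistant:".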
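
For reference, a hedged example of calling the rewritten endpoint. The host and port are assumptions (whatever the server is actually configured with); the payload shape follows from the handler, which iterates req.messages and indexes msg['role'] and msg['content'], so messages should be a list of plain dicts:

import requests

# Assumed local deployment; adjust the URL to the actual server config.
resp = requests.post(
    "http://localhost:8000/chat",
    json={"messages": [{"role": "user", "content": "Hello!"}]},
)
print(resp.json())

With the handler above, this payload becomes the prompt "user: Hello!\nassistant:" before tokenizing and generating.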