Update app.py
app.py
CHANGED
@@ -16,7 +16,7 @@ def chat(query: str):
     GET /chat?query=Your+question
     Returns JSON: {"answer": "...model’s reply..."}
     """
-
+
     prompt = (
         "<|im_start|>system\nYou are a helpful assistant.<|im_end|>"
         "<|im_start|>user\n" + query + "<|im_end|>"
@@ -24,7 +24,7 @@ def chat(query: str):
     )
     inputs = tokenizer(prompt, return_tensors="pt")
     outputs = model.generate(**inputs, max_new_tokens=200)
-
+
     response = tokenizer.decode(
         outputs[0][inputs.input_ids.shape[-1]:],
         skip_special_tokens=True
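For context, below is a minimal sketch of how the surrounding app.py plausibly fits together, since the diff only shows lines 16–30. The FastAPI wiring, the model/tokenizer loading, the checkpoint name, and the final return statement are all assumptions, not part of the commit. One deliberate deviation from the diff: this sketch appends newlines between turns and an opening "<|im_start|>assistant\n" turn, which ChatML-style models generally need in order to start a reply rather than continue the user's message; the diff's prompt omits both.

# Hypothetical reconstruction of app.py; only the chat() body between
# the docstring and the decode call comes from the diff above.
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "Qwen/Qwen2.5-0.5B-Instruct"  # assumed checkpoint; not shown in the diff

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

app = FastAPI()

@app.get("/chat")
def chat(query: str):
    """
    GET /chat?query=Your+question
    Returns JSON: {"answer": "...model’s reply..."}
    """
    # ChatML-style prompt, as in the diff, plus turn separators and an
    # assistant header so generation begins a fresh reply.
    prompt = (
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        "<|im_start|>user\n" + query + "<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=200)

    # Slice off the prompt tokens so only newly generated text is decoded.
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[-1]:],
        skip_special_tokens=True,
    )
    return {"answer": response}

The slice outputs[0][inputs.input_ids.shape[-1]:] works because generate() returns the prompt tokens followed by the new tokens, so dropping the first input_ids.shape[-1] positions leaves only the model's reply.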