Update README.md
README.md (changed):

```diff
@@ -65,7 +65,22 @@ model.generation_config.pad_token_id = tokenizer.eos_token_id
 
 # Inference simple as transformers library
 prompt = "Describe basics of DNNs quantization."
-
+messages = [
+    {
+        "role": "system",
+        "content": "You are a search bot, answer on user text queries."
+    },
+    {
+        "role": "user",
+        "content": prompt
+    }
+]
+
+chat_prompt = tokenizer.apply_chat_template(
+    messages, add_generation_prompt=True, tokenize=False
+)
+
+inputs = tokenizer(chat_prompt, return_tensors="pt")
 inputs.to(device)
 
 with torch.inference_mode:
```
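For reference, below is a minimal, self-contained sketch of the inference flow this change introduces. It is an illustration only: the model id, device selection, and generation arguments are placeholders (the README defines `model`, `tokenizer`, and `device` in earlier lines not shown in this hunk), and the surrounding context line `with torch.inference_mode:` is written as `torch.inference_mode()` here, since the context-manager form requires the call.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model id -- the actual repo id is not shown in this hunk.
model_id = "org/quantized-model"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
model.generation_config.pad_token_id = tokenizer.eos_token_id

prompt = "Describe basics of DNNs quantization."
messages = [
    {
        "role": "system",
        "content": "You are a search bot, answer on user text queries."
    },
    {
        "role": "user",
        "content": prompt
    }
]

# Render the chat messages into a single prompt string using the model's
# chat template, then tokenize that string.
chat_prompt = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)
inputs = tokenizer(chat_prompt, return_tensors="pt")
inputs.to(device)

# Note the parentheses: inference_mode is instantiated here so it can be
# used as a context manager.
with torch.inference_mode():
    output_ids = model.generate(**inputs, max_new_tokens=256)

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

The substance of the diff is that the raw prompt is no longer tokenized directly: it is first wrapped in a system/user message list and rendered through the tokenizer's chat template (`apply_chat_template` with `add_generation_prompt=True`, `tokenize=False`), and the resulting string is then tokenized and moved to the device as before.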