Update README.md
Browse files
README.md
CHANGED
|
@@ -22,7 +22,7 @@ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
|
| 22 |
# for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
|
| 23 |
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
|
| 24 |
|
| 25 |
-
messages = [{"role": "user", "content": "What is the capital of France?"}]
|
| 26 |
input_text=tokenizer.apply_chat_template(messages, tokenize=False)
|
| 27 |
print(input_text)
|
| 28 |
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
|
|
|
|
| 22 |
# for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
|
| 23 |
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
|
| 24 |
|
| 25 |
+
messages = [{"role": "user", "content": "Столица России?"}]
|
| 26 |
input_text=tokenizer.apply_chat_template(messages, tokenize=False)
|
| 27 |
print(input_text)
|
| 28 |
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
|