Update README.md
README.md
CHANGED
@@ -30,16 +30,16 @@ model = AutoModelForCausalLM.from_pretrained(
     torch_dtype='auto'
 ).eval()
 
-# Prompt content: "
+# Prompt content: "Pẹlẹ o. Bawo ni o se wa?" ("Hello. How are you?")
 messages = [
-    {"role": "user", "content": "
+    {"role": "user", "content": "Pẹlẹ o. Bawo ni o se wa?"}
 ]
 
 input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
 output_ids = model.generate(input_ids.to('cuda'))
 response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
 
-# Model response:
+# Model response:
 print(response)
 ```
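For reference, here is a minimal self-contained sketch of the updated snippet, assembled from the lines visible in this hunk. The imports, the `model_path` placeholder, and the explicit CUDA placement are assumptions (they sit outside the hunk); substitute the checkpoint id this README actually documents.

```python
# Hedged sketch of the updated README example; see the assumptions noted above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = 'org/model-name'  # hypothetical placeholder, not from the diff

tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype='auto'
).eval().to('cuda')  # device placement assumed, to match input_ids.to('cuda') below

# Prompt content: "Pẹlẹ o. Bawo ni o se wa?" ("Hello. How are you?")
messages = [
    {"role": "user", "content": "Pẹlẹ o. Bawo ni o se wa?"}
]

# Build the chat-formatted input and generate a reply.
input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
output_ids = model.generate(input_ids.to('cuda'))

# Decode only the newly generated tokens, skipping the prompt.
response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)

# Model response:
print(response)
```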