Upload folder using huggingface_hub
README.md CHANGED
@@ -75,14 +75,16 @@ This implementation matches the `DoLa` functionality present in `transformers<4.56.0`
 ```python
 # requires `transformers>=4.56.0`; previously, it was part of the library
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+
+device = infer_device()
 
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
 model = AutoModelForCausalLM.from_pretrained(
     "Qwen/Qwen3-0.6B", torch_dtype=torch.float16
-).to("cuda")
+).to(device)
 
-inputs = tokenizer("What is the highest peak in the world?", return_tensors="pt").to("cuda")
+inputs = tokenizer("What is the highest peak in the world?", return_tensors="pt").to(device)
 
 outputs = model.generate(
     **inputs,
@@ -102,14 +104,16 @@ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
 
 ```python
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+
+device = infer_device()
 
 tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
 model = AutoModelForCausalLM.from_pretrained(
     "Qwen/Qwen3-0.6B", torch_dtype=torch.float16
-).to("cuda")
+).to(device)
 
-inputs = tokenizer("What is the highest peak in the world?", return_tensors="pt").to("cuda")
+inputs = tokenizer("What is the highest peak in the world?", return_tensors="pt").to(device)
 
 outputs = model.generate(
     **inputs,
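Both hunks make the snippet device-agnostic: `infer_device()` (exported by recent `transformers` releases, as the updated import shows) picks the best available accelerator so the hard-coded device string can be dropped. For environments pinned to an older `transformers` that does not export it, a minimal stand-in is easy to write. The sketch below is an assumption covering only the common CUDA/MPS/CPU cases, not the library's actual implementation; `infer_device_fallback` is a hypothetical name.

```python
import torch

def infer_device_fallback() -> str:
    """Hypothetical stand-in for `transformers.infer_device`.

    Assumption: only CUDA, Apple MPS, and CPU need to be covered.
    """
    if torch.cuda.is_available():
        return "cuda"  # NVIDIA GPU present
    if torch.backends.mps.is_available():
        return "mps"  # Apple Silicon GPU present
    return "cpu"  # fall back to CPU

device = infer_device_fallback()  # e.g. "cuda" on a GPU machine
```

With a helper like this, the updated `.to(device)` calls in the diff work unchanged whether the script runs on an NVIDIA GPU, an Apple Silicon machine, or CPU only.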