Commit: Update README.md

File changed: README.md
@@ -46,14 +46,24 @@ The authors recommend using `n_sigma=1.0` for most use cases, but you can experiment

Removed (previous example — NOTE: the contents of most removed lines were lost in extraction; only the fragments below survived):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

# There is a print message hardcoded in the custom generation method
gen_out = model.generate(**
```

### Citation
|
Added (updated example):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B", device_map="auto")
generation_config = GenerationConfig(temperature=1.5, max_length=128)

messages = [{"role": "user", "content": "Write a story about a dog and cat becoming friends."}]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    enable_thinking=False  # Switches between thinking and non-thinking modes. Default is True.
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# There is a print message hardcoded in the custom generation method
gen_out = model.generate(**model_inputs, n_sigma=1.0, generation_config=generation_config, custom_generate="Pramodith/topN_sigma_generation", trust_remote_code=True)

print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
```

### Citation