from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "Madras1/qwen-3b-reasoning-gsm8k"
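
# Load the tokenizer and the fine-tuned model; device_map="auto" places the
# fp16 weights on the available GPU(s), falling back to CPU if none is found.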
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)
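
# Format the question with the model's chat template so the prompt matches
# the structure seen during fine-tuning.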
prompt = "Solve: 25 - 4 * 2 + 3"
messages = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
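
# Tokenize and move the input tensors to the device the model was loaded on.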
inputs = tokenizer([text], return_tensors="pt").to(model.device)
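
# Sample up to 512 new tokens; do_sample=True is required for the
# temperature setting to take effect (otherwise decoding is greedy).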
outputs = model.generate(
    **inputs,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
)
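
# outputs[0] contains the prompt tokens followed by the completion; slice
# with outputs[0][inputs.input_ids.shape[1]:] to decode only the answer.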
print(tokenizer.decode(outputs[0], skip_special_tokens=True))