Gsm8 / app.py
Madras1's picture
Create app.py
639edf6 verified
raw
history blame contribute delete
748 Bytes
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
model_id = "Madras1/qwen-3b-reasoning-gsm8k"

# Optimized loading: float16 weights, device_map="auto" places layers on the
# best available device(s) (GPU if present, otherwise CPU).
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Build a chat-formatted prompt for the instruction-tuned model.
prompt = "Resolva: 25 - 4 * 2 + 3"
messages = [{"role": "user", "content": prompt}]
text = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# Move inputs to wherever device_map actually placed the model — a hard-coded
# "cuda" would crash on CPU-only machines and can mismatch the model's device.
inputs = tokenizer([text], return_tensors="pt").to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=512,
    # do_sample=True is required for temperature to have any effect;
    # without it generation is greedy and the argument is silently ignored.
    do_sample=True,
    temperature=0.7,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))