"""Inspect EOS-token settings for a Gemma 3 checkpoint.

Loads the tokenizer and model for a Gemma 3 variant and prints the
tokenizer's EOS token/id alongside the model config's EOS id, so the two
can be compared for mismatches.
"""
import os

# Must be set BEFORE transformers/torch import so CUDA initializes with
# PCI-bus device ordering and only physical GPU 1 visible (it becomes
# device 0 inside this process).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

from transformers import AutoTokenizer, AutoModelForCausalLM

name = "google/gemma-3-4b-it"  # or your Gemma 3 variant

tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name)

print(tokenizer.eos_token, tokenizer.eos_token_id)
# PretrainedConfig exposes eos_token_id but has no eos_token attribute
# (the token *string* lives on the tokenizer); accessing it directly
# raises AttributeError, so fall back to None.
print(getattr(model.config, "eos_token", None), model.config.eos_token_id)