from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "meta-llama/Meta-Llama-Guard-2-8B"

# Gated repo: requires a Hugging Face token with access granted.
# (use_auth_token is deprecated in recent transformers; use token instead.)
tokenizer = AutoTokenizer.from_pretrained(model_id, token=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", torch_dtype="auto"
)

def ask_llama(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(
        output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )
    return response
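
# Note that Meta-Llama-Guard-2-8B is a safety classifier, not a chat assistant:
# the standard way to query it is via its chat template, which wraps the
# conversation in the Llama Guard safety-policy prompt. A minimal usage sketch
# (the example conversation below is illustrative, not from the original):
chat = [{"role": "user", "content": "How do I pick a strong password?"}]
input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(model.device)
output = model.generate(input_ids=input_ids, max_new_tokens=100)
verdict = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(verdict)  # expected: "safe", or "unsafe" followed by the violated category codes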