File size: 535 Bytes
9ae4384 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
def load_model(model_name="your-username/sentinel", *, device_map="auto", trust_remote_code=True):
    """
    Load the Sentinel model and tokenizer and return a text-generation pipeline.

    Parameters
    ----------
    model_name : str
        Hugging Face Hub repo id or local path of the model to load.
    device_map : str
        Passed through to ``from_pretrained``; ``"auto"`` places weights on
        GPU when one is available, falling back to CPU otherwise.
    trust_remote_code : bool
        Whether to execute custom modeling code bundled with the checkpoint.
        SECURITY: this runs arbitrary Python from the model repo — only leave
        enabled for repositories you trust. Default kept ``True`` for
        backward compatibility with existing callers.

    Returns
    -------
    transformers.Pipeline
        A ``"text-generation"`` pipeline wrapping the loaded model/tokenizer.
    """
    print(f"Loading {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map=device_map,
        trust_remote_code=trust_remote_code,
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
|