# NOTE(review): scrape residue from the Hugging Face Spaces file page
# (Space status: Sleeping; file size: 366 bytes) — kept as a comment so
# the file remains valid Python.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Default Hub repo id; load_model() without arguments keeps the original behavior.
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"


def load_model(model_name: str = MODEL_NAME):
    """Load a causal language model and its tokenizer from the Hugging Face Hub.

    Args:
        model_name: Hub repo id to load. Defaults to ``MODEL_NAME`` so
            existing zero-argument callers are unaffected.

    Returns:
        A ``(tokenizer, model)`` tuple. The model weights are loaded in
        float16, and ``device_map="auto"`` lets accelerate place them on
        whatever devices are available (GPU/CPU).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return tokenizer, model