## BrainGPT (Llama-2 + LoRA) model -- disabled, kept for reference
# import os
# from transformers import AutoModelForCausalLM, AutoTokenizer
# from peft import PeftModel
#
# def load_model():
#     hf_token = os.getenv("HF_TOKEN")
#     if not hf_token:
#         raise RuntimeError("HF_TOKEN not set.")
#
#     # Use a user-writable cache directory (important for Docker non-root)
#     HF_CACHE = os.path.expanduser("~/.cache/huggingface")
#     os.makedirs(HF_CACHE, exist_ok=True)
#     os.environ["TRANSFORMERS_CACHE"] = HF_CACHE
#     os.environ["HF_HOME"] = HF_CACHE
#
#     base_model = AutoModelForCausalLM.from_pretrained(
#         "meta-llama/Llama-2-7b-chat-hf",
#         use_auth_token=hf_token,
#         cache_dir=HF_CACHE,  # keep consistent with the env vars above
#         torch_dtype="auto",
#         device_map="auto",
#         load_in_8bit=True,  # <-- Try enabling 8-bit
#     )
#     # Apply the BrainGPT LoRA adapter on top of the base model
#     model = PeftModel.from_pretrained(
#         base_model,
#         "BrainGPT/BrainGPT-7B-v0.1",
#         use_auth_token=hf_token,
#         cache_dir=HF_CACHE,
#     )
#     tokenizer = AutoTokenizer.from_pretrained(
#         "meta-llama/Llama-2-7b-chat-hf",
#         use_auth_token=hf_token,
#         cache_dir=HF_CACHE,
#     )
#     return model, tokenizer
## GPT-2 model (active)
import os
from transformers import AutoModelForCausalLM, AutoTokenizer


def load_model():
    """Load the GPT-2 model and tokenizer, caching them in a user-writable directory."""
    # Use a user-writable cache directory (important for Docker non-root)
    HF_CACHE = os.path.expanduser("~/.cache/huggingface")
    os.makedirs(HF_CACHE, exist_ok=True)
    # cache_dir is also passed explicitly below, since env vars set after
    # importing transformers are not always picked up
    os.environ["TRANSFORMERS_CACHE"] = HF_CACHE
    os.environ["HF_HOME"] = HF_CACHE

    model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        cache_dir=HF_CACHE,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        cache_dir=HF_CACHE,
    )
    return model, tokenizer
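

## Example usage -- a minimal sketch, not part of the original Space code:
## load the model once, then generate from a prompt. The prompt string and
## max_new_tokens value below are illustrative assumptions.
if __name__ == "__main__":
    model, tokenizer = load_model()
    inputs = tokenizer("Hello, world", return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=20,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token by default
    )
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))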