File size: 859 Bytes
36c45af
6986b77
36c45af
6986b77
 
 
94bcca6
334b7f2
6986b77
94bcca6
6986b77
 
d44146d
36c45af
6986b77
fe708c7
6986b77
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

BASE = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"   # ← your base
REPO = "savan360/Qwen_prompt_creator"              # your repo (root)
SUBFOLDER = "adapter"                               # where adapter files live

# Tokenizer comes from the BASE checkpoint; trust_remote_code allows any
# custom tokenizer class shipped with it.
tok = AutoTokenizer.from_pretrained(BASE, trust_remote_code=True)
# Some checkpoints ship without a pad token; fall back to EOS so padding /
# batched generation works.  FIX: the original guarded with
# hasattr(tok, "eos_token"), which is always True (the attribute exists even
# when unset) — the meaningful check is whether eos_token has a value.
if tok.pad_token is None and tok.eos_token is not None:
    tok.pad_token = tok.eos_token

# 4-bit NF4 quantization with double quantization; float16 compute keeps
# memory low during inference.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)

base_model = AutoModelForCausalLM.from_pretrained(
    BASE,
    trust_remote_code=True,
    device_map="auto",            # spread layers over available devices
    quantization_config=bnb,
    dtype="auto",                 # non-quantized params follow checkpoint dtype
)
# Attach the LoRA adapter stored under REPO/SUBFOLDER on the Hub.
model = PeftModel.from_pretrained(base_model, REPO, subfolder=SUBFOLDER)