import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

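# Model locations and the access token are read from environment variables so the
# demo can be pointed at a different base model or adapter repo without code changes.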
BASE_MODEL = os.getenv("BASE_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")
LORA_REPO  = os.getenv("LORA_REPO",  "YOUR_USERNAME/DSAN-5800-LoRA-mistral7b-r8")
HF_TOKEN   = os.getenv("HF_TOKEN")  # set only if repos are private

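# Load the base model in 4-bit and wrap it with the LoRA adapter; this runs once at startup.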
def load_model():
    tok = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=True, token=HF_TOKEN)
    if tok.pad_token is None and tok.eos_token is not None:
        tok.pad_token = tok.eos_token
        tok.padding_side = "left"
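    # Quantize the base model to 4-bit NF4 (with double quantization) to reduce inference memory.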
    quant = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True,
                               bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16)
    base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, device_map="auto",
                                                torch_dtype=torch.float16, quantization_config=quant,
                                                token=HF_TOKEN)
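    # Attach the trained LoRA adapter weights on top of the quantized base model.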
    model = PeftModel.from_pretrained(base, LORA_REPO, device_map="auto", token=HF_TOKEN)
    model.eval()
    return model, tok

model, tokenizer = load_model()

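# Build a chat-formatted prompt; fall back to a plain-text prompt if the tokenizer's
# chat template rejects the message list (e.g. templates without system-role support).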
def build_prompt(instruction: str) -> str:
    msgs = [{"role":"system","content":"You are a Python coding assistant. Produce correct, clean, efficient Python."},
            {"role":"user","content":instruction}]
    try:
        return tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
    except Exception:
        return f"System: You are a Python coding assistant.\nUser: {instruction}\nAssistant:"

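# Generate a completion for a single instruction and return only the newly produced text.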
def infer(instruction, max_new_tokens, temperature, top_p):
    prompt = build_prompt(instruction)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Sampling requires temperature > 0; fall back to greedy decoding when the slider is at 0.
    do_sample = float(temperature) > 0
    with torch.no_grad():
        out = model.generate(**inputs,
                             do_sample=do_sample,
                             temperature=float(temperature) if do_sample else None,
                             top_p=float(top_p) if do_sample else None,
                             max_new_tokens=int(max_new_tokens),
                             pad_token_id=tokenizer.eos_token_id,
                             eos_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens; re-matching the prompt string after
    # skip_special_tokens=True is unreliable because template tokens are stripped.
    new_tokens = out[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

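# Gradio UI: instruction text in, generated Python shown in a code box, plus sampling controls.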
demo = gr.Interface(
    fn=infer,
    inputs=[gr.Textbox(label="Instruction", lines=8),
            gr.Slider(32, 2048, value=512, step=32, label="max_new_tokens"),
            gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="temperature"),
            gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="top_p")],
    outputs=gr.Code(label="Model output (Python)", language="python"),
    title="DSAN-5800 LoRA Demo",
    description="Mistral 7B + LoRA adapter with 4-bit inference."
)

if __name__ == "__main__":
    demo.launch()
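
# A minimal smoke-test sketch (an assumption, not part of the deployed app): with the server
# running locally on the default http://127.0.0.1:7860 and the gradio_client package installed,
# the endpoint can be exercised like this:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("Write a function that reverses a string.", 256, 0.2, 0.9,
#                        api_name="/predict"))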