sm110101 committed
Commit c2adc1a · 1 Parent(s): 1d5f7ee

Setup App interface

Files changed (2):
  1. app.py +54 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,54 @@
+ import os, torch, gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+ from peft import PeftModel
+
+ BASE_MODEL = os.getenv("BASE_MODEL", "mistralai/Mistral-7B-Instruct-v0.2")
+ LORA_REPO = os.getenv("LORA_REPO", "YOUR_USERNAME/DSAN-5800-LoRA-mistral7b-r8")
+ HF_TOKEN = os.getenv("HF_TOKEN")  # set only if repos are private
+
+ def load_model():
+     tok = AutoTokenizer.from_pretrained(BASE_MODEL, use_fast=True, token=HF_TOKEN)
+     if tok.pad_token is None and tok.eos_token is not None:
+         # reuse EOS as the pad token; left-padding keeps generation aligned
+         tok.pad_token = tok.eos_token
+         tok.padding_side = "left"
+     # 4-bit NF4 quantization with double quantization fits the 7B base on a single GPU
+     quant = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True,
+                                bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16)
+     base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, device_map="auto",
+                                                 torch_dtype=torch.float16, quantization_config=quant,
+                                                 token=HF_TOKEN)
+     # attach the LoRA adapter on top of the quantized base model
+     model = PeftModel.from_pretrained(base, LORA_REPO, device_map="auto", token=HF_TOKEN)
+     model.eval()
+     return model, tok
+
+ model, tokenizer = load_model()
+
+ def build_prompt(instruction: str) -> str:
+     msgs = [{"role": "system", "content": "You are a Python coding assistant. Produce correct, clean, efficient Python."},
+             {"role": "user", "content": instruction}]
+     try:
+         return tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
+     except Exception:
+         # fallback for tokenizers whose chat template rejects system messages
+         return f"System: You are a Python coding assistant.\nUser: {instruction}\nAssistant:"
+
+ def infer(instruction, max_new_tokens, temperature, top_p):
+     prompt = build_prompt(instruction)
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     # clamp temperature away from 0, which generate() rejects when sampling
+     temperature = max(float(temperature), 1e-3)
+     with torch.no_grad():
+         out = model.generate(**inputs, do_sample=True, temperature=temperature,
+                              top_p=float(top_p), max_new_tokens=int(max_new_tokens),
+                              pad_token_id=tokenizer.eos_token_id, eos_token_id=tokenizer.eos_token_id)
+     # decode only the newly generated tokens so the prompt is not echoed back
+     gen_tokens = out[0][inputs["input_ids"].shape[1]:]
+     return tokenizer.decode(gen_tokens, skip_special_tokens=True).strip()
+
+ demo = gr.Interface(
+     fn=infer,
+     inputs=[gr.Textbox(label="Instruction", lines=8),
+             gr.Slider(32, 2048, value=512, step=32, label="max_new_tokens"),
+             gr.Slider(0.0, 1.0, value=0.2, step=0.05, label="temperature"),
+             gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="top_p")],
+     outputs=gr.Code(label="Model output (Python)", language="python"),
+     title="DSAN-5800 LoRA Demo",
+     description="Mistral 7B + LoRA adapter with 4-bit inference.",
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
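
A quick way to sanity-check the app without opening the browser UI is to call infer directly. A minimal sketch, assuming a CUDA GPU (the 4-bit bitsandbytes load above needs one) and that LORA_REPO points at a real adapter repo rather than the YOUR_USERNAME placeholder:

# Hypothetical smoke test: importing app executes load_model() at module
# scope, so it needs the same GPU and Hub access as the Space itself.
import app

completion = app.infer(
    "Write a Python function that checks whether a string is a palindrome.",
    max_new_tokens=256, temperature=0.2, top_p=0.9,
)
print(completion)
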
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers>=4.44.0
+ accelerate>=0.33.0
+ peft>=0.13.0
+ bitsandbytes>=0.43.1
+ gradio>=4.44.0
+ huggingface_hub>=0.24.0
+ sentencepiece>=0.1.99
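
With these minimum versions installed (pip install -r requirements.txt), python app.py launches the demo locally. Note that the 4-bit bitsandbytes load effectively requires a CUDA GPU, so on Hugging Face Spaces the hardware must be a GPU tier; a CPU-only Space would fail inside load_model().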