Bindu190 committed on
Commit
34a024c
·
verified ·
1 Parent(s): 71fa4f8

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +47 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Base checkpoint and the local LoRA adapter directory produced by fine-tuning.
model_name = "HuggingFaceH4/zephyr-7b-beta"
adapter_path = "zephyr_lora_adapter"

# Tokenizer is loaded from the adapter directory.
# NOTE(review): assumes tokenizer files were saved alongside the adapter
# during fine-tuning — confirm; otherwise load from `model_name`.
tokenizer = AutoTokenizer.from_pretrained(adapter_path)

# 4-bit NF4 quantization config so the 7B base model fits in modest GPU memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,  # second quantization pass over the quant constants
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16  # compute (matmuls) in fp16
)

# device_map="auto" lets accelerate place layers on available devices.
base_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True
)

# Attach the LoRA adapter weights on top of the quantized base model.
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()  # inference only: disables dropout etc.
27
+
28
def solve_math(question, max_tokens=512):
    """Generate a step-by-step solution for *question* with the fine-tuned model.

    Args:
        question: The math problem as plain text.
        max_tokens: Maximum number of new tokens to generate (default 512).

    Returns:
        The model's answer as a string, without the prompt text.
    """
    # Zephyr chat template markers are special tokens in this tokenizer.
    prompt = f"<|user|>\n{question}\n<|assistant|>\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_tokens,
            do_sample=False,  # greedy decoding: deterministic answers
            pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id
        )
    # BUG FIX: skip_special_tokens=True strips "<|assistant|>" from the decoded
    # text, so splitting on that marker returned the whole prompt + answer.
    # Instead, drop the prompt tokens and decode only what was generated.
    prompt_len = inputs["input_ids"].shape[1]
    generated = outputs[0][prompt_len:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()
40
+
41
# Build the Gradio UI: one input textbox for the problem, one output textbox
# for the generated solution, then serve the app.
question_box = gr.Textbox(lines=5, label="Enter math problem")
solution_box = gr.Textbox(label="Solution")

demo = gr.Interface(
    fn=solve_math,
    inputs=question_box,
    outputs=solution_box,
    title="Math Solver (Zephyr Fine-Tuned)",
    description="This app uses a fine-tuned LLM to solve school-level math problems step by step.",
)

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
+ transformers
+ torch
+ gradio
+ peft
+ bitsandbytes
+ accelerate