Christi049 committed on
Commit
b4ef4c6
·
verified ·
1 Parent(s): 9ffef58

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch

# Base checkpoint: 4-bit (bitsandbytes) quantized LLaMA 3.2 3B Instruct.
base_model_id = "unsloth/Llama-3.2-3B-Instruct-bnb-4bit"

# LoRA adapter fine-tuned for meal-plan generation.
adapter_id = "Christi049/meal-gen-adapter"

# The tokenizer ships with the base checkpoint, not the adapter.
tokenizer = AutoTokenizer.from_pretrained(base_model_id)

# Load the base model in half precision; device_map="auto" lets accelerate
# place the weights on whatever hardware is available.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Attach the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, adapter_id)
def generate_meal(prompt):
    """Generate a meal-plan completion for a free-text request.

    Parameters
    ----------
    prompt : str
        The user's meal request as typed into the Gradio textbox.

    Returns
    -------
    str
        The decoded model output. NOTE(review): this includes the prompt
        text itself, since the full sequence is decoded — trim with
        outputs[0][inputs["input_ids"].shape[-1]:] if only the completion
        should be shown.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Inference only — disable autograd tracking to save memory/time.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=300,
            # Bug fix: temperature is ignored (greedy decoding + warning)
            # unless sampling is explicitly enabled.
            do_sample=True,
            temperature=0.7,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Minimal Gradio UI: one textbox in, one textbox out.
request_box = gr.Textbox(
    label="Meal Request",
    placeholder="e.g., Generate a 7-day vegetarian meal plan",
)
plan_box = gr.Textbox(label="Generated Meal Plan")

iface = gr.Interface(
    fn=generate_meal,
    inputs=request_box,
    outputs=plan_box,
    title="Weekly Meal Generator",
)

# Start the web server (blocks until shut down).
iface.launch()