Vivek16 committed on
Commit 0debc19 · verified · 1 Parent(s): 638621c

Create appy.py

Files changed (1)
  1. appy.py +89 -0
appy.py ADDED
@@ -0,0 +1,89 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ # app.py snippet
+
+ # --- Configuration ---
+ # ⚠️ YOUR USERNAME IS NOW SET
+ BASE_MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ ADAPTER_MODEL_ID = "Vivek16/Root_Math-TinyLlama-CPU"  # <-- Replace 'Vivek16' with your actual username
+ # Define the instruction template used during fine-tuning (Step 5)
+ INSTRUCTION_TEMPLATE = "<|system|>\nSolve the following math problem:</s>\n<|user|>\n{}</s>\n<|assistant|>"
+
+ # --- Model Loading Function ---
+ def load_model():
+     """Loads the base model and merges the LoRA adapters."""
+     print("Loading base model...")
+     tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
+     model = AutoModelForCausalLM.from_pretrained(
+         BASE_MODEL_ID,
+         torch_dtype=torch.bfloat16,  # Use bfloat16 for efficiency on CPU
+         device_map="cpu"
+     )
+
+     print("Loading and merging PEFT adapters...")
+     # Load the trained LoRA adapters from your repo
+     model = PeftModel.from_pretrained(model, ADAPTER_MODEL_ID)
+     # Merge the adapter weights into the base model weights
+     model = model.merge_and_unload()
+     model.eval()
+
+     # Ensure pad token is set for generation
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+
+     print("Model loaded and merged successfully!")
+     return tokenizer, model
+
+ # Load the model outside the prediction function for efficiency
+ tokenizer, model = load_model()
+
+ # --- Prediction Function ---
+ def generate_response(prompt):
+     """Generates a response using the fine-tuned model."""
+     # 1. Format the user input using the exact chat template
+     formatted_prompt = INSTRUCTION_TEMPLATE.format(prompt)
+
+     # 2. Tokenize the input
+     inputs = tokenizer(formatted_prompt, return_tensors="pt")
+
+     # 3. Generate the response (on CPU)
+     with torch.no_grad():
+         output_tokens = model.generate(
+             **inputs,
+             max_new_tokens=256,
+             do_sample=True,
+             temperature=0.7,
+             top_k=50,
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     # 4. Decode the output and strip the prompt
+     generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=False)
+
+     # Extract only the assistant's response (everything after the last <|assistant|> tag)
+     response_start = generated_text.rfind('<|assistant|>')
+     if response_start != -1:
+         # Get the text after <|assistant|> and strip the trailing </s>
+         assistant_response = generated_text[response_start + len('<|assistant|>'):].strip().split('</s>')[0].strip()
+     else:
+         assistant_response = "Error: Could not parse model output."
+
+     return assistant_response
+
+ # --- Gradio Interface ---
+ title = "Root Math TinyLlama 1.1B - CPU Fine-Tuned"
+ description = "A CPU-friendly TinyLlama model fine-tuned on the Big-Math-RL-Verified dataset using LoRA."
+ article = "Model repository: " + ADAPTER_MODEL_ID
+
+ gr.Interface(
+     fn=generate_response,
+     inputs=gr.Textbox(lines=5, label="Enter your math problem here:"),
+     outputs=gr.Textbox(label="Model Answer"),
+     title=title,
+     description=description,
+     article=article,
+     theme="soft"
+ ).launch()
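
For reference, below is a minimal, dependency-free sketch of what the prompt template and the extraction logic in generate_response do to a sample string. The generated_text value here is an invented stand-in for real model output, purely for illustration:

# Minimal sketch of the template/extraction logic above; no model required.
INSTRUCTION_TEMPLATE = "<|system|>\nSolve the following math problem:</s>\n<|user|>\n{}</s>\n<|assistant|>"

formatted_prompt = INSTRUCTION_TEMPLATE.format("What is 12 * 7?")

# Causal LMs echo the prompt in their output, so simulate the model
# appending an answer followed by an EOS tag (invented sample output):
generated_text = formatted_prompt + "\n12 * 7 = 84</s>"

# Same slicing as in generate_response: keep only the text between the
# last <|assistant|> tag and the first trailing </s>
response_start = generated_text.rfind('<|assistant|>')
assistant_response = generated_text[response_start + len('<|assistant|>'):].strip().split('</s>')[0].strip()
print(assistant_response)  # -> 12 * 7 = 84

This also shows why INSTRUCTION_TEMPLATE must match the template used during fine-tuning exactly: the parser anchors on the literal <|assistant|> tag in the decoded output.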