Vivek16 committed
Commit a490c85 · verified · 1 Parent(s): 1848a57

Update app.py

Files changed (1):
  1. app.py +78 -60
app.py CHANGED
@@ -1,70 +1,88 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+
+ # --- Configuration ---
+ # ⚠️ REPLACE 'YOUR_HF_USERNAME' with your actual username
+ BASE_MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ ADAPTER_MODEL_ID = "YOUR_HF_USERNAME/Root_Math-TinyLlama-CPU"
+
+ # Define the instruction template used during fine-tuning (Step 5)
+ INSTRUCTION_TEMPLATE = "<|system|>\nSolve the following math problem:</s>\n<|user|>\n{}</s>\n<|assistant|>"
+
+ # --- Model Loading Function ---
+ def load_model():
+     """Loads the base model and merges the LoRA adapters."""
+     print("Loading base model...")
+     tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
+     model = AutoModelForCausalLM.from_pretrained(
+         BASE_MODEL_ID,
+         torch_dtype=torch.bfloat16,  # Use bfloat16 for efficiency on CPU
+         device_map="cpu"
+     )
+
+     print("Loading and merging PEFT adapters...")
+     # Load the trained LoRA adapters from your repo
+     model = PeftModel.from_pretrained(model, ADAPTER_MODEL_ID)
+     # Merge the adapter weights into the base model weights
+     model = model.merge_and_unload()
+     model.eval()
+
+     # Ensure pad token is set for generation
+     if tokenizer.pad_token is None:
+         tokenizer.pad_token = tokenizer.eos_token
+
+     print("Model loaded and merged successfully!")
+     return tokenizer, model
+
+ # Load the model outside the prediction function for efficiency
+ tokenizer, model = load_model()
+
+ # --- Prediction Function ---
+ def generate_response(prompt):
+     """Generates a response using the fine-tuned model."""
+     # 1. Format the user input using the exact chat template
+     formatted_prompt = INSTRUCTION_TEMPLATE.format(prompt)
+
+     # 2. Tokenize the input
+     inputs = tokenizer(formatted_prompt, return_tensors="pt")
+
+     # 3. Generate the response (on CPU)
+     with torch.no_grad():
+         output_tokens = model.generate(
+             **inputs,
+             max_new_tokens=256,
+             do_sample=True,
+             temperature=0.7,
+             top_k=50,
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+     # 4. Decode the output and strip the prompt
+     generated_text = tokenizer.decode(output_tokens[0], skip_special_tokens=False)
+
+     # Extract only the assistant's response (everything after the last <|assistant|> tag)
+     response_start = generated_text.rfind('<|assistant|>')
+     if response_start != -1:
+         # Get the text after <|assistant|> and strip the trailing </s>
+         assistant_response = generated_text[response_start + len('<|assistant|>'):].strip().split('</s>')[0].strip()
+     else:
+         assistant_response = "Error: Could not parse model output."
+
+     return assistant_response
+
+ # --- Gradio Interface ---
+ title = "Root Math TinyLlama 1.1B - CPU Fine-Tuned"
+ description = "A CPU-friendly TinyLlama model fine-tuned on the Big-Math-RL-Verified dataset using LoRA."
+ article = "Model repository: " + ADAPTER_MODEL_ID
+
+ gr.Interface(
+     fn=generate_response,
+     inputs=gr.Textbox(lines=5, label="Enter your math problem here:"),
+     outputs=gr.Textbox(label="Model Answer"),
+     title=title,
+     description=description,
+     article=article,
+     theme="soft"
+ ).launch()
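
Not touched by this commit, but implied by it: the new app.py imports torch, transformers, and peft, so the Space's requirements.txt presumably needs those packages alongside gradio, and ADAPTER_MODEL_ID must point at a real adapter repository before the app will start. Once the Space is running, the single-textbox interface can also be exercised programmatically; a minimal sketch using gradio_client follows (the Space id and the prompt are placeholders, not values taken from this commit):

from gradio_client import Client

# Placeholder Space id -- replace with the actual <username>/<space-name>.
client = Client("YOUR_HF_USERNAME/root-math-tinyllama-demo")
# A gr.Interface exposes its fn under the default endpoint "/predict".
result = client.predict("If 3x + 5 = 20, what is x?", api_name="/predict")
print(result)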