joonsak commited on
Commit
534738c
·
verified ·
1 Parent(s): 969b646

Updated app.py to final version

Browse files

Changed into the final version. If there are issues with the queries, the prompt might need to be adjusted.

Files changed (1) hide show
  1. app.py +124 -12
app.py CHANGED
@@ -3,8 +3,13 @@ import sympy as sp
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
- MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"
7
- SYSTEM_PROMPT = "You are a helpful tutor. Match the user's level."
 
 
 
 
 
8
 
9
  tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
10
  model = AutoModelForCausalLM.from_pretrained(
@@ -14,6 +19,24 @@ model = AutoModelForCausalLM.from_pretrained(
14
  )
15
  model.eval()
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  def verify_math(expr_str: str) -> str:
18
  try:
19
  expr = sp.sympify(expr_str)
@@ -25,34 +48,123 @@ def verify_math(expr_str: str) -> str:
25
  def generate(question: str, level: str, step_by_step: bool) -> str:
26
  if not question.strip():
27
  return "Please enter a question."
28
- style = f"Level: {level}. {'Explain step-by-step.' if step_by_step else 'Be concise.'}"
29
  prompt = f"System: {SYSTEM_PROMPT}\n{style}\nUser: {question}\nAssistant:"
30
  inputs = tok(prompt, return_tensors="pt")
31
  with torch.no_grad():
32
  out = model.generate(
33
  **inputs,
34
- max_new_tokens=192,
35
- do_sample=True,
36
  temperature=0.7,
37
- top_p=0.95,
 
 
38
  pad_token_id=tok.eos_token_id
39
  )
40
  text = tok.decode(out[0], skip_special_tokens=True)
41
  if "Assistant:" in text:
42
- text = text.split("Assistant:", 1)[1].strip()
43
  is_math = any(ch in question for ch in "+-*/=^") or question.lower().startswith(("simplify","derive","integrate"))
44
  sympy_note = verify_math(question) if is_math else "No math verification needed."
45
  return f"{text}\n\n---\n**SymPy check:** {sympy_note}\n_Status: Transformers CPU_"
46
 
 
47
  def build_app():
48
  with gr.Blocks(title="LearnLoop — CPU Space") as demo:
49
- gr.Markdown("# LearnLoop — CPU-only demo")
50
- q = gr.Textbox(label="Your question", placeholder="e.g., simplify (x^2 - 1)/(x - 1)")
51
- level = gr.Dropdown(choices=["Beginner","Intermediate","Advanced"], value="Beginner", label="Level")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  step = gr.Checkbox(value=True, label="Step-by-step")
53
- btn = gr.Button("Explain"); out = gr.Markdown()
54
- btn.click(generate, [q, level, step], out)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  return demo
56
 
 
57
  if __name__ == "__main__":
58
  build_app().launch()
 
3
  import torch
4
  from transformers import AutoTokenizer, AutoModelForCausalLM
5
 
6
+
7
+ MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
8
+ SYSTEM_PROMPT = """
9
+ You are a helpful tutor who always avoid hashtags, emojis, or social media style text.
10
+
11
+ """
12
+
13
 
14
  tok = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
15
  model = AutoModelForCausalLM.from_pretrained(
 
19
  )
20
  model.eval()
21
 
22
+ def level_style(level: str, step: bool) -> str:
23
+ if level == "Beginner":
24
+ return (
25
+ "Simple and short answer to the questions."
26
+ "Use short sentences and sometimes give small examples. "
27
+ + ("Show each step clearly." if step else "Keep it short and clear.")
28
+ )
29
+ elif level == "Intermediate":
30
+ return (
31
+ "Explain with moderate detail. Use correct terminology but keep it approachable. "
32
+ + ("Give step-by-step reasoning." if step else "Keep it short and clear.")
33
+ )
34
+ else: # Advanced
35
+ return (
36
+ "Use precise and technical language. Assume the user has strong background knowledge involving the matter."
37
+ + ("Show reasoning steps briefly." if step else "Be concise and analytical.")
38
+ )
39
+
40
  def verify_math(expr_str: str) -> str:
41
  try:
42
  expr = sp.sympify(expr_str)
 
48
  def generate(question: str, level: str, step_by_step: bool) -> str:
49
  if not question.strip():
50
  return "Please enter a question."
51
+ style = level_style(level, step_by_step)
52
  prompt = f"System: {SYSTEM_PROMPT}\n{style}\nUser: {question}\nAssistant:"
53
  inputs = tok(prompt, return_tensors="pt")
54
  with torch.no_grad():
55
  out = model.generate(
56
  **inputs,
57
+ max_new_tokens=192, # was 384
58
+ do_sample=True, # False was True
59
  temperature=0.7,
60
+ top_p=0.9,
61
+ repetition_penalty=1.2,
62
+ no_repeat_ngram_size=3,
63
  pad_token_id=tok.eos_token_id
64
  )
65
  text = tok.decode(out[0], skip_special_tokens=True)
66
  if "Assistant:" in text:
67
+ text = text.split("Assistant:", 1)[1].strip()
68
  is_math = any(ch in question for ch in "+-*/=^") or question.lower().startswith(("simplify","derive","integrate"))
69
  sympy_note = verify_math(question) if is_math else "No math verification needed."
70
  return f"{text}\n\n---\n**SymPy check:** {sympy_note}\n_Status: Transformers CPU_"
71
 
72
+ # Building app and UI
73
  def build_app():
74
  with gr.Blocks(title="LearnLoop — CPU Space") as demo:
75
+
76
+ # CSS styles and adding colours
77
+ gr.HTML("""
78
+ <style>
79
+
80
+ .gradio-container {
81
+ background-color: #EDF6FA !important; /* pale blue */
82
+ padding: 24px;
83
+ border-radius: 12px;
84
+ box-shadow: 0 4px 12px rgba(0,0,0,0.05);
85
+ }
86
+
87
+ /* buttons */
88
+ button {
89
+ border-radius: 8px;
90
+ transition: all 0.2s ease-in-out;
91
+ font-weight: 500;
92
+ letter-spacing: 0.5px;
93
+ }
94
+ button:hover {
95
+ opacity: 0.9;
96
+ transform: translateY(-1px);
97
+ }
98
+ button:active {
99
+ filter: brightness(85%);
100
+ transform: scale(0.98);
101
+ }
102
+ /* Explain and Reset buttons */
103
+ #explain-btn {
104
+ background-color: #5499C7;
105
+ color: white;
106
+ border: 2px solid #2E86C1;
107
+ }
108
+ #reset-btn {
109
+ background-color: #EC7063;
110
+ color: white;
111
+ border: 2px solid #CB4335;
112
+ }
113
+ #explain-btn:hover, #reset-btn:hover {
114
+ opacity: 0.85;
115
+ }
116
+ #explain-btn:active, #reset-btn:active {
117
+ filter: brightness(85%);
118
+ transform: scale(0.98);
119
+ }
120
+ </style>
121
+ """)
122
+
123
+
124
+ # prints using instructions
125
+ gr.Markdown("""
126
+ # **LearnL**<span style="font-size:1.2em; color: #21618C">∞</span>**p — AI Tutor**
127
+ This app uses the [Qwen 2.5 model](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct)
128
+ to explain questions at different skill levels. It can also verify
129
+ mathematical expressions using the SymPy library.
130
+ **How to use:**
131
+ 1️⃣ Type your question or a mathematical expression.
132
+ 2️⃣ Select your level (Beginner, Intermediate, Advanced).
133
+ 3️⃣ Choose whether you want a step-by-step explanation.
134
+ 4️⃣ Press **"Explain"** or **Enter** on your keyboard.
135
+ 5️⃣ If you want to enter a new question, you can press **"Reset"** or simply **type a new question**.
136
+
137
+ 💬 You can ask your question in **English**.
138
+ """)
139
+
140
+ # User's feed
141
+ q = gr.Textbox(label="Your question", placeholder="e.g., simplify (x^2 - 1)/(x - 1)", elem_id="question-box")
142
+ level = gr.Dropdown(choices=["Beginner", "Intermediate", "Advanced"], value="Beginner", label="Level")
143
  step = gr.Checkbox(value=True, label="Step-by-step")
144
+
145
+
146
+ # Results
147
+ loading = gr.Markdown(visible=False) # spinner hidden at first
148
+ out = gr.Markdown()
149
+
150
+ # Buttons next to each other
151
+ with gr.Row():
152
+ btn = gr.Button("Explain", elem_id="explain-btn")
153
+ reset_btn = gr.ClearButton([q, out, loading], value="Reset", elem_id="reset-btn")
154
+
155
+ # connect to generate function with spinner
156
+ def wrapped_generate(q_val, level_val, step_val):
157
+ # Show the spinner first
158
+ loading_text = "⏳ Generating explanation..."
159
+ result = generate(q_val, level_val, step_val)
160
+ # hide spinner when ready
161
+ return "", result
162
+
163
+ btn.click(fn=wrapped_generate, inputs=[q, level, step], outputs=[loading, out])
164
+ q.submit(fn=wrapped_generate, inputs=[q, level, step], outputs=[loading, out])
165
+
166
  return demo
167
 
168
+ # start the app
169
  if __name__ == "__main__":
170
  build_app().launch()