Wfafa committed on
Commit
886e1d8
·
verified ·
1 Parent(s): 034c59f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -130
app.py CHANGED
@@ -3,175 +3,120 @@ import gradio as gr
3
  import requests
4
  import json
5
 
6
# 🌍 Web search function
def search_web(query):
    """Look up *query* on the DuckDuckGo Instant Answer API and return a short summary.

    Returns the abstract text when available, otherwise up to three related-topic
    snippets, otherwise a fallback message.  Any failure (network error, bad
    JSON, HTTP error) is reported as a human-readable string rather than raised,
    so the chat UI never crashes on a failed search.
    """
    try:
        url = "https://api.duckduckgo.com/"
        params = {"q": query, "format": "json", "no_html": 1, "skip_disambig": 1}
        # timeout prevents the UI from hanging forever on a stalled connection
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()

        if data.get("AbstractText"):
            return data["AbstractText"]
        elif data.get("RelatedTopics"):
            # Entries without a "Text" key (nested topic groups) are skipped.
            topics = [t.get("Text", "") for t in data["RelatedTopics"] if "Text" in t]
            return " ".join(topics[:3])  # just a few results
        else:
            return "No useful information found."
    except Exception as e:
        return f"Search error: {e}"
23
-
24
# 🧠 Memory setup
HF_TOKEN = os.getenv("HF_TOKEN")
MEMORY_FILE = "memory.json"

def load_memory():
    """Return the persisted chat history from MEMORY_FILE, or [] when absent/unreadable."""
    try:
        with open(MEMORY_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # A missing or corrupt memory file must not crash the app at import
        # time — fall back to an empty history instead.
        return []

def save_memory(memory):
    """Persist *memory* (a JSON-serialisable chat history) to MEMORY_FILE."""
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(memory, f)

memory = load_memory()
39
-
40
# 💬 Chat function
def chat_with_model(message, history, context, file_input=None):
    """Handle one chat turn and return (history, history) for the two Chatbot outputs.

    - "search <query>" messages are answered via the DuckDuckGo helper.
    - An uploaded file is only referenced by name in the prompt, not parsed.
    - `context` (the sidebar banner text) is currently not sent to the model.
    Network or API failures append a friendly apology instead of raising.
    """
    import re  # used only for the step-heading formatting below

    if not isinstance(history, list):
        history = []

    # 🌍 Web search mode
    if message.lower().startswith("search "):
        query = message[7:]
        search_result = search_web(query)
        history.append((message, f"🔎 Here's what I found online:\n\n{search_result}"))
        save_memory(history)
        return history, history

    # 📂 If file is uploaded
    if file_input:
        file_name = file_input.name
        message += f"\n\n📎 (User uploaded a file named '{file_name}')"

    # 🧠 Build conversation (last 5 exchanges only, to bound the prompt size)
    conversation = [
        {"role": "system", "content": (
            "You are EduAI, a multilingual educational AI assistant created by a Sri Lankan student named Wafa Fazly. "
            "When solving math, explain step-by-step like a professional tutor. "
            "Use Markdown and LaTeX formatting for equations (use \\[ and \\]). "
            "Keep answers neat, structured, and student-friendly."
        )}
    ]
    for past_user, past_bot in history[-5:]:
        conversation.append({"role": "user", "content": past_user})
        conversation.append({"role": "assistant", "content": past_bot})
    conversation.append({"role": "user", "content": message})

    # 🚀 Send to Hugging Face model
    try:
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {HF_TOKEN}",
                "Content-Type": "application/json"
            },
            json={
                "model": "deepseek-ai/DeepSeek-V3.2-Exp:novita",
                "messages": conversation
            },
            timeout=60,  # a stalled request must not hang the UI forever
        )
        data = response.json()
        reply = data["choices"][0]["message"]["content"]

        # 🧮 Clean up math formatting.
        # Bold only real "Step N:" headings — the previous blanket
        # replace(":", ":**") corrupted every other colon in the reply
        # (URLs, LaTeX, plain prose) and never closed the bold marker.
        reply = re.sub(r"\bStep\s+(\d+)\s*:", r"\n\n**Step \1:**", reply)
        reply = reply.replace("\\[", "\n\n\\[")
        reply = reply.replace("\\]", "\\]\n\n")

        history.append((message, reply))
        save_memory(history)
        return history, history

    except Exception as e:
        print("Error:", e)
        history.append((message, "😅 EduAI is having trouble connecting right now. Please try again later!"))
        return history, history
109
 
110
# 📘 Sidebar context update
def update_context(choice):
    """Translate a sidebar selection into the banner text shown above the chat."""
    general_banner = "📘 **You are in General Mode.** Ask EduAI anything about your studies!"
    if choice:
        return f"📘 **You selected {choice} mode.** Ask anything related to this topic!"
    return general_banner
115
-
116
# 🧹 Clear chat memory
def clear_memory():
    """Delete the persisted history file, if present, and reset the chat UI."""
    memory_path = MEMORY_FILE
    if os.path.exists(memory_path):
        os.remove(memory_path)
    return [], "🧹 Chat memory cleared! Start fresh."
121
 
122
# 🎨 Gradio Interface — sidebar navigation on the left, chat panel on the right.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as iface:
    gr.Markdown("# 🎓 **EduAI — Your Smart Study Companion**")

    with gr.Row():
        # Left column: the collapsible main menu.
        with gr.Column(scale=1, min_width=230):
            gr.Markdown("### 🧭 **Main Menu**")

            with gr.Accordion("📚 Subject Tutor", open=False):
                subj = gr.Radio(
                    ["Science 🧪", "ICT 💻", "English 📘", "Mathematics ➗"],
                    label="Choose a subject",
                )

            with gr.Accordion("🗓 Study Planner", open=False):
                planner = gr.Radio(
                    ["View Plan 📅", "Add Task ✏️", "Study Tips 💡"],
                    label="Planner Options",
                )

            with gr.Accordion("🌐 Languages", open=False):
                lang = gr.Radio(
                    ["Learn Sinhala 🇱🇰", "Learn Tamil 🇮🇳", "Learn English 🇬🇧", "Learn Spanish 🇪🇸"],
                    label="Language Options",
                )

            with gr.Accordion("⚙️ Settings", open=False):
                clear_btn = gr.Button("🧹 Clear Memory")

            with gr.Accordion("👩‍🎓 About", open=False):
                gr.Markdown("""
        EduAI – developed and fine-tuned by **Wafa Fazly** using a pre-trained AI model,
        to help learners understand **Science, ICT, English, and more** —
        in a simple and friendly way! 💬
        """)

        # Right column: the chat panel itself.
        with gr.Column(scale=4):
            context_display = gr.Markdown("📘 **You are in General Mode.** Ask EduAI anything about your studies!")
            chatbot = gr.Chatbot(
                label="EduAI Chat",
                height=450,
                render_markdown=True,
                latex_delimiters=[
                    {"left": "$$", "right": "$$", "display": True},
                    {"left": "\\[", "right": "\\]", "display": True},
                ],
            )
            msg = gr.Textbox(label="Ask EduAI:")
            file_input = gr.File(label="📂 Upload a study file (PDF, DOCX, or image):")
            send = gr.Button("Send ✈️")

    # 🪄 Event handlers — every sidebar radio updates the context banner;
    # the send button drives the chat; the settings button clears memory.
    subj.change(update_context, inputs=subj, outputs=context_display)
    planner.change(update_context, inputs=planner, outputs=context_display)
    lang.change(update_context, inputs=lang, outputs=context_display)
    send.click(chat_with_model, inputs=[msg, chatbot, context_display, file_input], outputs=[chatbot, chatbot])
    clear_btn.click(clear_memory, outputs=[chatbot, context_display])

iface.launch()
 
3
  import requests
4
  import json
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
# Hugging Face access token, read from the environment at import time
# (None when unset — requests will then be sent unauthenticated).
HF_TOKEN = os.getenv("HF_TOKEN")
# NOTE(review): the ":novita" provider suffix is a router.huggingface.co
# convention; the serverless api-inference.huggingface.co endpoint may not
# accept it — confirm the endpoint/model-id pairing.
MODEL_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-V3.2-Exp:novita"
 
 
 
 
 
 
 
 
 
 
8
 
9
# 🌍 AI response function
def ai_response(message, history, context, file_input=None):
    """Send one user turn to the hosted model and append the exchange to *history*.

    Parameters:
        message: the user's text.
        history: prior (user, bot) pairs; coerced to [] when malformed.
        context: current sidebar banner text (not forwarded to the model).
        file_input: optional upload; only its file name is mentioned in the prompt.

    Returns (history, history) so both Chatbot outputs refresh together.
    Any failure is appended as an error message instead of raising.
    """
    if not isinstance(history, list):
        history = []

    if file_input:
        message += f"\n\n📎 (User uploaded a file named '{file_input.name}')"

    conversation = [
        {"role": "system", "content": (
            "You are EduAI, a multilingual educational AI assistant. "
            "Explain concepts step-by-step, generate MCQs dynamically when requested, "
            "and provide study tips. Use Markdown and LaTeX for math. Keep answers professional and clear."
        )}
    ]
    # Keep only the last 5 exchanges to bound the prompt size.
    for past_user, past_bot in history[-5:]:
        conversation.append({"role": "user", "content": past_user})
        conversation.append({"role": "assistant", "content": past_bot})
    conversation.append({"role": "user", "content": message})

    try:
        # NOTE(review): the serverless inference API documents "inputs" as a
        # string; sending a list of chat dicts may be rejected by some models —
        # confirm the payload format against the deployed endpoint.
        response = requests.post(
            MODEL_URL,
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json={"inputs": conversation, "parameters": {"max_new_tokens": 512}},
            timeout=60,  # a stalled request must not hang the UI forever
        )
        data = response.json()
        if isinstance(data, dict) and "error" in data:
            reply = "❌ Error contacting model: " + data["error"]
        else:
            reply = data[0]["generated_text"] if isinstance(data, list) else str(data)
        history.append((message, reply))
        return history, history
    except Exception as e:
        history.append((message, f"Error contacting model: {e}"))
        return history, history
45
 
46
# 🧠 Clear chat memory
MEMORY_FILE = "memory.json"

def clear_memory():
    """Remove the persisted conversation file, if any, and reset the chat panel."""
    file_present = os.path.exists(MEMORY_FILE)
    if file_present:
        os.remove(MEMORY_FILE)
    return [], "🧹 Chat memory cleared! Start fresh."
52
 
53
# 📘 Sidebar context
def update_context(choice):
    """Return the banner markdown for the currently selected sidebar option."""
    if choice:
        return f"📘 **You selected {choice}.**"
    return "📘 **General Mode.** Ask EduAI anything!"
58
+
59
# 🧩 Dynamic MCQ generator via AI
def generate_mcq(topic):
    """Ask the model for a 3-question MCQ quiz about *topic*; return an HTML snippet.

    Failures (network, API error, unexpected payload) are returned as an error
    string so the gr.HTML output always receives something displayable.
    """
    import html  # local import: only needed here, keeps module header untouched

    prompt = f"Create a short 3-question MCQ quiz about {topic} with 4 options each. Indicate the correct answer."
    try:
        response = requests.post(
            MODEL_URL,
            headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
            json={"inputs": prompt, "parameters": {"max_new_tokens": 400}},
            timeout=60,  # don't hang the UI on a stalled request
        )
        data = response.json()
        text = data[0]["generated_text"] if isinstance(data, list) else str(data)
        # Escape the model output before embedding it in raw HTML so stray
        # markup in the generated text cannot inject into the page.
        safe = html.escape(text).replace(chr(10), "<br>")
        return f"<div style='color:white'>{safe}</div>"
    except Exception as e:
        return f"❌ Error generating MCQs: {e}"
73
+
74
# 🎨 Gradio UI — dark theme via custom CSS; sidebar tools left, chat right.
custom_css = """
body {background-color:#1e1e2f; color:white; font-family:'Poppins', sans-serif;}
#chatbot {background: rgba(30,30,50,0.85); border-radius:12px; backdrop-filter: blur(8px);}
button.primary {background: linear-gradient(90deg,#7c3aed,#a855f7); color:white; border:none; border-radius:8px;}
button.primary:hover {background: linear-gradient(90deg,#6d28d9,#9333ea);}
"""

with gr.Blocks(css=custom_css) as iface:
    gr.Markdown("<h1 style='text-align:center; color:#a855f7'>🎓 EduAI — Smart Study Companion</h1>")

    with gr.Row():
        # Left column: tool accordions.
        with gr.Column(scale=1, min_width=220):
            gr.Markdown("### 🧭 Main Menu")

            with gr.Accordion("📚 Study Tutor", open=False):
                tutor_sub = gr.Radio(["Math", "Science", "ICT", "English"], label="Select Subject")

            with gr.Accordion("🧠 MCQ Generator", open=False):
                mcq_topic = gr.Textbox(label="Topic for Quiz")
                gen_mcq_btn = gr.Button("Generate MCQ", elem_classes="primary")
                mcq_output = gr.HTML("<i>Enter a topic and click 'Generate MCQ'</i>")

            with gr.Accordion("🗓 Study Planner", open=False):
                planner_opt = gr.Radio(["View Plan", "Add Task", "Study Tips"], label="Planner Options")

            with gr.Accordion("📖 Study Advisor", open=False):
                advisor_txt = gr.Textbox(label="Ask for Study Advice")
                advisor_btn = gr.Button("Get Advice", elem_classes="primary")
                advisor_output = gr.HTML("<i>Ask your study question here</i>")

            with gr.Accordion("⚙️ Settings", open=False):
                clear_btn = gr.Button("🧹 Clear Chat Memory")

        # Right column: the chat panel.
        with gr.Column(scale=4):
            context_display = gr.Markdown("📘 **General Mode**")
            chatbot = gr.Chatbot(elem_id="chatbot", height=480)
            user_input = gr.Textbox(label="Type your message")
            send_btn = gr.Button("Send ✈️", elem_classes="primary")

    # Event bindings.
    # BUG FIX: the original passed a literal None inside `inputs=[...]`; Gradio
    # requires every inputs entry to be a component, so the app crashed at
    # startup.  `ai_response` already defaults file_input to None, so the
    # fourth input is simply omitted.
    send_btn.click(ai_response, inputs=[user_input, chatbot, context_display], outputs=[chatbot, chatbot])
    clear_btn.click(clear_memory, outputs=[chatbot, context_display])
    gen_mcq_btn.click(generate_mcq, inputs=[mcq_topic], outputs=mcq_output)
    # NOTE(review): advisor replies go to the chatbot, leaving advisor_output
    # permanently static — confirm whether it should receive the answer instead.
    advisor_btn.click(ai_response, inputs=[advisor_txt, chatbot, context_display], outputs=[chatbot, chatbot])
    tutor_sub.change(update_context, inputs=tutor_sub, outputs=context_display)
    planner_opt.change(update_context, inputs=planner_opt, outputs=context_display)

iface.launch()