Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,201 +1,70 @@
|
|
| 1 |
-
import json
import os
import random

import gradio as gr
import requests
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
def search_web(query):
    """Return a short text summary for *query* from the DuckDuckGo Instant Answer API.

    Prefers the abstract text; falls back to the first three related topics,
    then to a "nothing found" message. Any failure (network, HTTP, JSON)
    is returned as a human-readable error string rather than raised.
    """
    try:
        url = "https://api.duckduckgo.com/"
        params = {"q": query, "format": "json", "no_html": 1, "skip_disambig": 1}
        # timeout keeps a stalled request from hanging the whole app
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()  # fail clearly on non-2xx instead of parsing junk
        data = response.json()

        if data.get("AbstractText"):
            return data["AbstractText"]
        elif data.get("RelatedTopics"):
            topics = [t.get("Text", "") for t in data["RelatedTopics"] if "Text" in t]
            return " ".join(topics[:3])
        else:
            return "No useful information found."
    except Exception as e:
        # Best-effort: surface the problem as chat text, never crash the UI.
        return f"Search error: {e}"
|
| 24 |
-
|
| 25 |
-
# 🧠 Memory setup
HF_TOKEN = os.getenv("HF_TOKEN")
MEMORY_FILE = "memory.json"


def load_memory():
    """Load chat history from MEMORY_FILE.

    Returns the parsed JSON list, or [] when the file is missing,
    unreadable, or corrupt — a bad cache file must not crash startup,
    since this runs at import time via ``memory = load_memory()``.
    """
    if os.path.exists(MEMORY_FILE):
        try:
            with open(MEMORY_FILE, "r", encoding="utf-8") as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            return []
    return []


def save_memory(memory):
    """Persist chat history to MEMORY_FILE as JSON."""
    with open(MEMORY_FILE, "w", encoding="utf-8") as f:
        json.dump(memory, f)


memory = load_memory()
|
| 40 |
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
if message.lower().startswith("search "):
|
| 47 |
-
query = message[7:]
|
| 48 |
-
search_result = search_web(query)
|
| 49 |
-
history.append((message, f"🔎 Here's what I found online:\n\n{search_result}"))
|
| 50 |
-
save_memory(history)
|
| 51 |
-
return history, history
|
| 52 |
-
|
| 53 |
-
if file_input:
|
| 54 |
-
file_name = file_input.name
|
| 55 |
-
message += f"\n\n📎 (User uploaded a file named '{file_name}')"
|
| 56 |
-
|
| 57 |
-
conversation = [
|
| 58 |
-
{"role": "system", "content": (
|
| 59 |
-
"You are EduAI, a multilingual educational AI assistant created by a Sri Lankan student named Wafa Fazly. "
|
| 60 |
-
"When solving math, explain step-by-step like a professional tutor. "
|
| 61 |
-
"Use Markdown and LaTeX formatting for equations (use \\[ and \\]). "
|
| 62 |
-
"Keep answers neat, structured, and student-friendly."
|
| 63 |
-
)}
|
| 64 |
-
]
|
| 65 |
-
|
| 66 |
-
for past_user, past_bot in history[-5:]:
|
| 67 |
-
conversation.append({"role": "user", "content": past_user})
|
| 68 |
-
conversation.append({"role": "assistant", "content": past_bot})
|
| 69 |
-
|
| 70 |
-
conversation.append({"role": "user", "content": message})
|
| 71 |
|
| 72 |
try:
|
| 73 |
response = requests.post(
|
| 74 |
-
"https://
|
| 75 |
-
headers={
|
| 76 |
-
|
| 77 |
-
"Content-Type": "application/json"
|
| 78 |
-
},
|
| 79 |
-
json={
|
| 80 |
-
"model": "deepseek-ai/DeepSeek-V3.2-Exp:novita",
|
| 81 |
-
"messages": conversation
|
| 82 |
-
}
|
| 83 |
)
|
| 84 |
-
|
| 85 |
data = response.json()
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
|
|
|
|
|
|
| 100 |
except Exception as e:
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
for opt in q['options']:
|
| 135 |
-
quiz_html += f"<input type='checkbox' name='q{i}' value='{opt}'> {opt}<br>"
|
| 136 |
-
quiz_html += "<br></div>"
|
| 137 |
-
quiz_html += "<br><b>✅ Select your answers and review!</b>"
|
| 138 |
-
return quiz_html
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
# 🎨 Gradio Interface
|
| 142 |
-
with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as iface:
|
| 143 |
-
gr.Markdown("# 🎓 **EduAI — Your Smart Study Companion**")
|
| 144 |
-
|
| 145 |
-
with gr.Row():
|
| 146 |
-
with gr.Column(scale=1, min_width=230):
|
| 147 |
-
gr.Markdown("### 🧭 **Main Menu**")
|
| 148 |
-
|
| 149 |
-
with gr.Accordion("📚 Subject Tutor", open=False):
|
| 150 |
-
subj = gr.Radio(
|
| 151 |
-
["Science 🧪", "ICT 💻", "English 📘", "Mathematics ➗"],
|
| 152 |
-
label="Choose a subject"
|
| 153 |
-
)
|
| 154 |
-
|
| 155 |
-
with gr.Accordion("🗓 Study Planner", open=False):
|
| 156 |
-
planner = gr.Radio(
|
| 157 |
-
["View Plan 📅", "Add Task ✏️", "Study Tips 💡"],
|
| 158 |
-
label="Planner Options"
|
| 159 |
-
)
|
| 160 |
-
|
| 161 |
-
with gr.Accordion("🌐 Languages", open=False):
|
| 162 |
-
lang = gr.Radio(
|
| 163 |
-
["Learn Sinhala 🇱🇰", "Learn Tamil 🇮🇳", "Learn English 🇬🇧", "Learn Spanish 🇪🇸"],
|
| 164 |
-
label="Language Options"
|
| 165 |
-
)
|
| 166 |
-
|
| 167 |
-
with gr.Accordion("🧠 MCQ Quiz Generator", open=False):
|
| 168 |
-
gen_btn = gr.Button("🎯 Generate Quiz")
|
| 169 |
-
quiz_output = gr.HTML("<i>Click 'Generate Quiz' to start!</i>")
|
| 170 |
-
|
| 171 |
-
with gr.Accordion("⚙️ Settings", open=False):
|
| 172 |
-
clear_btn = gr.Button("🧹 Clear Memory")
|
| 173 |
-
|
| 174 |
-
with gr.Accordion("👩🎓 About", open=False):
|
| 175 |
-
gr.Markdown("""
|
| 176 |
-
EduAI – developed and fine-tuned by **Wafa Fazly** using a pre-trained AI model,
|
| 177 |
-
to help learners understand **Science, ICT, English, and more** —
|
| 178 |
-
in a simple and friendly way! 💬
|
| 179 |
-
""")
|
| 180 |
-
|
| 181 |
-
with gr.Column(scale=4):
|
| 182 |
-
context_display = gr.Markdown("📘 **You are in General Mode.** Ask EduAI anything about your studies!")
|
| 183 |
-
chatbot = gr.Chatbot(
|
| 184 |
-
label="EduAI Chat",
|
| 185 |
-
height=450,
|
| 186 |
-
render_markdown=True,
|
| 187 |
-
latex_delimiters=[{"left": "$$", "right": "$$", "display": True}, {"left": "\\[", "right": "\\]", "display": True}]
|
| 188 |
-
)
|
| 189 |
-
msg = gr.Textbox(label="Ask EduAI:")
|
| 190 |
-
file_input = gr.File(label="📂 Upload a study file (PDF, DOCX, or image):")
|
| 191 |
-
send = gr.Button("Send ✈️")
|
| 192 |
-
|
| 193 |
-
# 🪄 Event handlers
|
| 194 |
-
subj.change(update_context, inputs=subj, outputs=context_display)
|
| 195 |
-
planner.change(update_context, inputs=planner, outputs=context_display)
|
| 196 |
-
lang.change(update_context, inputs=lang, outputs=context_display)
|
| 197 |
-
send.click(chat_with_model, inputs=[msg, chatbot, context_display, file_input], outputs=[chatbot, chatbot])
|
| 198 |
-
clear_btn.click(clear_memory, outputs=[chatbot, context_display])
|
| 199 |
-
gen_btn.click(generate_quiz, outputs=quiz_output)
|
| 200 |
-
|
| 201 |
-
iface.launch()
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import requests
|
| 3 |
+
import os
|
| 4 |
import random
|
| 5 |
|
| 6 |
+
# 🧠 Function to call a Hugging Face model (or any LLM endpoint)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
+
def generate_mcqs(topic="science", num_questions=3):
    """Generate MCQs on *topic* via the Hugging Face Inference API.

    Returns a list of dicts ``{"question", "options" (4 strings), "answer"}``.
    On parse failure a single placeholder entry is returned; on any
    exception a single error entry is returned — callers never see a raise.
    """
    prompt = (
        f"Create {num_questions} multiple-choice questions on {topic}. "
        f"Each question should have 4 options (A–D) and specify the correct answer."
    )

    try:
        response = requests.post(
            "https://api-inference.huggingface.co/models/google/gemma-2b",
            headers={"Authorization": f"Bearer {HF_TOKEN}"},
            json={"inputs": prompt},
            timeout=60,  # inference can be slow, but must never hang forever
        )
        # Non-2xx would otherwise surface as a cryptic KeyError below.
        response.raise_for_status()
        data = response.json()
        text = data[0]["generated_text"]

        # 🪄 Simple parsing: blank-line-separated blocks, one question each.
        questions = []
        for block in text.split("\n\n"):
            if "." in block:
                parts = block.split("\n")
                q_line = parts[0]
                # First four non-empty lines after the question are the options.
                opts = [p.strip() for p in parts[1:5] if p.strip()]
                ans = ""
                for p in parts:
                    if "Answer" in p or "Correct" in p:
                        ans = p.split(":")[-1].strip()
                # Keep only well-formed questions (exactly 4 options found).
                if len(opts) == 4:
                    questions.append({"question": q_line, "options": opts, "answer": ans})
        return questions if questions else [{"question": "⚠️ Couldn’t parse any question.", "options": [], "answer": ""}]
    except Exception as e:
        return [{"question": f"Error: {e}", "options": [], "answer": ""}]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# 🎯 Build quiz HTML with checkboxes
def build_quiz_html(topic):
    """Render the MCQs generated for *topic* as a styled HTML quiz.

    Each question gets checkbox options plus a hidden answer paragraph;
    the trailing "Show Answers" button reveals every answer at once.
    """
    mcqs = generate_mcqs(topic)
    pieces = ["<div style='font-family:Poppins,sans-serif;color:#222;'>"]
    for idx, item in enumerate(mcqs, 1):
        pieces.append("<div style='margin:15px;padding:15px;border-radius:10px;background:#fff9ff;box-shadow:0 3px 10px rgba(0,0,0,0.08);'>")
        pieces.append(f"<b style='color:#5b21b6;'>{idx}. {item['question']}</b><br><br>")
        for choice in item["options"]:
            pieces.append(f"<label style='display:block;margin-bottom:5px;'><input type='checkbox' name='q{idx}' value='{choice}' style='accent-color:#7c3aed;'> {choice}</label>")
        # Hidden until the button's onclick flips its display style.
        pieces.append(f"<p style='color:#7e22ce;display:none;' id='ans{idx}'><b>✅ Correct Answer:</b> {item['answer']}</p>")
        pieces.append("</div>")
    pieces.append("""
<button onclick="document.querySelectorAll('[id^=ans]').forEach(a=>a.style.display='block');"
style='margin-top:10px;background:#7c3aed;color:white;border:none;padding:10px 20px;border-radius:8px;cursor:pointer;'>
Show Answers
</button>
</div>""")
    return "".join(pieces)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# ⚙️ Gradio Interface
# Single-column UI: topic textbox → generate button → HTML quiz output.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as demo:
    gr.Markdown("<h1 style='text-align:center;color:#5b21b6;'>🧠 EduAI — Auto MCQ Quiz Generator</h1>")
    topic = gr.Textbox(label="Enter a topic (e.g. Physics, History, Chemistry)")
    gen_btn = gr.Button("🎯 Generate Quiz")
    quiz_output = gr.HTML("<i>Enter a topic and click Generate to start!</i>")

    # Clicking the button rebuilds the quiz HTML for the entered topic.
    gen_btn.click(build_quiz_html, inputs=topic, outputs=quiz_output)

demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|