# GenAI Feedback Assistant — Hugging Face Space app
# Standard library
import os
from datetime import datetime

# Third-party
import numpy as np

import matplotlib
matplotlib.use("Agg")  # headless backend; must be selected before pyplot is imported
import matplotlib.pyplot as plt

import gradio as gr
from gtts import gTTS
from transformers import pipeline
# Model identifiers: a small chat LLM for feedback text and an English-only
# Whisper checkpoint for speech recognition.
LLM_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
ASR_MODEL = "openai/whisper-base.en"

# Build both pipelines once at import time so every request reuses the
# already-loaded weights instead of reloading per call.
text_gen = pipeline("text-generation", model=LLM_MODEL, max_new_tokens=512)
asr = pipeline("automatic-speech-recognition", model=ASR_MODEL)
def tts_to_mp3(text, fname="feedback_tts.mp3", lang="en"):
    """Synthesize `text` to speech with gTTS and write it to an MP3 file.

    Returns the output filename so callers can hand it straight to the UI.
    Note: gTTS performs a network request to Google's TTS endpoint.
    """
    speech = gTTS(text=text, lang=lang)
    speech.save(fname)
    return fname
def make_feedback_visual(scores, labels, out_path="feedback_visual.png"):
    """Render a bar chart of 0-100 scores (one bar per label) and save a PNG.

    Returns `out_path` for the caller to display. Assumes `scores` and
    `labels` are the same length (three entries in this app).
    """
    palette = ["#4C78A8", "#F58518", "#54A24B"]
    plt.figure(figsize=(4, 3), dpi=150)
    bars = plt.bar(labels, scores, color=palette)
    plt.ylim(0, 100)
    plt.title("Feedback Snapshot")
    # Write each score just above the top of its bar.
    for bar, score in zip(bars, scores):
        x_center = bar.get_x() + bar.get_width() / 2
        plt.text(x_center, score + 2, f"{score:.0f}", ha="center", fontsize=9)
    plt.tight_layout()
    plt.savefig(out_path, format="png")
    plt.close()  # free the figure so repeated calls don't accumulate memory
    return out_path
def heuristic_scores(feedback_text):
    """Derive rough Concept/Strategy/Accuracy scores from keyword presence.

    A display-only heuristic: each score starts from a baseline and is nudged
    up or down when certain keywords appear (case-insensitively) in
    `feedback_text`. Matching is by substring, so e.g. "misconception" also
    triggers the "concept" bonus and "incorrect" triggers the "correct" bonus.

    Returns a list of three ints clamped to [0, 100]:
    [concept, strategy, accuracy].
    """
    ft = feedback_text.lower()
    concept = 70 + 15 * ("concept" in ft) - 10 * ("misconception" in ft)
    strategy = 65 + 15 * ("strategy" in ft) + 10 * ("hint" in ft) - 10 * ("inefficient" in ft)
    accuracy = 60 + 20 * ("correct" in ft) - 15 * ("error" in ft)
    # Clamp with builtins; the original re-imported numpy inside the function
    # (it is already imported at module level) just to clip three scalars.
    return [max(0, min(100, int(x))) for x in (concept, strategy, accuracy)]
def scaffold_prompt(problem, student_text):
    """Build the tutoring prompt sent to the LLM.

    Interpolates `problem` and `student_text` into a fixed template that asks
    for goal-referenced formative feedback in five numbered parts. The
    template text is part of the app's behavior — do not edit it casually.
    """
    return f"""
You are a pedagogy-aware tutor that gives formative feedback that is timely, specific, and goal-referenced.
Structure the response with:
1) Clarify the goal (feeding up),
2) How it’s going with specific, nonevaluative notes (feeding back),
3) Where to next with scaffolded hints (feeding forward),
4) A brief worked example snippet,
5) 2 self-check questions.
Problem:
{problem}
Student work:
{student_text}
Be concise, supportive, and focus on actionable steps, include concrete hints, and avoid generic praise.
"""
def generate_feedback(problem_text, student_text, audio_file=None):
    """Generate tutor feedback and return (feedback_text, mp3_path, png_path).

    Parameters:
        problem_text: the problem statement shown to the model.
        student_text: the student's written work (may be None/empty).
        audio_file: optional path to a recorded explanation; transcribed with
            the Whisper pipeline and appended to the student work.
    """
    transcript = ""
    if audio_file:
        try:
            tr = asr(audio_file)
            transcript = tr.get("text", "").strip()
        except Exception as e:
            # Best effort: surface the ASR failure inline instead of crashing the UI.
            transcript = f"[ASR error: {e}]"
    combined_student = (student_text or "").strip()
    if transcript:
        combined_student += f"\n[Voice transcription]\n{transcript}"
    prompt = scaffold_prompt(problem_text, combined_student if combined_student else "(No student work provided)")
    # BUG FIX: a transformers text-generation pipeline returns a *list* of
    # candidate dicts; the original indexed the list with "generated_text",
    # which raises TypeError. Take the first (only) candidate.
    gen = text_gen(prompt, do_sample=True, temperature=0.4, top_p=0.9)[0]["generated_text"]
    # Strip the echoed prompt when the model returns it verbatim.
    feedback = gen[len(prompt):].strip() if gen.startswith(prompt) else gen.strip()
    scores = heuristic_scores(feedback)
    img_path = make_feedback_visual(scores, ["Concept", "Strategy", "Accuracy"])
    audio_out = tts_to_mp3(feedback, fname=f"feedback_{datetime.now().strftime('%H%M%S')}.mp3")
    return feedback, audio_out, img_path
# --- Gradio UI: two tabs sharing the same generate_feedback handler --------
with gr.Blocks() as demo:
    gr.Markdown("## GenAI Feedback Assistant")

    # Text-only tab: no microphone, so a constant None State fills the
    # handler's audio_file parameter.
    with gr.Tab("Text mode"):
        problem_box = gr.Textbox(label="Problem", lines=3)
        work_box = gr.Textbox(label="Student Work", lines=6)
        generate_btn = gr.Button("Generate Feedback")
        feedback_box = gr.Textbox(label="Feedback", lines=12)
        feedback_audio = gr.Audio(label="Feedback (TTS)", type="filepath")
        feedback_img = gr.Image(label="Visual Snapshot (PNG)", type="filepath")
        generate_btn.click(
            generate_feedback,
            inputs=[problem_box, work_box, gr.State(None)],
            outputs=[feedback_box, feedback_audio, feedback_img],
        )

    # Voice tab: identical pipeline plus an optional microphone recording,
    # which generate_feedback transcribes and appends to the written work.
    with gr.Tab("Voice mode"):
        problem_box_v = gr.Textbox(label="Problem", lines=3)
        work_box_v = gr.Textbox(label="Student Work (optional)", lines=6)
        mic_input = gr.Audio(sources="microphone", type="filepath", label="Explain thinking (optional)")
        generate_btn_v = gr.Button("Generate Feedback")
        feedback_box_v = gr.Textbox(label="Feedback", lines=12)
        feedback_audio_v = gr.Audio(label="Feedback (TTS)", type="filepath")
        feedback_img_v = gr.Image(label="Visual Snapshot (PNG)", type="filepath")
        generate_btn_v.click(
            generate_feedback,
            inputs=[problem_box_v, work_box_v, mic_input],
            outputs=[feedback_box_v, feedback_audio_v, feedback_img_v],
        )

demo.launch()