DarkSting committed on
Commit
c0be5b7
·
verified ·
1 Parent(s): 32d20ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +184 -1
app.py CHANGED
@@ -288,6 +288,176 @@ def evaluate_question_topic_match(data):
288
  return parse_matching_json(generated)
289
 
290
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
291
  # ===============================================================================
292
  # FLASK APP
293
  # ===============================================================================
@@ -308,7 +478,10 @@ def generate():
308
  elif calling_type == 'analyze':
309
  return analyze_gen(data.get('teacher_data'))
310
 
311
- return jsonify({"error": "Invalid model type. Use 'teacher', 'question', or 'analyze'"}), 400
 
 
 
312
 
313
 
314
  def question_gen(data):
@@ -347,6 +520,15 @@ def teacher_guide(data):
347
  return jsonify({"output": prompt})
348
 
349
 
 
 
 
 
 
 
 
 
 
350
  if __name__ == "__main__":
351
  print("\n" + "="*70)
352
  print("SERVER READY!")
@@ -356,6 +538,7 @@ if __name__ == "__main__":
356
  print(" - model_type: 'question' → math question generation")
357
  print(" - model_type: 'analyze' → question-topic match scoring")
358
  print(" - model_type: 'teacher' → teaching feedback analysis")
 
359
  print("="*70 + "\n")
360
 
361
  app.run(host="0.0.0.0", port=7860)
 
288
  return parse_matching_json(generated)
289
 
290
 
291
# ===============================================================================
# STUDENT FEEDBACK ANALYSER MODEL
# ===============================================================================

# Base model and the fine-tuned LoRA adapter applied on top of it.
FEEDBACK_MODEL_PATH = "Qwen/Qwen2-1.5B-Instruct"
FEEDBACK_ADAPTER_PATH = "Chamith2000/Video_Feedback"

# Closed label sets accepted from the model; anything else is coerced to a
# safe default in parse_feedback_json().
VALID_ISSUES = {"audio", "confusion", "difficulty", "pacing", "positive"}
VALID_SEVERITIES = {"high", "medium", "low"}

print("Loading student feedback analyser model...")
feedback_tokenizer = AutoTokenizer.from_pretrained(
    FEEDBACK_MODEL_PATH,
    trust_remote_code=True,
    padding_side="right",
)
# Qwen tokenizers may ship without a pad token; reuse EOS so padded
# generation calls don't fail.
if feedback_tokenizer.pad_token is None:
    feedback_tokenizer.pad_token = feedback_tokenizer.eos_token

# 4-bit NF4 quantization with double quantization — keeps the 1.5B model
# small enough for modest GPU/CPU memory; compute runs in fp16.
bnb_config_feedback = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
)

feedback_base = AutoModelForCausalLM.from_pretrained(
    FEEDBACK_MODEL_PATH,
    quantization_config=bnb_config_feedback,
    device_map='auto',
    trust_remote_code=True,
    torch_dtype=torch.float16,
)
# Attach the fine-tuned adapter weights and switch to inference mode.
feedback_model = PeftModel.from_pretrained(feedback_base, FEEDBACK_ADAPTER_PATH)
feedback_model.eval()
print("✅ Student feedback analyser model loaded!")
328
+
329
def build_feedback_prompt(text: str, grade: int, lesson: str, video_content: str) -> str:
    """Build the chat-template prompt for the student feedback analyser.

    Args:
        text: Raw student feedback to analyse.
        grade: Student grade level (int; caller defaults it to 3).
        lesson: Lesson name the feedback refers to.
        video_content: Short description of the video's content.

    Returns:
        The full prompt string rendered with the module-level
        ``feedback_tokenizer``'s chat template, ending with the generation
        marker (``add_generation_prompt=True``) so the model continues with
        the JSON answer.
    """
    # NOTE(review): inner spacing of these prompt lines may have been
    # collapsed by the diff rendering — confirm against the deployed file.
    messages = [
        {
            "role": "system",
            "content": (
                "You are an educational feedback analyser for a mathematics e-learning platform. "
                "You analyse student feedback and return ONLY a valid JSON object — no explanation, "
                "no markdown, no extra text. Just the raw JSON."
            ),
        },
        {
            "role": "user",
            "content": (
                f"Analyse the student feedback below and return a JSON object.\n\n"
                f"Student details:\n"
                f" Grade : {grade}\n"
                f" Lesson : {lesson}\n"
                f" Video context: {video_content}\n\n"
                f"Student feedback: \"{text}\"\n\n"
                f"Return JSON with exactly these keys:\n"
                f" issue : one of [audio, confusion, difficulty, pacing, positive]\n"
                f" severity : one of [high, medium, low]\n"
                f" recommendation : a specific actionable instruction for the teacher\n"
                f" confidence : your confidence as a float between 0.0 and 1.0\n\n"
                f"JSON:"
            ),
        },
    ]
    # tokenize=False: we want the rendered string; tokenization happens later
    # in infer_student_feedback().
    return feedback_tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
362
+
363
+
364
def parse_feedback_json(text: str) -> dict:
    """Parse and validate the model's JSON output for feedback analysis.

    Args:
        text: Raw decoded model output, ideally a single JSON object.

    Returns:
        On success, a dict with keys ``issue``, ``severity``,
        ``recommendation``, ``confidence`` — with ``issue``/``severity``
        coerced into the valid label sets and ``confidence`` clamped to
        [0.0, 1.0]. On failure, a dict with ``error`` and a truncated
        ``raw_output`` for debugging.
    """
    clean = text.strip()

    # Strip markdown code fences if present (``` or ```json lines).
    if clean.startswith("```"):
        lines = clean.split("\n")
        clean = "\n".join(l for l in lines if not l.startswith("```"))

    # Try a direct parse first; fall back to extracting the outermost braces,
    # since the model sometimes wraps the JSON in extra prose.
    try:
        parsed = json.loads(clean)
    except json.JSONDecodeError:
        start = clean.find('{')
        end = clean.rfind('}') + 1
        if start != -1 and end > start:
            try:
                parsed = json.loads(clean[start:end])
            except json.JSONDecodeError:
                return {
                    "error": "Failed to parse model output as JSON",
                    "raw_output": text[:500],
                }
        else:
            return {
                "error": "No JSON object found in model output",
                "raw_output": text[:500],
            }

    # Fix: valid JSON is not necessarily an object — an array or scalar here
    # would previously crash on .get() with AttributeError.
    if not isinstance(parsed, dict):
        return {
            "error": "Model output is valid JSON but not an object",
            "raw_output": text[:500],
        }

    # Normalise and validate fields.
    issue = str(parsed.get("issue", "")).lower().strip()
    severity = str(parsed.get("severity", "")).lower().strip()
    # Fix: coerce the recommendation to a clean string (a missing or None
    # value becomes ""), so callers always receive a str.
    rec = str(parsed.get("recommendation") or "").strip()
    raw_conf = parsed.get("confidence", 0.0)

    try:
        confidence = max(0.0, min(1.0, float(raw_conf)))
    except (TypeError, ValueError):
        confidence = 0.0

    if issue not in VALID_ISSUES:
        issue = "confusion"  # safe default
    if severity not in VALID_SEVERITIES:
        severity = "medium"  # safe default

    return {
        "issue": issue,
        "severity": severity,
        "recommendation": rec,
        "confidence": confidence,
    }
417
+
418
+
419
@torch.no_grad()
def infer_student_feedback(data: dict) -> dict:
    """Run inference with the student feedback analyser model.

    Args:
        data: Request payload; expected keys are ``text``, ``lesson``,
            ``video_content`` (all required) and optional ``grade``
            (defaults to 3).

    Returns:
        The parsed/validated analysis dict from ``parse_feedback_json``,
        or an ``{"error": ...}`` dict when a required field is missing.
    """
    # Fix: `data.get("text", "")` still returns None when the client sends
    # an explicit null, which would crash .strip(); `or ""` covers both the
    # missing and the null case.
    text = (data.get("text") or "").strip()
    grade = data.get("grade", 3)
    lesson = data.get("lesson") or ""
    video_content = data.get("video_content") or ""

    if not text:
        return {"error": "Missing required field: 'text'"}
    if not lesson:
        return {"error": "Missing required field: 'lesson'"}
    if not video_content:
        return {"error": "Missing required field: 'video_content'"}

    prompt = build_feedback_prompt(text, grade, lesson, video_content)

    # NOTE(review): DEVICE is a module-level constant defined elsewhere in
    # this file; with device_map='auto' it should match the model's input
    # device — confirm.
    inputs = feedback_tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=512,
    ).to(DEVICE)

    outputs = feedback_model.generate(
        **inputs,
        max_new_tokens=150,
        do_sample=False,  # greedy decoding for deterministic JSON output
        # Fix: dropped temperature=1.0 — it is ignored when do_sample=False
        # and only triggers a transformers warning.
        repetition_penalty=1.1,
        pad_token_id=feedback_tokenizer.eos_token_id,
        eos_token_id=feedback_tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens (skip the prompt prefix).
    generated = feedback_tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    ).strip()

    return parse_feedback_json(generated)
459
+
460
+
461
  # ===============================================================================
462
  # FLASK APP
463
  # ===============================================================================
 
478
  elif calling_type == 'analyze':
479
  return analyze_gen(data.get('teacher_data'))
480
 
481
+ elif calling_type == 'feedback':
482
+ return student_feedback(data.get('feedback_data'))
483
+
484
+ return jsonify({"error": "Invalid model type. Use 'teacher', 'question', 'analyze', or 'feedback'"}), 400
485
 
486
 
487
  def question_gen(data):
 
520
  return jsonify({"output": prompt})
521
 
522
 
523
def student_feedback(data):
    """Analyse student video feedback and classify issue, severity, and recommendation.

    Args:
        data: The 'feedback_data' payload forwarded by the /generate route.

    Returns:
        A JSON response wrapping the analyser output, or a 400 error when
        the payload is absent.
    """
    if data:
        analysis = infer_student_feedback(data)
        return jsonify({"output": analysis})
    return jsonify({"error": "Missing 'feedback_data'"}), 400
530
+
531
+
532
  if __name__ == "__main__":
533
  print("\n" + "="*70)
534
  print("SERVER READY!")
 
538
  print(" - model_type: 'question' → math question generation")
539
  print(" - model_type: 'analyze' → question-topic match scoring")
540
  print(" - model_type: 'teacher' → teaching feedback analysis")
541
+ print(" - model_type: 'feedback' → student video feedback analysis")
542
  print("="*70 + "\n")
543
 
544
  app.run(host="0.0.0.0", port=7860)