Deevyankar committed on
Commit
a481d53
·
verified ·
1 Parent(s): ba00bbc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -40
app.py CHANGED
@@ -4,6 +4,7 @@ import fitz # PyMuPDF
4
  import docx
5
  import io
6
  import re
 
7
  from sklearn.feature_extraction.text import TfidfVectorizer
8
  from sentence_transformers import SentenceTransformer, util
9
  import matplotlib.pyplot as plt
@@ -20,7 +21,7 @@ def extract_text_from_pdf(pdf_file):
20
  text += page.get_text()
21
  pdf_reader.close()
22
  return text.strip()
23
- except Exception as e:
24
  return ""
25
 
26
  def normalize_text(text):
@@ -29,24 +30,17 @@ def normalize_text(text):
29
  def extract_text_from_docx(docx_file):
30
  try:
31
  doc = docx.Document(io.BytesIO(docx_file))
32
- full_text = []
33
- for para in doc.paragraphs:
34
- if para.text.strip():
35
- full_text.append(para.text.strip())
36
- return full_text
37
  except:
38
  return []
39
 
40
  def semantic_match(lo_list, content):
41
  scores = []
 
42
  for lo in lo_list:
43
- try:
44
- lo_embed = model.encode(lo, convert_to_tensor=True)
45
- content_embed = model.encode(content, convert_to_tensor=True)
46
- sim = util.pytorch_cos_sim(lo_embed, content_embed).item()
47
- scores.append(round(sim, 2))
48
- except:
49
- scores.append(0.0)
50
  return scores
51
 
52
  def content_change_score(text1, text2):
@@ -61,62 +55,78 @@ def compare_handouts(old_pdf, new_pdf, lo_file):
61
  new_text = extract_text_from_pdf(new_pdf)
62
 
63
  if len(old_text.strip()) < 200 or len(new_text.strip()) < 200:
64
- return "⚠️ Could not extract meaningful content from one or both PDFs.", None
65
 
66
  lo_list = extract_text_from_docx(lo_file)
67
  if not lo_list:
68
- return "⚠️ No learning outcomes detected.", None
69
 
70
  old_scores = semantic_match(lo_list, old_text)
71
  new_scores = semantic_match(lo_list, new_text)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
- content_diff_percent = content_change_score(old_text, new_text)
74
- lo_improvements = [round(new - old, 2) for new, old in zip(new_scores, old_scores)]
75
- improved_count = sum([diff > 0.01 for diff in lo_improvements])
76
- improved_total = sum([diff for diff in lo_improvements if diff > 0])
77
-
78
- lo_change_percent = round((improved_total / len(lo_list)) * 100, 2)
79
-
80
- summary = f"🧠 Improved LOs: {improved_count} / {len(lo_list)} (Total improvement score: {improved_total})\n"
81
- summary += f"πŸ“„ Content Change Estimate: {content_diff_percent}%\n"
82
- summary += f"🎯 LO Change Estimate: {lo_change_percent}%\n"
83
  if improved_count > 0:
84
  summary += "🟒 Summary: New handout better aligns with LOs and has improved clarity."
85
  else:
86
  summary += "⚠️ Summary: No significant improvement in LO alignment."
87
 
88
- # Plot
89
  x = np.arange(len(lo_list))
90
  width = 0.35
91
- fig, ax = plt.subplots()
92
  ax.bar(x - width/2, old_scores, width, label='Old')
93
  ax.bar(x + width/2, new_scores, width, label='New')
94
  ax.set_ylabel('Match Score (0-1)')
95
- ax.set_title('LO-wise Match Score: Old vs New')
96
  ax.set_xticks(x)
97
  ax.set_xticklabels([f"LO{i+1}" for i in range(len(lo_list))], rotation=45)
98
  ax.legend()
99
  plt.tight_layout()
100
 
101
- return summary, fig
102
 
103
  with gr.Blocks() as demo:
104
- gr.Markdown("πŸ“˜ **Educational Content Comparator**")
105
- gr.Markdown("Upload 2 handouts and a .docx file of Learning Outcomes to compare changes and alignment.")
106
 
107
  with gr.Row():
108
- old_pdf = gr.File(label="πŸ“‚ Upload Old PDF", file_types=[".pdf"], type="binary")
109
- new_pdf = gr.File(label="πŸ“‚ Upload New PDF", file_types=[".pdf"], type="binary")
110
- lo_file = gr.File(label="πŸ“‚ Upload Learning Outcomes (.docx)", file_types=[".docx"], type="binary")
111
 
112
  with gr.Row():
113
- btn = gr.Button("Submit")
114
- clear_btn = gr.Button("Clear")
115
 
116
- output_text = gr.Textbox(label="πŸ“‹ Summary", lines=6, interactive=False)
117
- output_plot = gr.Plot(label="πŸ“Š LO Match Chart")
 
118
 
119
- btn.click(fn=compare_handouts, inputs=[old_pdf, new_pdf, lo_file], outputs=[output_text, output_plot])
120
- clear_btn.click(fn=lambda: ("", None), inputs=[], outputs=[output_text, output_plot])
 
 
121
 
122
  demo.launch()
 
4
  import docx
5
  import io
6
  import re
7
+ import pandas as pd
8
  from sklearn.feature_extraction.text import TfidfVectorizer
9
  from sentence_transformers import SentenceTransformer, util
10
  import matplotlib.pyplot as plt
 
21
  text += page.get_text()
22
  pdf_reader.close()
23
  return text.strip()
24
+ except Exception:
25
  return ""
26
 
27
  def normalize_text(text):
 
30
def extract_text_from_docx(docx_file):
    """Extract non-empty paragraph texts from a .docx file given as raw bytes.

    Parameters
    ----------
    docx_file : bytes
        Raw binary content of a .docx upload (Gradio ``type="binary"``).

    Returns
    -------
    list[str]
        Stripped text of each non-empty paragraph, in document order;
        an empty list when the document cannot be parsed.
    """
    try:
        doc = docx.Document(io.BytesIO(docx_file))
        return [p.text.strip() for p in doc.paragraphs if p.text.strip()]
    except Exception:
        # Narrow from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt) to Exception, matching extract_text_from_pdf.
        # A corrupt/unreadable upload degrades to "no learning outcomes".
        return []
36
 
37
def semantic_match(lo_list, content):
    """Score how well each learning outcome matches *content*.

    Parameters
    ----------
    lo_list : list[str]
        Learning-outcome statements.
    content : str
        Full document text to compare against.

    Returns
    -------
    list[float]
        One cosine-similarity score per outcome, rounded to 3 decimals.
    """
    # Embed the document exactly once; only the outcome varies per iteration.
    doc_vec = model.encode(content, convert_to_tensor=True)
    return [
        round(
            util.pytorch_cos_sim(
                model.encode(outcome, convert_to_tensor=True), doc_vec
            ).item(),
            3,
        )
        for outcome in lo_list
    ]
45
 
46
  def content_change_score(text1, text2):
 
55
  new_text = extract_text_from_pdf(new_pdf)
56
 
57
  if len(old_text.strip()) < 200 or len(new_text.strip()) < 200:
58
+ return "❌ Could not extract text from one or both PDFs.", None, None
59
 
60
  lo_list = extract_text_from_docx(lo_file)
61
  if not lo_list:
62
+ return "⚠️ No learning outcomes detected.", None, None
63
 
64
  old_scores = semantic_match(lo_list, old_text)
65
  new_scores = semantic_match(lo_list, new_text)
66
+ improvement = [round(n - o, 3) for n, o in zip(new_scores, old_scores)]
67
+ improved_count = sum([i > 0 for i in improvement])
68
+
69
+ # Prepare Excel output
70
+ df = pd.DataFrame({
71
+ "Learning Outcome": lo_list,
72
+ "Old Match Score": old_scores,
73
+ "New Match Score": new_scores,
74
+ "Improvement": improvement
75
+ })
76
+
77
+ excel_path = "/mnt/data/LO_Comparison_Report.xlsx"
78
+ df.to_excel(excel_path, index=False)
79
+
80
+ # Scores
81
+ content_diff = content_change_score(old_text, new_text)
82
+ lo_improvement_percent = round((sum(improvement) / len(lo_list)) * 100, 2)
83
+
84
+ summary = (
85
+ f"🧠 Improved LOs: {improved_count} / {len(lo_list)}\n"
86
+ f"πŸ“„ Content Change Estimate: {content_diff}%\n"
87
+ f"πŸ“Š Avg LO Improvement Score: {lo_improvement_percent}%\n\n"
88
+ )
89
 
 
 
 
 
 
 
 
 
 
 
90
  if improved_count > 0:
91
  summary += "🟒 Summary: New handout better aligns with LOs and has improved clarity."
92
  else:
93
  summary += "⚠️ Summary: No significant improvement in LO alignment."
94
 
95
+ # Bar chart
96
  x = np.arange(len(lo_list))
97
  width = 0.35
98
+ fig, ax = plt.subplots(figsize=(10, 5))
99
  ax.bar(x - width/2, old_scores, width, label='Old')
100
  ax.bar(x + width/2, new_scores, width, label='New')
101
  ax.set_ylabel('Match Score (0-1)')
102
+ ax.set_title('LO-wise Match Score Comparison')
103
  ax.set_xticks(x)
104
  ax.set_xticklabels([f"LO{i+1}" for i in range(len(lo_list))], rotation=45)
105
  ax.legend()
106
  plt.tight_layout()
107
 
108
+ return summary, fig, excel_path
109
 
110
# --- Gradio UI ---------------------------------------------------------------
# Three uploads in; a text summary, a per-LO bar chart, and an Excel report out.
# Fix: the original label strings were mojibake (UTF-8 emoji mis-decoded, e.g.
# "πŸ“˜"); restore the intended characters so the UI renders correctly.
with gr.Blocks() as demo:
    gr.Markdown("### 📘 Educational Handout Comparison Tool")
    gr.Markdown("Upload an old and new handout PDF, along with Learning Outcomes (.docx), to compare updates.")

    with gr.Row():
        # type="binary" hands the raw bytes straight to compare_handouts.
        old_pdf = gr.File(label="📂 Old Handout", file_types=[".pdf"], type="binary")
        new_pdf = gr.File(label="📂 New Handout", file_types=[".pdf"], type="binary")
        lo_file = gr.File(label="📂 Learning Outcomes (.docx)", file_types=[".docx"], type="binary")

    with gr.Row():
        btn = gr.Button("🔍 Compare")
        clear_btn = gr.Button("♻️ Clear")

    summary_out = gr.Textbox(label="📋 Summary", lines=6, interactive=False)
    plot_out = gr.Plot(label="📊 LO Score Chart")
    download_link = gr.File(label="📥 Download Excel Report")

    btn.click(fn=compare_handouts, inputs=[old_pdf, new_pdf, lo_file],
              outputs=[summary_out, plot_out, download_link])
    # Reset all three outputs to their empty states.
    clear_btn.click(fn=lambda: ("", None, None),
                    inputs=[], outputs=[summary_out, plot_out, download_link])

demo.launch()