sparshmehta committed on
Commit
f394b39
·
verified ·
1 Parent(s): 02aa5ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -188
app.py CHANGED
@@ -224,6 +224,8 @@ class ContentAnalyzer:
224
  self.client = OpenAI(api_key=api_key)
225
  self.retry_count = 3
226
  self.retry_delay = 1
 
 
227
 
228
  def analyze_content(self, transcript: str, progress_callback=None) -> Dict[str, Any]:
229
  """Analyze teaching content with retry logic and robust JSON handling"""
@@ -233,8 +235,6 @@ class ContentAnalyzer:
233
  progress_callback(0.2, "Preparing content analysis...")
234
 
235
  prompt = self._create_analysis_prompt(transcript)
236
- logger.info(f"Attempt {attempt + 1}: Sending analysis request")
237
- logger.info(f"Transcript length: {len(transcript)} characters")
238
 
239
  if progress_callback:
240
  progress_callback(0.5, "Processing with AI model...")
@@ -651,6 +651,9 @@ class MentorEvaluator:
651
  self._feature_extractor = None
652
  self._content_analyzer = None
653
  self._recommendation_generator = None
 
 
 
654
 
655
  @property
656
  def whisper_model(self):
@@ -660,8 +663,8 @@ class MentorEvaluator:
660
  logger.info("Attempting to initialize Whisper model...")
661
  # First try to initialize model with downloading allowed
662
  self._whisper_model = WhisperModel(
663
- "small",
664
- device="small",
665
  compute_type="int8",
666
  download_root=self.model_cache_dir,
667
  local_files_only=False # Allow downloading if needed
@@ -1186,68 +1189,12 @@ def display_evaluation(evaluation: Dict[str, Any]):
1186
 
1187
  recommendations = evaluation.get("recommendations", {})
1188
 
1189
- # Calculate Overall Score
1190
- communication_metrics = evaluation.get("communication", {})
1191
- teaching_data = evaluation.get("teaching", {})
1192
-
1193
- # Calculate Communication Score
1194
- comm_scores = []
1195
- for category in ["speed", "fluency", "flow", "intonation", "energy"]:
1196
- if category in communication_metrics:
1197
- if "score" in communication_metrics[category]:
1198
- comm_scores.append(communication_metrics[category]["score"])
1199
-
1200
- communication_score = (sum(comm_scores) / len(comm_scores) * 100) if comm_scores else 0
1201
-
1202
- # Calculate Teaching Score (combining concept and code assessment)
1203
- concept_assessment = teaching_data.get("Concept Assessment", {})
1204
- code_assessment = teaching_data.get("Code Assessment", {})
1205
-
1206
- teaching_scores = []
1207
- # Add concept scores
1208
- for category in concept_assessment.values():
1209
- if isinstance(category, dict) and "Score" in category:
1210
- teaching_scores.append(category["Score"])
1211
-
1212
- # Add code scores
1213
- for category in code_assessment.values():
1214
- if isinstance(category, dict) and "Score" in category:
1215
- teaching_scores.append(category["Score"])
1216
 
1217
- teaching_score = (sum(teaching_scores) / len(teaching_scores) * 100) if teaching_scores else 0
1218
-
1219
- # Calculate Overall Score (50-50 weight between communication and teaching)
1220
- overall_score = (communication_score + teaching_score) / 2
1221
-
1222
- # Display Overall Scores at the top of recommendations
1223
- st.markdown("### 📊 Overall Performance")
1224
- col1, col2, col3 = st.columns(3)
1225
-
1226
- with col1:
1227
- st.metric(
1228
- "Communication Score",
1229
- f"{communication_score:.1f}%",
1230
- delta="Pass" if communication_score >= 70 else "Needs Improvement",
1231
- delta_color="normal" if communication_score >= 70 else "inverse"
1232
- )
1233
-
1234
- with col2:
1235
- st.metric(
1236
- "Teaching Score",
1237
- f"{teaching_score:.1f}%",
1238
- delta="Pass" if teaching_score >= 70 else "Needs Improvement",
1239
- delta_color="normal" if teaching_score >= 70 else "inverse"
1240
- )
1241
-
1242
- with col3:
1243
- st.metric(
1244
- "Overall Score",
1245
- f"{overall_score:.1f}%",
1246
- delta="Pass" if overall_score >= 70 else "Needs Improvement",
1247
- delta_color="normal" if overall_score >= 70 else "inverse"
1248
- )
1249
-
1250
- # Continue with existing recommendations display
1251
  with st.expander("💡 Areas for Improvement", expanded=True):
1252
  improvements = recommendations.get("improvements", [])
1253
  if isinstance(improvements, list):
@@ -1528,108 +1475,6 @@ def check_dependencies() -> List[str]:
1528
 
1529
  return missing
1530
 
1531
- def generate_pdf_report(evaluation_data: Dict[str, Any]) -> bytes:
1532
- """Generate a formatted PDF report from evaluation data"""
1533
- try:
1534
- from reportlab.lib import colors
1535
- from reportlab.lib.pagesizes import letter
1536
- from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
1537
- from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
1538
- from io import BytesIO
1539
-
1540
- # Create PDF buffer
1541
- buffer = BytesIO()
1542
- doc = SimpleDocTemplate(buffer, pagesize=letter)
1543
- styles = getSampleStyleSheet()
1544
- story = []
1545
-
1546
- # Title
1547
- title_style = ParagraphStyle(
1548
- 'CustomTitle',
1549
- parent=styles['Heading1'],
1550
- fontSize=24,
1551
- spaceAfter=30
1552
- )
1553
- story.append(Paragraph("Mentor Demo Evaluation Report", title_style))
1554
- story.append(Spacer(1, 20))
1555
-
1556
- # Communication Metrics Section
1557
- story.append(Paragraph("Communication Metrics", styles['Heading2']))
1558
- comm_metrics = evaluation_data.get("communication", {})
1559
-
1560
- # Create tables for each metric category
1561
- for category in ["speed", "fluency", "flow", "intonation", "energy"]:
1562
- if category in comm_metrics:
1563
- metrics = comm_metrics[category]
1564
- story.append(Paragraph(category.title(), styles['Heading3']))
1565
-
1566
- data = [[k.replace('_', ' ').title(), str(v)] for k, v in metrics.items()]
1567
- t = Table(data, colWidths=[200, 200])
1568
- t.setStyle(TableStyle([
1569
- ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
1570
- ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
1571
- ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
1572
- ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
1573
- ('FONTSIZE', (0, 0), (-1, 0), 14),
1574
- ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
1575
- ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
1576
- ('TEXTCOLOR', (0, 1), (-1, -1), colors.black),
1577
- ('FONTNAME', (0, 1), (-1, -1), 'Helvetica'),
1578
- ('FONTSIZE', (0, 1), (-1, -1), 12),
1579
- ('GRID', (0, 0), (-1, -1), 1, colors.black)
1580
- ]))
1581
- story.append(t)
1582
- story.append(Spacer(1, 20))
1583
-
1584
- # Teaching Analysis Section
1585
- story.append(Paragraph("Teaching Analysis", styles['Heading2']))
1586
- teaching_data = evaluation_data.get("teaching", {})
1587
-
1588
- for assessment_type in ["Concept Assessment", "Code Assessment"]:
1589
- if assessment_type in teaching_data:
1590
- story.append(Paragraph(assessment_type, styles['Heading3']))
1591
- categories = teaching_data[assessment_type]
1592
-
1593
- for category, details in categories.items():
1594
- score = details.get("Score", 0)
1595
- citations = details.get("Citations", [])
1596
-
1597
- data = [
1598
- [category, "Score: " + ("Pass" if score == 1 else "Needs Improvement")],
1599
- ["Citations:", ""]
1600
- ] + [["-", citation] for citation in citations]
1601
-
1602
- t = Table(data, colWidths=[200, 300])
1603
- t.setStyle(TableStyle([
1604
- ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
1605
- ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
1606
- ('ALIGN', (0, 0), (-1, -1), 'LEFT'),
1607
- ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
1608
- ('GRID', (0, 0), (-1, -1), 1, colors.black)
1609
- ]))
1610
- story.append(t)
1611
- story.append(Spacer(1, 20))
1612
-
1613
- # Recommendations Section
1614
- story.append(Paragraph("Recommendations", styles['Heading2']))
1615
- recommendations = evaluation_data.get("recommendations", {})
1616
-
1617
- if "improvements" in recommendations:
1618
- story.append(Paragraph("Areas for Improvement:", styles['Heading3']))
1619
- for improvement in recommendations["improvements"]:
1620
- story.append(Paragraph("• " + improvement, styles['Normal']))
1621
-
1622
- # Build PDF
1623
- doc.build(story)
1624
- pdf_data = buffer.getvalue()
1625
- buffer.close()
1626
-
1627
- return pdf_data
1628
-
1629
- except Exception as e:
1630
- logger.error(f"Error generating PDF report: {e}")
1631
- raise RuntimeError(f"Failed to generate PDF report: {str(e)}")
1632
-
1633
  def main():
1634
  try:
1635
  # Set page config must be the first Streamlit command
@@ -1870,28 +1715,21 @@ def main():
1870
  st.success("Analysis complete!")
1871
  display_evaluation(st.session_state.evaluation_results)
1872
 
1873
- # Add download options
1874
- col1, col2 = st.columns(2)
 
 
 
 
 
 
 
1875
 
1876
- with col1:
1877
- if st.download_button(
1878
- "📥 Download JSON Report",
1879
- json.dumps(st.session_state.evaluation_results, indent=2),
1880
- "evaluation_report.json",
1881
- "application/json",
1882
- help="Download the raw evaluation data in JSON format"
1883
- ):
1884
- st.success("JSON report downloaded successfully!")
1885
-
1886
- with col2:
1887
- if st.download_button(
1888
- "📄 Download Full Report (PDF)",
1889
- generate_pdf_report(st.session_state.evaluation_results),
1890
- "evaluation_report.pdf",
1891
- "application/pdf",
1892
- help="Download a formatted PDF report with detailed analysis"
1893
- ):
1894
- st.success("PDF report downloaded successfully!")
1895
 
1896
  except Exception as e:
1897
  st.error(f"Error during evaluation: {str(e)}")
 
224
  self.client = OpenAI(api_key=api_key)
225
  self.retry_count = 3
226
  self.retry_delay = 1
227
+ self.GPT4_INPUT_COST = 0.15 / 1_000_000 # $0.15 per 1M tokens input
228
+ self.GPT4_OUTPUT_COST = 0.60 / 1_000_000 # $0.60 per 1M tokens output
229
 
230
  def analyze_content(self, transcript: str, progress_callback=None) -> Dict[str, Any]:
231
  """Analyze teaching content with retry logic and robust JSON handling"""
 
235
  progress_callback(0.2, "Preparing content analysis...")
236
 
237
  prompt = self._create_analysis_prompt(transcript)
 
 
238
 
239
  if progress_callback:
240
  progress_callback(0.5, "Processing with AI model...")
 
651
  self._feature_extractor = None
652
  self._content_analyzer = None
653
  self._recommendation_generator = None
654
+
655
+ # Cost per minute for Whisper transcription
656
+ self.WHISPER_COST_PER_MINUTE = 0.006 # $0.006 per minute of audio
657
 
658
  @property
659
  def whisper_model(self):
 
663
  logger.info("Attempting to initialize Whisper model...")
664
  # First try to initialize model with downloading allowed
665
  self._whisper_model = WhisperModel(
666
+ "medium",
667
+ device="cpu",
668
  compute_type="int8",
669
  download_root=self.model_cache_dir,
670
  local_files_only=False # Allow downloading if needed
 
1189
 
1190
  recommendations = evaluation.get("recommendations", {})
1191
 
1192
+ # Geography Fit with improved formatting
1193
+ # with st.expander("🌍 Geography Fit", expanded=True):
1194
+ # geography_fit = recommendations.get("geographyFit", "Not specified")
1195
+ # st.info(geography_fit)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1196
 
1197
+ # Improvements Needed with better formatting
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1198
  with st.expander("💡 Areas for Improvement", expanded=True):
1199
  improvements = recommendations.get("improvements", [])
1200
  if isinstance(improvements, list):
 
1475
 
1476
  return missing
1477
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1478
  def main():
1479
  try:
1480
  # Set page config must be the first Streamlit command
 
1715
  st.success("Analysis complete!")
1716
  display_evaluation(st.session_state.evaluation_results)
1717
 
1718
+ # Add download button using stored results
1719
+ if st.download_button(
1720
+ "📥 Download Full Report",
1721
+ json.dumps(st.session_state.evaluation_results, indent=2),
1722
+ "evaluation_report.json",
1723
+ "application/json",
1724
+ help="Download the complete evaluation report in JSON format"
1725
+ ):
1726
+ st.success("Report downloaded successfully!")
1727
 
1728
+ # Debugging code
1729
+ # if st.session_state.evaluation_results:
1730
+ # st.write("Debug: Teaching Analysis Structure")
1731
+ # teaching_data = st.session_state.evaluation_results.get("teaching", {})
1732
+ # st.json(teaching_data)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1733
 
1734
  except Exception as e:
1735
  st.error(f"Error during evaluation: {str(e)}")