dalinstone committed on
Commit
1824a08
·
verified ·
1 Parent(s): fbffb33

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -6
app.py CHANGED
@@ -253,15 +253,15 @@ async def grade_papers_concurrently(
253
  example_paper_file_obj = genai.upload_file(path=example_paper_file.name, display_name=os.path.basename(example_paper_file.name))
254
 
255
  file_paths = [file.name for file in files]
256
- total_files = len(file_paths)
257
-
258
  with ThreadPoolExecutor(max_workers=1) as executor:
259
  loop = asyncio.get_event_loop()
260
  tasks = [loop.run_in_executor(executor, process_single_file, fp, grader, example_paper_file_obj) for fp in file_paths]
 
261
  results = [await f for f in asyncio.as_completed(tasks)]
262
- progress(1)
263
 
264
- # This section is updated to format the new, verbose JSON structure
265
  output_markdown = ""
266
  successful_grades = [res for res in results if res.success]
267
  failed_grades = [res for res in results if not res.success]
@@ -269,13 +269,19 @@ async def grade_papers_concurrently(
269
  for result in successful_grades:
270
  response_data = result.raw_response
271
  output_markdown += f"### ✅ Grade for: **{result.file_name}**\n"
272
- output_markdown += f"**Final Score:** {response_data.get('finalScore', 'N/A')}/100\n\n"
 
 
 
 
 
273
  output_markdown += "**Detailed Grading Breakdown:**\n"
274
- for item in response_data.get('gradingBreakdown', []):
275
  output_markdown += f"- **{item.get('criterion', 'N/A')}**: {item.get('score', 'N/A')} / {item.get('maxScore', 'N/A')}\n"
276
  output_markdown += f" - *Justification: {item.get('justification', 'No justification provided.')}*\n"
277
  output_markdown += f"\n**Summary:** {response_data.get('summary', 'No summary provided.')}\n"
278
  output_markdown += "---\n"
 
279
 
280
  if failed_grades:
281
  output_markdown += "### ❌ Failed Papers\n"
 
253
  example_paper_file_obj = genai.upload_file(path=example_paper_file.name, display_name=os.path.basename(example_paper_file.name))
254
 
255
  file_paths = [file.name for file in files]
256
+
 
257
  with ThreadPoolExecutor(max_workers=1) as executor:
258
  loop = asyncio.get_event_loop()
259
  tasks = [loop.run_in_executor(executor, process_single_file, fp, grader, example_paper_file_obj) for fp in file_paths]
260
+ # Use tqdm for progress tracking in the console/logs
261
  results = [await f for f in asyncio.as_completed(tasks)]
262
+ progress(1) # Mark progress as complete
263
 
264
+ # --- THIS SECTION IS UPDATED TO CALCULATE THE SCORE ---
265
  output_markdown = ""
266
  successful_grades = [res for res in results if res.success]
267
  failed_grades = [res for res in results if not res.success]
 
269
  for result in successful_grades:
270
  response_data = result.raw_response
271
  output_markdown += f"### ✅ Grade for: **{result.file_name}**\n"
272
+
273
+ # Calculate the score from the breakdown instead of trusting the AI's sum
274
+ breakdown = response_data.get('gradingBreakdown', [])
275
+ calculated_score = sum(item.get('score', 0) for item in breakdown)
276
+
277
+ output_markdown += f"**Final Score:** {calculated_score}/100\n\n"
278
  output_markdown += "**Detailed Grading Breakdown:**\n"
279
+ for item in breakdown:
280
  output_markdown += f"- **{item.get('criterion', 'N/A')}**: {item.get('score', 'N/A')} / {item.get('maxScore', 'N/A')}\n"
281
  output_markdown += f" - *Justification: {item.get('justification', 'No justification provided.')}*\n"
282
  output_markdown += f"\n**Summary:** {response_data.get('summary', 'No summary provided.')}\n"
283
  output_markdown += "---\n"
284
+ # --- END OF UPDATED SECTION ---
285
 
286
  if failed_grades:
287
  output_markdown += "### ❌ Failed Papers\n"