Datasets:
Fix 5 high-priority bugs: caching, rate-limit retry, scoring, types, error msg
Browse files
- Remove force_redownload/force_download/force_extract so dataset uses cache
- Add 429 (rate limit) to retryable HTTP status codes in llm_interface.py
- Score parse/API failures as 0 instead of -1 (not a deliberate wrong choice)
- Fix return type annotation: list[int] -> list[str] in get_openrouter_prediction
- Fix wrong filename in error message: jee_neet_benchmark_dataset.py -> jee-neet-benchmark.py
- Fix pre-existing wrong test assertions in evaluation.py (correct_full=7, incorrect=8)
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- jee-neet-benchmark.py +0 -5
- src/benchmark_runner.py +3 -6
- src/evaluation.py +6 -10
- src/llm_interface.py +4 -4
jee-neet-benchmark.py
CHANGED
|
@@ -81,11 +81,6 @@ class JeeNeetBenchmark(datasets.GeneratorBasedBuilder):
|
|
| 81 |
repo_metadata_path = os.path.join("data", "metadata.jsonl")
|
| 82 |
repo_images_archive_path = "images.tar.gz" # At the root of the repository
|
| 83 |
|
| 84 |
-
# Ensure force download and extract for the current run
|
| 85 |
-
# dl_manager.download_config is an instance of datasets.DownloadConfig
|
| 86 |
-
dl_manager.download_config.force_download = True
|
| 87 |
-
dl_manager.download_config.force_extract = True # If redownloading, re-extraction is also desired
|
| 88 |
-
|
| 89 |
try:
|
| 90 |
# Download and extract metadata and images archive
|
| 91 |
downloaded_files = dl_manager.download_and_extract({
|
|
|
|
| 81 |
repo_metadata_path = os.path.join("data", "metadata.jsonl")
|
| 82 |
repo_images_archive_path = "images.tar.gz" # At the root of the repository
|
| 83 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
try:
|
| 85 |
# Download and extract metadata and images archive
|
| 86 |
downloaded_files = dl_manager.download_and_extract({
|
src/benchmark_runner.py
CHANGED
|
@@ -210,10 +210,7 @@ def generate_markdown_summary(summary: Dict[str, Any], filepath: str):
|
|
| 210 |
if skipped_count_q > 0:
|
| 211 |
calculation_parts.append(f"{skipped_count_q} Skipped (0)")
|
| 212 |
if api_fail_count_q > 0:
|
| 213 |
-
|
| 214 |
-
penalty_per_api_fail = -1
|
| 215 |
-
if q_type == "INTEGER": penalty_per_api_fail = 0
|
| 216 |
-
calculation_parts.append(f"{api_fail_count_q} API/Parse Fail ({penalty_per_api_fail})")
|
| 217 |
|
| 218 |
calculation_str = " + ".join(part for part in calculation_parts if part)
|
| 219 |
if not calculation_str:
|
|
@@ -475,12 +472,12 @@ def run_benchmark(
|
|
| 475 |
# Explicitly specify data_files and data_dir for local loading.
|
| 476 |
# data_dir should be the project root ('.') when loading a local script,
|
| 477 |
# as the script is copied to a cache and needs to know where the actual data is.
|
| 478 |
-
dataset = load_dataset(dataset_path, split='test', data_files={'test': 'data/metadata.jsonl'}, data_dir=os.getcwd(), trust_remote_code=True, download_mode="force_redownload")
|
| 479 |
dataset = dataset.cast_column("image", HFImage(decode=True)) # Ensure images are loaded as PIL
|
| 480 |
logging.info(f"Dataset loaded successfully from path: {dataset_path}. Original number of questions: {len(dataset)}")
|
| 481 |
except Exception as e:
|
| 482 |
logging.error(f"Failed to load dataset from path '{dataset_path}': {e}")
|
| 483 |
-
logging.error("Ensure the path is correct and 'jee_neet_benchmark_dataset.py' exists.")
|
| 484 |
return
|
| 485 |
|
| 486 |
# Filter dataset based on choices
|
|
|
|
| 210 |
if skipped_count_q > 0:
|
| 211 |
calculation_parts.append(f"{skipped_count_q} Skipped (0)")
|
| 212 |
if api_fail_count_q > 0:
|
| 213 |
+
calculation_parts.append(f"{api_fail_count_q} API/Parse Fail (0)")
|
|
|
|
|
|
|
|
|
|
| 214 |
|
| 215 |
calculation_str = " + ".join(part for part in calculation_parts if part)
|
| 216 |
if not calculation_str:
|
|
|
|
| 472 |
# Explicitly specify data_files and data_dir for local loading.
|
| 473 |
# data_dir should be the project root ('.') when loading a local script,
|
| 474 |
# as the script is copied to a cache and needs to know where the actual data is.
|
| 475 |
+
dataset = load_dataset(dataset_path, split='test', data_files={'test': 'data/metadata.jsonl'}, data_dir=os.getcwd(), trust_remote_code=True)
|
| 476 |
dataset = dataset.cast_column("image", HFImage(decode=True)) # Ensure images are loaded as PIL
|
| 477 |
logging.info(f"Dataset loaded successfully from path: {dataset_path}. Original number of questions: {len(dataset)}")
|
| 478 |
except Exception as e:
|
| 479 |
logging.error(f"Failed to load dataset from path '{dataset_path}': {e}")
|
| 480 |
+
logging.error("Ensure the path is correct and 'jee-neet-benchmark.py' exists.")
|
| 481 |
return
|
| 482 |
|
| 483 |
# Filter dataset based on choices
|
src/evaluation.py
CHANGED
|
@@ -133,11 +133,7 @@ def calculate_single_question_score_details(result_item: Dict[str, Any]) -> Dict
|
|
| 133 |
|
| 134 |
if not api_success or pred is None: # pred is None means our internal parsing failed
|
| 135 |
evaluation_status = "failure_api_or_parse"
|
| 136 |
-
current_score_change = -1
|
| 137 |
-
if exam_name == "JEE_MAIN" and question_type == "INTEGER":
|
| 138 |
-
current_score_change = 0
|
| 139 |
-
if exam_name == "JEE_ADVANCED" and question_type == "INTEGER":
|
| 140 |
-
current_score_change = 0
|
| 141 |
elif isinstance(pred, str) and pred.upper() == "SKIP": # Standardize SKIP comparison
|
| 142 |
current_score_change = 0
|
| 143 |
evaluation_status = "skipped"
|
|
@@ -477,16 +473,16 @@ if __name__ == '__main__':
|
|
| 477 |
print(json.dumps(exam_summary, indent=2, sort_keys=True))
|
| 478 |
|
| 479 |
# Basic assertions - can be expanded
|
| 480 |
-
assert exam_summary["overall_score"] == (4-1+0-1-1) + (4-1) + (4+0-1) + (3-1) + (4+0) + (4+2+3+1-2-2+0+4-2)
|
| 481 |
-
assert exam_summary["overall_correct_full"] ==
|
| 482 |
assert exam_summary["overall_partial_correct"] == 3
|
| 483 |
-
assert exam_summary["overall_incorrect_choice"] ==
|
| 484 |
assert exam_summary["overall_skipped"] == 2
|
| 485 |
assert exam_summary["overall_api_parse_failures"] == 3 # N004, N005, JM005
|
| 486 |
|
| 487 |
assert exam_summary["section_breakdown"]["Physics"]["score"] == (4-1) + (4+0) + (4+0) - 2 # N001,N002 + JM003,JM004 + JA003,JA004 + JA013
|
| 488 |
-
assert exam_summary["section_breakdown"]["Chemistry"]["score"] == (0-1) + (-1) + (4+2+3+1-2-2+0) # N003,N004 + JM005 + JA005-JA011
|
| 489 |
-
assert exam_summary["section_breakdown"]["Botany"]["score"] == -1 # N005
|
| 490 |
assert exam_summary["section_breakdown"]["Maths"]["score"] == (4-1) + (3-1) + 4 # JM001,JM002 + JA001,JA002 + JA012
|
| 491 |
|
| 492 |
print("\nEvaluation tests completed.")
|
|
|
|
| 133 |
|
| 134 |
if not api_success or pred is None: # pred is None means our internal parsing failed
|
| 135 |
evaluation_status = "failure_api_or_parse"
|
| 136 |
+
current_score_change = 0 # No penalty: parse/API failure is not a deliberate wrong choice
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
elif isinstance(pred, str) and pred.upper() == "SKIP": # Standardize SKIP comparison
|
| 138 |
current_score_change = 0
|
| 139 |
evaluation_status = "skipped"
|
|
|
|
| 473 |
print(json.dumps(exam_summary, indent=2, sort_keys=True))
|
| 474 |
|
| 475 |
# Basic assertions - can be expanded
|
| 476 |
+
assert exam_summary["overall_score"] == (4-1+0+0+0) + (4-1) + (4+0+0) + (3-1) + (4+0) + (4+2+3+1-2-2+0+4-2)
|
| 477 |
+
assert exam_summary["overall_correct_full"] == 7
|
| 478 |
assert exam_summary["overall_partial_correct"] == 3
|
| 479 |
+
assert exam_summary["overall_incorrect_choice"] == 8
|
| 480 |
assert exam_summary["overall_skipped"] == 2
|
| 481 |
assert exam_summary["overall_api_parse_failures"] == 3 # N004, N005, JM005
|
| 482 |
|
| 483 |
assert exam_summary["section_breakdown"]["Physics"]["score"] == (4-1) + (4+0) + (4+0) - 2 # N001,N002 + JM003,JM004 + JA003,JA004 + JA013
|
| 484 |
+
assert exam_summary["section_breakdown"]["Chemistry"]["score"] == (0+0) + (0) + (4+2+3+1-2-2+0) # N003,N004 + JM005 + JA005-JA011
|
| 485 |
+
assert exam_summary["section_breakdown"]["Botany"]["score"] == 0 # N005 (parse fail = 0 penalty)
|
| 486 |
assert exam_summary["section_breakdown"]["Maths"]["score"] == (4-1) + (3-1) + 4 # JM001,JM002 + JA001,JA002 + JA012
|
| 487 |
|
| 488 |
print("\nEvaluation tests completed.")
|
src/llm_interface.py
CHANGED
|
@@ -29,7 +29,7 @@ RETRYABLE_EXCEPTIONS = (
|
|
| 29 |
)
|
| 30 |
|
| 31 |
# Define status codes that warrant a retry
|
| 32 |
-
RETRYABLE_STATUS_CODES = {500, 502, 503, 504}
|
| 33 |
|
| 34 |
# Retry decorator configuration
|
| 35 |
retry_config = dict(
|
|
@@ -103,7 +103,7 @@ def get_openrouter_prediction(
|
|
| 103 |
question_type: str = "MCQ_SINGLE_CORRECT", # New parameter with default
|
| 104 |
max_tokens: int = 100,
|
| 105 |
request_timeout: int = 60
|
| 106 |
-
) -> tuple[list[int] | str | None, str | None]: # Allow predicted_answer to be "SKIP"
|
| 107 |
"""
|
| 108 |
Gets a prediction from an OpenRouter model. Handles initial image prompts and text-only re-prompts.
|
| 109 |
|
|
@@ -119,8 +119,8 @@ def get_openrouter_prediction(
|
|
| 119 |
request_timeout (int): Timeout for the API request in seconds.
|
| 120 |
|
| 121 |
Returns:
|
| 122 |
-
tuple[list[int] | str | None, str | None]: A tuple containing:
|
| 123 |
-
- The parsed answer as a list of integers, the string "SKIP", or None if failed.
|
| 124 |
- The raw response text from the LLM (or None if API call failed).
|
| 125 |
|
| 126 |
Raises:
|
|
|
|
| 29 |
)
|
| 30 |
|
| 31 |
# Define status codes that warrant a retry
|
| 32 |
+
RETRYABLE_STATUS_CODES = {429, 500, 502, 503, 504}
|
| 33 |
|
| 34 |
# Retry decorator configuration
|
| 35 |
retry_config = dict(
|
|
|
|
| 103 |
question_type: str = "MCQ_SINGLE_CORRECT", # New parameter with default
|
| 104 |
max_tokens: int = 100,
|
| 105 |
request_timeout: int = 60
|
| 106 |
+
) -> tuple[list[str] | str | None, str | None]: # Allow predicted_answer to be "SKIP"
|
| 107 |
"""
|
| 108 |
Gets a prediction from an OpenRouter model. Handles initial image prompts and text-only re-prompts.
|
| 109 |
|
|
|
|
| 119 |
request_timeout (int): Timeout for the API request in seconds.
|
| 120 |
|
| 121 |
Returns:
|
| 122 |
+
tuple[list[str] | str | None, str | None]: A tuple containing:
|
| 123 |
+
- The parsed answer as a list of strings, the string "SKIP", or None if failed.
|
| 124 |
- The raw response text from the LLM (or None if API call failed).
|
| 125 |
|
| 126 |
Raises:
|