Datasets:
Update eval_pipeline/vlm_metrics_eval_step2.py
Browse files
eval_pipeline/vlm_metrics_eval_step2.py
CHANGED
|
@@ -301,15 +301,15 @@ def calculate_metrics(args):
|
|
| 301 |
|
| 302 |
if __name__ == "__main__":
|
| 303 |
parser = argparse.ArgumentParser(description="Calculate Gemini Evaluation Scores")
|
| 304 |
-
parser.add_argument("--model_order", default="bagel", help="
|
| 305 |
-
parser.add_argument("--answer_dir", required=True, help="
|
| 306 |
-
parser.add_argument("--output_file", required=True, help="
|
| 307 |
# Weight arguments (5 floats)
|
| 308 |
parser.add_argument("--weights", type=float, nargs=5, default=DEFAULT_WEIGHTS,
|
| 309 |
-
help="Q1-Q5
|
| 310 |
# Cutoff switch
|
| 311 |
parser.add_argument("--enable_cutoff", action="store_true",
|
| 312 |
-
help="
|
| 313 |
args = parser.parse_args()
|
| 314 |
|
| 315 |
calculate_metrics(args)
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: collect scoring options, then run the metric computation.
    arg_parser = argparse.ArgumentParser(description="Calculate Gemini Evaluation Scores")

    # Declarative table of (flag, options) keeps each argument definition on one line
    # and makes it easy to scan the full CLI surface at a glance.
    cli_options = (
        ("--model_order", dict(default="bagel",
                               help="Comma-separated model names in the evaluation order")),
        ("--answer_dir", dict(required=True,
                              help="Directory containing Step 1 Gemini API evaluation results")),
        ("--output_file", dict(required=True,
                               help="Path to the output JSON report")),
        # Per-question weights: exactly 5 floats, one per Q1..Q5.
        ("--weights", dict(type=float, nargs=5, default=DEFAULT_WEIGHTS,
                           help="Weights for Q1-Q5. Default: 0.4 0.3 0.1 0.1 0.1")),
        # Opt-in flag; absent means cutoff logic is disabled.
        ("--enable_cutoff", dict(action="store_true",
                                 help="Enable cutoff logic: if Q1 < 4 (normalized 0.8), set later items to 0")),
    )
    for flag, options in cli_options:
        arg_parser.add_argument(flag, **options)

    calculate_metrics(arg_parser.parse_args())