Fix result script (#229)
* change afqmc
* fix results script
* remove prints
* change MassiveIntentClassification.json
* fix script
results/Bytedance__Seed-1.6-embedding/1/AFQMC.json
CHANGED
|
@@ -23,4 +23,4 @@
|
|
| 23 |
},
|
| 24 |
"evaluation_time": 47.32486319541931,
|
| 25 |
"kg_co2_emissions": null
|
| 26 |
-
}
|
|
|
|
| 23 |
},
|
| 24 |
"evaluation_time": 47.32486319541931,
|
| 25 |
"kg_co2_emissions": null
|
| 26 |
+
}
|
results/ai-forever__FRIDA/7292217af9a9e6dbf07048f76b434ad1e2aa8b76/MassiveIntentClassification.json
CHANGED
|
@@ -134,4 +134,4 @@
|
|
| 134 |
},
|
| 135 |
"evaluation_time": 47.84240365028381,
|
| 136 |
"kg_co2_emissions": null
|
| 137 |
-
}
|
|
|
|
| 134 |
},
|
| 135 |
"evaluation_time": 47.84240365028381,
|
| 136 |
"kg_co2_emissions": null
|
| 137 |
+
}
|
scripts/create_pr_results_comment.py
CHANGED
|
@@ -100,17 +100,19 @@ def create_comparison_table(
|
|
| 100 |
df[max_col_name] = None
|
| 101 |
task_results = mteb.load_results(tasks=tasks, download_latest=False)
|
| 102 |
task_results = task_results.join_revisions()
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
|
|
|
|
|
|
| 106 |
if not max_dataframe.empty:
|
| 107 |
for task_name, row in max_dataframe.iterrows():
|
| 108 |
-
df.loc[df[task_col_name] == task_name, max_col_name] =
|
| 109 |
-
row["score"] / 100
|
| 110 |
-
) # scores are in percentage
|
| 111 |
|
| 112 |
averages: dict[str, float | None] = {}
|
| 113 |
for col in models + [max_col_name]:
|
|
|
|
|
|
|
| 114 |
numeric = pd.to_numeric(df[col], errors="coerce")
|
| 115 |
avg = numeric.mean()
|
| 116 |
averages[col] = avg if not pd.isna(avg) else None
|
|
|
|
| 100 |
df[max_col_name] = None
|
| 101 |
task_results = mteb.load_results(tasks=tasks, download_latest=False)
|
| 102 |
task_results = task_results.join_revisions()
|
| 103 |
+
|
| 104 |
+
task_results_df = task_results.to_dataframe(format="long")
|
| 105 |
+
# some scores are in percentage, convert them to decimal
|
| 106 |
+
task_results_df.loc[task_results_df["score"] > 1, "score"] /= 100
|
| 107 |
+
max_dataframe = task_results_df.groupby(task_col_name).max()
|
| 108 |
if not max_dataframe.empty:
|
| 109 |
for task_name, row in max_dataframe.iterrows():
|
| 110 |
+
df.loc[df[task_col_name] == task_name, max_col_name] = row["score"]
|
|
|
|
|
|
|
| 111 |
|
| 112 |
averages: dict[str, float | None] = {}
|
| 113 |
for col in models + [max_col_name]:
|
| 114 |
+
if col not in df.columns:
|
| 115 |
+
continue
|
| 116 |
numeric = pd.to_numeric(df[col], errors="coerce")
|
| 117 |
avg = numeric.mean()
|
| 118 |
averages[col] = avg if not pd.isna(avg) else None
|