Update src/leaderboard.py
src/leaderboard.py  (+58 -82)
@@ -118,88 +118,6 @@ def save_leaderboard(df: pd.DataFrame) -> bool:
         print(f"Error saving leaderboard: {e}")
         return False
 
-# def add_model_to_leaderboard(
-#     model_name: str,
-#     author: str,
-#     evaluation_results: Dict,
-#     validation_info: Dict,
-#     model_type: str = "",
-#     description: str = ""
-# ) -> pd.DataFrame:
-#     """Add new model results to leaderboard."""
-
-#     # Load current leaderboard
-#     df = load_leaderboard()
-
-#     # Check if model already exists
-#     existing_mask = df['model_name'] == model_name
-#     if existing_mask.any():
-#         print(f"Model '{model_name}' already exists. Updating...")
-#         df = df[~existing_mask]  # Remove existing entry
-
-#     # Extract metrics
-#     averages = evaluation_results.get('averages', {})
-#     google_averages = evaluation_results.get('google_comparable_averages', {})
-#     summary = evaluation_results.get('summary', {})
-
-#     # Create new entry
-#     new_entry = {
-#         'submission_id': create_submission_id(),
-#         'model_name': sanitize_model_name(model_name),
-#         'author': author[:100] if author else 'Anonymous',
-#         'submission_date': datetime.datetime.now().isoformat(),
-#         'model_type': model_type[:50] if model_type else 'unknown',
-#         'description': description[:500] if description else '',
-
-#         # Primary metrics
-#         'quality_score': float(averages.get('quality_score', 0.0)),
-#         'bleu': float(averages.get('bleu', 0.0)),
-#         'chrf': float(averages.get('chrf', 0.0)),
-
-#         # Secondary metrics
-#         'rouge1': float(averages.get('rouge1', 0.0)),
-#         'rouge2': float(averages.get('rouge2', 0.0)),
-#         'rougeL': float(averages.get('rougeL', 0.0)),
-#         'cer': float(averages.get('cer', 0.0)),
-#         'wer': float(averages.get('wer', 0.0)),
-#         'len_ratio': float(averages.get('len_ratio', 0.0)),
-
-#         # Google comparable metrics
-#         'google_quality_score': float(google_averages.get('quality_score', 0.0)),
-#         'google_bleu': float(google_averages.get('bleu', 0.0)),
-#         'google_chrf': float(google_averages.get('chrf', 0.0)),
-
-#         # Coverage info
-#         'total_samples': int(summary.get('total_samples', 0)),
-#         'language_pairs_covered': int(summary.get('language_pairs_covered', 0)),
-#         'google_pairs_covered': int(summary.get('google_comparable_pairs', 0)),
-#         'coverage_rate': float(validation_info.get('coverage', 0.0)),
-
-#         # Detailed results
-#         'detailed_metrics': json.dumps(evaluation_results),
-#         'validation_report': validation_info.get('report', ''),
-
-#         # Metadata
-#         'evaluation_date': datetime.datetime.now().isoformat(),
-#         'leaderboard_version': 1
-#     }
-
-#     # Add to dataframe
-#     new_row_df = pd.DataFrame([new_entry])
-#     updated_df = pd.concat([df, new_row_df], ignore_index=True)
-
-#     # Sort by quality score (descending)
-#     updated_df = updated_df.sort_values('quality_score', ascending=False).reset_index(drop=True)
-
-#     # Save updated leaderboard
-#     if save_leaderboard(updated_df):
-#         print(f"Added '{model_name}' to leaderboard")
-#         return updated_df
-#     else:
-#         print("Failed to save leaderboard")
-#         return df
-
-
 def add_model_to_leaderboard(
     model_name: str,
     author: str,
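Note (not part of the commit): the hunk above only deletes the commented-out draft of `add_model_to_leaderboard`; the live implementation that follows it is unchanged here. As a reading aid, below is a minimal call sketch against that function, assuming it still consumes the same `evaluation_results`/`validation_info` layout the deleted draft read from. The import path, model names, and all metric values are invented.

```python
# Hypothetical usage sketch, not from this commit. Assumes the live
# add_model_to_leaderboard keeps the dict layout the deleted draft used.
from src.leaderboard import add_model_to_leaderboard  # import path assumed

evaluation_results = {
    # 'averages' feeds the primary/secondary metric columns; values invented
    "averages": {"quality_score": 0.61, "bleu": 23.4, "chrf": 0.52},
    "google_comparable_averages": {"quality_score": 0.58, "bleu": 21.9, "chrf": 0.49},
    "summary": {
        "total_samples": 1200,
        "language_pairs_covered": 30,
        "google_comparable_pairs": 24,
    },
}
validation_info = {"coverage": 0.93, "report": "all required pairs present"}

updated = add_model_to_leaderboard(
    model_name="demo-mt-model",          # invented
    author="example-user",               # invented
    evaluation_results=evaluation_results,
    validation_info=validation_info,
    model_type="seq2seq",
    description="Illustrative submission only",
)
print(updated[["model_name", "quality_score"]].head())
```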
@@ -284,6 +202,64 @@ def add_model_to_leaderboard(
 
     return updated_df
 
+def prepare_leaderboard_display(df: pd.DataFrame) -> pd.DataFrame:
+    """Prepare leaderboard for display by formatting and selecting appropriate columns."""
+
+    if df.empty:
+        return df
+
+    # Select columns for display (exclude detailed_metrics and validation_report)
+    display_columns = [
+        'model_name', 'author', 'submission_date', 'model_type',
+        'quality_score', 'bleu', 'chrf',
+        'rouge1', 'rougeL',
+        'total_samples', 'language_pairs_covered', 'google_pairs_covered',
+        'coverage_rate'
+    ]
+
+    # Only include columns that exist
+    available_columns = [col for col in display_columns if col in df.columns]
+    display_df = df[available_columns].copy()
+
+    # Format numeric columns
+    numeric_format = {
+        'quality_score': '{:.4f}',
+        'bleu': '{:.2f}',
+        'chrf': '{:.4f}',
+        'rouge1': '{:.4f}',
+        'rougeL': '{:.4f}',
+        'coverage_rate': '{:.1%}',
+    }
+
+    for col, fmt in numeric_format.items():
+        if col in display_df.columns:
+            display_df[col] = display_df[col].apply(lambda x: fmt.format(float(x)) if pd.notnull(x) else "0.0000")
+
+    # Format submission date
+    if 'submission_date' in display_df.columns:
+        display_df['submission_date'] = pd.to_datetime(display_df['submission_date']).dt.strftime('%Y-%m-%d %H:%M')
+
+    # Rename columns for better display
+    column_renames = {
+        'model_name': 'Model Name',
+        'author': 'Author',
+        'submission_date': 'Submitted',
+        'model_type': 'Type',
+        'quality_score': 'Quality Score',
+        'bleu': 'BLEU',
+        'chrf': 'ChrF',
+        'rouge1': 'ROUGE-1',
+        'rougeL': 'ROUGE-L',
+        'total_samples': 'Samples',
+        'language_pairs_covered': 'Lang Pairs',
+        'google_pairs_covered': 'Google Pairs',
+        'coverage_rate': 'Coverage'
+    }
+
+    display_df = display_df.rename(columns=column_renames)
+
+    return display_df
+
 def get_leaderboard_stats(df: pd.DataFrame) -> Dict:
     """Get summary statistics for the leaderboard."""
 
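Note (not part of the commit): below is a sketch of how the new `prepare_leaderboard_display` might be wired into the Space's UI. Only `load_leaderboard` and `prepare_leaderboard_display` come from the diff; the Gradio front end, the import path, and the refresh wiring are assumptions.

```python
# Hypothetical wiring sketch, not from this commit. The Gradio front end and
# import path are assumptions; the two leaderboard helpers come from the diff.
import gradio as gr
import pandas as pd

from src.leaderboard import load_leaderboard, prepare_leaderboard_display  # path assumed


def refresh() -> pd.DataFrame:
    # prepare_leaderboard_display returns formatted strings, e.g. a
    # coverage_rate of 0.93 becomes "93.0%" via the '{:.1%}' format.
    return prepare_leaderboard_display(load_leaderboard())


with gr.Blocks() as demo:
    table = gr.Dataframe(value=refresh(), interactive=False)
    gr.Button("Refresh").click(fn=refresh, outputs=table)

demo.launch()
```

One design consequence worth noting: because the formatter casts the numeric columns to strings, any sorting applied to the displayed frame is lexicographic. Row order should therefore be fixed on the raw frame before formatting, which matches the flow in this file where the leaderboard is sorted by `quality_score` at save time.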