Jitin Krishnan committed on
Commit
c341352
·
1 Parent(s): 6c7ffba

Update space

Browse files
Files changed (5) hide show
  1. README.md +2 -2
  2. database.py +6 -6
  3. model +3 -3
  4. models.json +3 -3
  5. setup.py +5 -5
README.md CHANGED
@@ -13,7 +13,7 @@ sdk_version: 5.19.0
13
  ---
14
  # Model Performance Leaderboard
15
 
16
- This is a Hugging Face Space that hosts a leaderboard for comparing model performances across various metrics.
17
 
18
  ## Features
19
 
@@ -62,7 +62,7 @@ You can customize this leaderboard by modifying the `models.json` file:
62
 
63
  ```json
64
  {
65
- "title": "Model Performance Leaderboard",
66
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
67
  "metrics": ["accuracy", "f1_score", "precision", "recall"],
68
  "main_metric": "accuracy"
 
13
  ---
14
  # Model Performance Leaderboard
15
 
16
+ This is a Hugging Face Space that hosts a leaderboard for comparing model performances across various metrics of the TRAIL dataset.
17
 
18
  ## Features
19
 
 
62
 
63
  ```json
64
  {
65
+ "title": "TRAIL Performance Leaderboard",
66
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
67
  "metrics": ["accuracy", "f1_score", "precision", "recall"],
68
  "main_metric": "accuracy"
database.py CHANGED
@@ -76,10 +76,10 @@ def load_config():
76
  print("models.json file is empty or missing. Creating with default configuration.")
77
  # Default configuration
78
  config = {
79
- "title": "Model Leaderboard",
80
  "description": "Submit and compare model performances",
81
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
82
- "main_metric": "accuracy"
83
  }
84
  with open("models.json", "w") as f:
85
  json.dump(config, f, indent=2)
@@ -88,10 +88,10 @@ def load_config():
88
  print("Error parsing models.json. Creating with default configuration.")
89
  # Default configuration if JSON is invalid
90
  config = {
91
- "title": "Model Leaderboard",
92
  "description": "Submit and compare model performances",
93
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
94
- "main_metric": "accuracy"
95
  }
96
  with open("models.json", "w") as f:
97
  json.dump(config, f, indent=2)
 
76
  print("models.json file is empty or missing. Creating with default configuration.")
77
  # Default configuration
78
  config = {
79
+ "title": "TRAIL Model Leaderboard",
80
  "description": "Submit and compare model performances",
81
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
82
+ "main_metric": "Cat. F1"
83
  }
84
  with open("models.json", "w") as f:
85
  json.dump(config, f, indent=2)
 
88
  print("Error parsing models.json. Creating with default configuration.")
89
  # Default configuration if JSON is invalid
90
  config = {
91
+ "title": "TRAIL Model Leaderboard",
92
  "description": "Submit and compare model performances",
93
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
94
+ "main_metric": "Cat. F1"
95
  }
96
  with open("models.json", "w") as f:
97
  json.dump(config, f, indent=2)
model CHANGED
@@ -1,6 +1,6 @@
1
  {
2
- "title": "Model Performance Leaderboard",
3
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
4
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
5
- "main_metric": "accuracy"
6
  }
 
1
  {
2
+ "title": "TRAIL Performance Leaderboard",
3
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
4
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
5
+ "main_metric": "Cat. F1"
6
  }
models.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
- "title": "Model Performance Leaderboard",
3
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
4
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
5
- "main_metric": "accuracy"
6
  }
 
1
  {
2
+ "title": "TRAIL Performance Leaderboard",
3
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
4
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
5
+ "main_metric": "Cat. F1"
6
  }
setup.py CHANGED
@@ -21,10 +21,10 @@ def setup():
21
  if not os.path.exists("models.json") or os.path.getsize("models.json") == 0:
22
  print("Creating models.json configuration file...")
23
  config = {
24
- "title": "Model Performance Leaderboard",
25
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
26
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
27
- "main_metric": "accuracy"
28
  }
29
  with open("models.json", "w") as f:
30
  json.dump(config, f, indent=2)
@@ -39,8 +39,8 @@ def setup():
39
  config = {
40
  "title": "Model Performance Leaderboard",
41
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
42
- "metrics": ["accuracy", "f1_score", "precision", "recall"],
43
- "main_metric": "accuracy"
44
  }
45
  with open("models.json", "w") as f:
46
  json.dump(config, f, indent=2)
 
21
  if not os.path.exists("models.json") or os.path.getsize("models.json") == 0:
22
  print("Creating models.json configuration file...")
23
  config = {
24
+ "title": "TRAIL Performance Leaderboard",
25
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
26
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
27
+ "main_metric": "Cat. F1"
28
  }
29
  with open("models.json", "w") as f:
30
  json.dump(config, f, indent=2)
 
39
  config = {
40
  "title": "Model Performance Leaderboard",
41
  "description": "This leaderboard tracks and compares model performance across multiple metrics. Submit your model results to see how they stack up!",
42
+ "metrics": ["Cat. F1", "Loc. Acc", "Joint F1"],
43
+ "main_metric": "Cat. F1"
44
  }
45
  with open("models.json", "w") as f:
46
  json.dump(config, f, indent=2)