Nuno-Tome committed on
Commit
1c65024
·
1 Parent(s): 4d005f8

feat: display 3 labels with scores per model in separate columns

Browse files
Files changed (1) hide show
  1. app.py +28 -13
app.py CHANGED
@@ -5,7 +5,7 @@ import pandas as pd
5
  from huggingface_hub import login
6
 
7
  MIN_ACEPTABLE_SCORE = 0.1
8
- MAX_N_LABELS = 5
9
 
10
  import os
11
 
@@ -64,13 +64,16 @@ def classify(image, model_name):
64
  return result
65
 
66
  def format_results(results):
67
- labels = []
68
- scores = []
69
  for r in results[:MAX_N_LABELS]:
70
  if r['score'] >= MIN_ACEPTABLE_SCORE:
71
- labels.append(r['label'])
72
- scores.append(f"{r['score']:.2%}")
73
- return "<br>".join(labels), "<br>".join(scores)
 
 
 
 
74
 
75
  def main():
76
  st.title("Image Classification - Compare All Models")
@@ -97,19 +100,27 @@ def main():
97
  status_text.text(f"Running model {i+1}/{len(MODELS)}: {model_name}")
98
  try:
99
  classification_result = classify(image_to_classify, model_name)
100
- labels, scores = format_results(classification_result)
101
  results_data.append({
102
  "Model": model_name,
103
  "Category": category,
104
- "Top Labels": labels,
105
- "Scores": scores
 
 
 
 
106
  })
107
  except Exception as e:
108
  results_data.append({
109
  "Model": model_name,
110
  "Category": category,
111
- "Top Labels": f"Error: {str(e)[:50]}",
112
- "Scores": "-"
 
 
 
 
113
  })
114
  progress_bar.progress((i + 1) / len(MODELS))
115
 
@@ -132,8 +143,12 @@ def main():
132
  column_config={
133
  "Model": st.column_config.TextColumn("Model", width="medium"),
134
  "Category": st.column_config.TextColumn("Category", width="small"),
135
- "Top Labels": st.column_config.TextColumn("Top Labels", width="large"),
136
- "Scores": st.column_config.TextColumn("Scores", width="medium"),
 
 
 
 
137
  }
138
  )
139
 
 
5
  from huggingface_hub import login
6
 
7
  MIN_ACEPTABLE_SCORE = 0.1
8
+ MAX_N_LABELS = 3
9
 
10
  import os
11
 
 
64
  return result
65
 
66
  def format_results(results):
67
+ formatted = []
 
68
  for r in results[:MAX_N_LABELS]:
69
  if r['score'] >= MIN_ACEPTABLE_SCORE:
70
+ formatted.append({
71
+ "label": r['label'],
72
+ "score": f"{r['score']:.2%}"
73
+ })
74
+ while len(formatted) < 3:
75
+ formatted.append({"label": "-", "score": "-"})
76
+ return formatted
77
 
78
  def main():
79
  st.title("Image Classification - Compare All Models")
 
100
  status_text.text(f"Running model {i+1}/{len(MODELS)}: {model_name}")
101
  try:
102
  classification_result = classify(image_to_classify, model_name)
103
+ formatted = format_results(classification_result)
104
  results_data.append({
105
  "Model": model_name,
106
  "Category": category,
107
+ "Label 1": formatted[0]["label"],
108
+ "Score 1": formatted[0]["score"],
109
+ "Label 2": formatted[1]["label"],
110
+ "Score 2": formatted[1]["score"],
111
+ "Label 3": formatted[2]["label"],
112
+ "Score 3": formatted[2]["score"],
113
  })
114
  except Exception as e:
115
  results_data.append({
116
  "Model": model_name,
117
  "Category": category,
118
+ "Label 1": f"Error: {str(e)[:50]}",
119
+ "Score 1": "-",
120
+ "Label 2": "-",
121
+ "Score 2": "-",
122
+ "Label 3": "-",
123
+ "Score 3": "-",
124
  })
125
  progress_bar.progress((i + 1) / len(MODELS))
126
 
 
143
  column_config={
144
  "Model": st.column_config.TextColumn("Model", width="medium"),
145
  "Category": st.column_config.TextColumn("Category", width="small"),
146
+ "Label 1": st.column_config.TextColumn("Label 1", width="medium"),
147
+ "Score 1": st.column_config.TextColumn("Score 1", width="small"),
148
+ "Label 2": st.column_config.TextColumn("Label 2", width="medium"),
149
+ "Score 2": st.column_config.TextColumn("Score 2", width="small"),
150
+ "Label 3": st.column_config.TextColumn("Label 3", width="medium"),
151
+ "Score 3": st.column_config.TextColumn("Score 3", width="small"),
152
  }
153
  )
154