argmin committed on
Commit
3ed2d98
·
1 Parent(s): 630fa04

add status bar

Browse files
Files changed (1) hide show
  1. app/main.py +25 -9
app/main.py CHANGED
@@ -174,8 +174,16 @@ if uploaded_file:
174
  # Dynamically create the Pydantic model for validation
175
  ClassificationOutput = generate_classification_model(list(label_descriptions.keys()))
176
 
 
 
 
 
177
  # Function to classify a single row
178
- def classify_row(row):
 
 
 
 
179
  # Generate system and user prompts
180
  system_prompt, user_prompt = generate_prompts(
181
  row=row.to_dict(),
@@ -206,25 +214,33 @@ if uploaded_file:
206
  )
207
 
208
  # Apply the classification to each row in the limited data
209
- limited_data[prediction_column] = limited_data.apply(classify_row, axis=1)
 
 
 
 
 
 
 
 
 
 
 
 
210
 
211
  # Display Predictions
212
  st.write(f"### Predictions ({prediction_column})", limited_data)
213
 
 
214
  if label_column in limited_data.columns:
215
- # Evaluation Mode: Target column exists
216
  from utils.evaluation import evaluate_predictions
217
  report = evaluate_predictions(limited_data[label_column], limited_data[prediction_column])
218
  st.write("### Evaluation Metrics")
219
  display_metrics_as_table(report)
220
  else:
221
- # Inference Mode: No target column
222
- st.warning("Inference mode: No target column provided, so no evaluation metrics are available.")
223
-
224
  # Count predictions
225
  label_counts = limited_data[prediction_column].value_counts().reset_index()
226
  label_counts.columns = ["Label", "Count"]
227
-
228
- # Display prediction stats
229
  st.subheader("Prediction Statistics")
230
- st.table(label_counts)
 
174
  # Dynamically create the Pydantic model for validation
175
  ClassificationOutput = generate_classification_model(list(label_descriptions.keys()))
176
 
177
+ # Create a placeholder for the progress bar
178
+ progress_bar = st.progress(0)
179
+ progress_text = st.empty()
180
+
181
  # Function to classify a single row
182
+ def classify_row(row, index, total_rows):
183
+ # Update progress bar
184
+ progress_bar.progress((index + 1) / total_rows)
185
+ progress_text.text(f"Processing row {index + 1}/{total_rows}...")
186
+
187
  # Generate system and user prompts
188
  system_prompt, user_prompt = generate_prompts(
189
  row=row.to_dict(),
 
214
  )
215
 
216
  # Apply the classification to each row in the limited data
217
+ total_rows = len(limited_data)
218
+ predictions = []
219
+
220
+ for index, row in limited_data.iterrows():
221
+ prediction = classify_row(row, index, total_rows)
222
+ predictions.append(prediction)
223
+
224
+ # Add predictions to the DataFrame
225
+ limited_data[prediction_column] = predictions
226
+
227
+ # Reset progress bar and text
228
+ progress_bar.empty()
229
+ progress_text.empty()
230
 
231
  # Display Predictions
232
  st.write(f"### Predictions ({prediction_column})", limited_data)
233
 
234
+ # Evaluate if ground truth is available
235
  if label_column in limited_data.columns:
 
236
  from utils.evaluation import evaluate_predictions
237
  report = evaluate_predictions(limited_data[label_column], limited_data[prediction_column])
238
  st.write("### Evaluation Metrics")
239
  display_metrics_as_table(report)
240
  else:
241
+ st.warning(f"Inference mode: No target column provided, so no evaluation metrics are available.")
 
 
242
  # Count predictions
243
  label_counts = limited_data[prediction_column].value_counts().reset_index()
244
  label_counts.columns = ["Label", "Count"]
 
 
245
  st.subheader("Prediction Statistics")
246
+ st.table(label_counts)