magnumical committed on
Commit
e549051
·
verified ·
1 Parent(s): 279af50

Upload model_deployment.py

Browse files
Files changed (1) hide show
  1. streamlit_ui/model_deployment.py +52 -68
streamlit_ui/model_deployment.py CHANGED
@@ -41,10 +41,12 @@ REQUEST_COUNT = Counter('audio_classifier_requests_total', 'Total number of requ
41
  RESPONSE_TIME = Histogram('audio_classifier_response_time_seconds', 'Time taken to process requests')
42
  ERROR_COUNT = Counter('audio_classifier_errors_total', 'Total number of errors during classification')
43
 
 
 
44
  # Start Prometheus HTTP server
45
- start_http_server(9100, addr="0.0.0.0") # Expose metrics on 9100 for external scraping
46
- # Expose metrics at http://localhost:9100/metrics
47
 
 
48
 
49
  def filtering(audio, sr):
50
  """
@@ -73,6 +75,26 @@ def filtering(audio, sr):
73
  filtered_audio = sosfilt(sos, audio)
74
  return filtered_audio
75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  ## Augmentation Functions
77
  def add_noise(data, noise_factor=0.001):
78
  noise = np.random.randn(len(data))
@@ -180,6 +202,8 @@ def _reshape_feature(feature, input_shape):
180
  feature = np.pad(feature, (0, expected_time_frames - len(feature)))
181
 
182
  return feature
 
 
183
  def classify_audio(model_type, feature_type, file_path):
184
  """
185
  Classify an audio file using the specified model.
@@ -220,22 +244,25 @@ def classify_audio(model_type, feature_type, file_path):
220
  logger.info(f"Prediction complete. Predicted class: {predicted_class}, Probabilities: {probabilities}")
221
  return predicted_class, probabilities
222
 
223
-
224
  def classify_audio_with_metrics(model_type, feature_type, file_path):
225
- """
226
- Wrapper around classify_audio to include Prometheus metrics.
227
- """
228
- REQUEST_COUNT.inc() # Increment request counter
 
229
  start_time = time.time()
230
  try:
231
- # Call the original classify_audio function
232
  result = classify_audio(model_type, feature_type, file_path)
233
  return result
234
  except Exception as e:
235
- ERROR_COUNT.inc() # Increment error counter
 
236
  raise
237
  finally:
238
- RESPONSE_TIME.observe(time.time() - start_time) # Observe response time
 
 
 
239
 
240
  def run():
241
  st.title("Respiratory Sound Classifier: Inference and Deployment")
@@ -328,9 +355,6 @@ def run():
328
  os.remove(temp_file_path)
329
 
330
  # Tab 3: Metrics Dashboard
331
- # Tab 3: Metrics Dashboard
332
-
333
-
334
  with tab3:
335
  st.subheader("Metrics Dashboard")
336
  st.markdown("""
@@ -338,66 +362,26 @@ def run():
338
  and error counts. These metrics are tracked internally and updated in real-time.
339
  """)
340
 
341
- # Real-time metrics visualization
342
  col1, col2, col3 = st.columns(3)
 
 
343
 
344
- # Display live metrics
345
- with col1:
346
- st.metric("Total Requests", REQUEST_COUNT._value.get())
347
- with col2:
348
- st.metric("Total Errors", ERROR_COUNT._value.get())
349
- with col3:
350
- # Calculate average response time
351
- response_time_sum = RESPONSE_TIME._sum.get() if hasattr(RESPONSE_TIME, '_sum') else 0
352
- response_time_count = RESPONSE_TIME._count.get() if hasattr(RESPONSE_TIME, '_count') else 0
353
- avg_response_time = response_time_sum / response_time_count if response_time_count > 0 else 0
354
- st.metric("Avg Response Time (s)", f"{avg_response_time:.3f}")
355
-
356
- # Response Time Histogram Visualization
357
- st.markdown("### Response Time Distribution")
358
-
359
- if hasattr(RESPONSE_TIME, "_buckets"):
360
- # Exclude the +Inf bucket
361
- response_time_data = RESPONSE_TIME._buckets[:-1]
362
- response_time_labels = [f"<= {bucket}" for bucket in range(1, len(response_time_data)+1)]
363
-
364
- # Create a DataFrame for bucket counts
365
- response_time_df = pd.DataFrame({
366
- "Time Range": response_time_labels,
367
- "Request Count": response_time_data
368
  })
369
-
370
- # Set the time range as the index
371
- response_time_df.set_index("Time Range", inplace=True)
372
-
373
- # Display the bar chart
374
- st.bar_chart(response_time_df)
375
-
376
- # Show the response time sum and average if the data is available
377
- st.markdown(f"**Total Response Time Sum**: {response_time_sum:.3f} seconds")
378
- st.markdown(f"**Average Response Time**: {avg_response_time:.3f} seconds")
379
-
380
  else:
381
- st.warning("No response time data available for visualization.")
382
 
383
- def save_uploaded_file(uploaded_file):
384
- """Save the uploaded file temporarily."""
385
- temp_file_path = os.path.join("temp_audio", uploaded_file.name)
386
- os.makedirs("temp_audio", exist_ok=True)
387
- with open(temp_file_path, "wb") as f:
388
- f.write(uploaded_file.getbuffer())
389
- return temp_file_path
390
-
391
-
392
- def display_results(predicted_class, probabilities, model_type):
393
- """Display the classification results."""
394
- class_label = CLASS_NAMES[model_type][predicted_class]
395
- st.success(f"Classification Complete! Predicted Class: **{class_label}**")
396
- st.write("### Prediction Probabilities")
397
- class_probabilities = {
398
- CLASS_NAMES[model_type][i]: prob for i, prob in enumerate(probabilities)
399
- }
400
- st.bar_chart(class_probabilities)
401
 
402
 
403
  if __name__ == "__main__":
 
41
  RESPONSE_TIME = Histogram('audio_classifier_response_time_seconds', 'Time taken to process requests')
42
  ERROR_COUNT = Counter('audio_classifier_errors_total', 'Total number of errors during classification')
43
 
44
# NOTE(review): resets the request counter via prometheus_client's private
# Counter internals (_value is not public API) — confirm this survives
# library upgrades; a freshly created Counter already starts at 0.
REQUEST_COUNT._value.set(0)

# Start Prometheus HTTP server
# Metrics are exposed for external scraping at http://<host>:9100/metrics.
start_http_server(9100, addr="0.0.0.0")

# Per-request latencies recorded in-process; appended by
# classify_audio_with_metrics and rendered on the Streamlit metrics tab.
individual_response_times = []
51
  def filtering(audio, sr):
52
  """
 
75
  filtered_audio = sosfilt(sos, audio)
76
  return filtered_audio
77
 
78
+
79
def save_uploaded_file(uploaded_file):
    """Persist an uploaded file to the local ``temp_audio`` directory.

    Parameters
    ----------
    uploaded_file : object
        File-like upload (e.g. a Streamlit ``UploadedFile``) exposing
        ``.name`` and ``.getbuffer()``.

    Returns
    -------
    str
        Path of the saved copy inside ``temp_audio``.
    """
    # Use only the base name: uploaded filenames are untrusted input and
    # could otherwise escape temp_audio via path separators (e.g. "../x.wav").
    safe_name = os.path.basename(uploaded_file.name)
    os.makedirs("temp_audio", exist_ok=True)
    temp_file_path = os.path.join("temp_audio", safe_name)
    with open(temp_file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())
    return temp_file_path
86
+
87
+
88
def display_results(predicted_class, probabilities, model_type):
    """Render the predicted class and a per-class probability chart in Streamlit."""
    # Hoist the label table for this model once instead of re-indexing
    # CLASS_NAMES on every lookup.
    labels = CLASS_NAMES[model_type]
    st.success(f"Classification Complete! Predicted Class: **{labels[predicted_class]}**")
    st.write("### Prediction Probabilities")
    prob_by_label = {labels[i]: p for i, p in enumerate(probabilities)}
    st.bar_chart(prob_by_label)
97
+
98
  ## Augmentation Functions
99
  def add_noise(data, noise_factor=0.001):
100
  noise = np.random.randn(len(data))
 
202
  feature = np.pad(feature, (0, expected_time_frames - len(feature)))
203
 
204
  return feature
205
+
206
+
207
  def classify_audio(model_type, feature_type, file_path):
208
  """
209
  Classify an audio file using the specified model.
 
244
  logger.info(f"Prediction complete. Predicted class: {predicted_class}, Probabilities: {probabilities}")
245
  return predicted_class, probabilities
246
 
 
247
def classify_audio_with_metrics(model_type, feature_type, file_path):
    """Run ``classify_audio`` while recording Prometheus and in-process metrics.

    Increments ``REQUEST_COUNT`` up front and ``ERROR_COUNT`` on failure
    (re-raising the original exception). The elapsed wall-clock time is
    always observed on ``RESPONSE_TIME`` and appended to
    ``individual_response_times``, whether or not classification succeeded.
    """
    global individual_response_times

    logger.info("Audio classification request received.")
    REQUEST_COUNT.inc()

    started_at = time.time()
    try:
        return classify_audio(model_type, feature_type, file_path)
    except Exception as err:
        ERROR_COUNT.inc()
        logger.error("Error during classification: %s", err)
        raise
    finally:
        elapsed = time.time() - started_at
        RESPONSE_TIME.observe(elapsed)
        individual_response_times.append(elapsed)
        logger.info("Request processed. Response time: %.3f seconds", elapsed)
266
 
267
  def run():
268
  st.title("Respiratory Sound Classifier: Inference and Deployment")
 
355
  os.remove(temp_file_path)
356
 
357
  # Tab 3: Metrics Dashboard
 
 
 
358
  with tab3:
359
  st.subheader("Metrics Dashboard")
360
  st.markdown("""
 
362
  and error counts. These metrics are tracked internally and updated in real-time.
363
  """)
364
 
 
365
  col1, col2, col3 = st.columns(3)
366
+ col1.metric("Total Requests", REQUEST_COUNT._value.get())
367
+ col2.metric("Total Errors", ERROR_COUNT._value.get())
368
 
369
+ if individual_response_times:
370
+ avg_response_time = sum(individual_response_times) / len(individual_response_times)
371
+ else:
372
+ avg_response_time = 0
373
+ col3.metric("Avg Response Time (s)", f"{avg_response_time:.3f}")
374
+
375
+ st.markdown("### Individual Response Times")
376
+ if individual_response_times:
377
+ df = pd.DataFrame({
378
+ "Request Index": range(1, len(individual_response_times) + 1),
379
+ "Response Time (s)": individual_response_times
 
 
 
 
 
 
 
 
 
 
 
 
 
380
  })
381
+ st.dataframe(df)
 
 
 
 
 
 
 
 
 
 
382
  else:
383
+ st.warning("No response time data available.")
384
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
385
 
386
 
387
  if __name__ == "__main__":