fschmid56 commited on
Commit
5ecbfd8
·
verified ·
1 Parent(s): cc6722c

Upload compute_cost.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. compute_cost.py +287 -0
compute_cost.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import math
4
+ import numpy as np
5
+ import pandas as pd
6
+
7
+ from typing import Iterable, Dict, List, Optional
8
+
9
# Event classes scored at each 1.2 s segment; column order is also the
# column order used in every prediction / ground-truth DataFrame.
CLASSES = ['Speech', 'Shout', 'Chainsaw', 'Jackhammer', 'Lawn Mower', 'Power Drill', 'Dog Bark', 'Rooster Crow', 'Horn Honk', 'Siren']

# Per-class cost of each confusion outcome. Correct decisions (TP/TN) are
# free; a missed event (FN) costs 5x a false alarm (FP) for every class.
COST_MATRIX = {
    "Speech": {"TP": 0, "FP": 1, "TN": 0, "FN": 5},
    "Dog Bark": {"TP": 0, "FP": 1, "TN": 0, "FN": 5},
    "Rooster Crow": {"TP": 0, "FP": 1, "TN": 0, "FN": 5},
    "Shout": {"TP": 0, "FP": 2, "TN": 0, "FN": 10},
    "Lawn Mower": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
    "Chainsaw": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
    "Jackhammer": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
    "Power Drill": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
    "Horn Honk": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
    "Siren": {"TP": 0, "FP": 3, "TN": 0, "FN": 15},
}
23
+
24
def check_dataframe(data_frame, dataset_path):
    """
    Validates the integrity of a predictions or ground truth DataFrame.

    Parameters:
    ----------
    data_frame : pandas.DataFrame
        A DataFrame containing model predictions or the ground truth.
        It must include columns:
        - 'filename': Name of the audio file (e.g., "xyz.wav")
        - 'onset': Onset times (multiples of 1.2 seconds)
        - One column for each class in the global `CLASSES` list

    dataset_path : str
        Path to the root of the dataset directory. It must contain a
        subdirectory 'audio_features' with `.npz` files for each audio file.

    Raises:
    ------
    AssertionError:
        If any of the following checks fail:
        - The dataset or audio_features directory doesn't exist
        - The DataFrame is missing required columns
        - Onsets are not multiples of 1.2 or labels are not binary
        - Expected feature files are missing
        - Number of predictions doesn't match the number of expected timesteps

    Example:
    -------
    check_dataframe(predicted_df, "MLPC2025_dataset")
    """
    audio_features_path = os.path.join(dataset_path, "audio_features")
    assert os.path.exists(dataset_path), f"Dataset path '{dataset_path}' does not exist."
    assert os.path.exists(audio_features_path), f"Audio features path '{audio_features_path}' does not exist."

    required_columns = set(CLASSES + ["filename", "onset"])
    missing_columns = required_columns - set(data_frame.columns)
    assert not missing_columns, f"Missing columns in predictions_df: {missing_columns}"

    # Onsets must fall on the 1.2 s segment grid (tolerance absorbs float rounding).
    assert ((data_frame["onset"] / 1.2) % 1).apply(lambda x: np.isclose(x, 0, atol=0.1)).all(), "Not all values are divisible by 1.2."
    assert data_frame[CLASSES].isin([0, 1]).all().all(), "Not all predictions are 0 or 1."

    for filename in data_frame["filename"].unique():
        file_id = os.path.splitext(filename)[0]
        feature_file = os.path.join(audio_features_path, f"{file_id}.npz")

        assert os.path.exists(feature_file), f"Feature file '{feature_file}' does not exist."

        # One segment covers 10 embedding frames; a partial trailing chunk
        # still counts as a segment, hence the ceil.
        embeddings = np.load(feature_file)["embeddings"]
        expected_timesteps = math.ceil(len(embeddings) / 10)
        actual_timesteps = len(data_frame[data_frame["filename"] == filename])

        # BUGFIX: the message previously contained a literal placeholder
        # instead of interpolating the offending filename.
        assert actual_timesteps == expected_timesteps, (
            f"Mismatch in timesteps for '{filename}': expected {expected_timesteps}, found {actual_timesteps}."
        )
78
+
79
+
80
def total_cost(predictions_df, ground_truth_df):
    """
    Computes total cost of predictions based on a cost matrix for TP, FP, TN, and FN
    for each class in a multilabel classification problem.

    Parameters:
    ----------
    predictions_df : pandas.DataFrame
        DataFrame containing predicted binary labels (0 or 1) for each class in CLASSES,
        plus 'filename' and 'onset' columns used for alignment.

    ground_truth_df : pandas.DataFrame
        DataFrame containing ground truth binary labels for each class in CLASSES,
        plus 'filename' and 'onset' columns used for alignment.

    Returns:
    -------
    total_cost_value : float
        Total cost across all classes and samples.

    metrics_per_class : dict
        Dictionary with TP, FP, TN, FN rates (scaled by 50) and cost per class.

    Raises:
    ------
    ValueError:
        If predictions and ground truth cannot be aligned one-to-one.
    """

    # Align rows by filename and onset
    merged = predictions_df.merge(
        ground_truth_df,
        on=["filename", "onset"],
        suffixes=("_pred", "_true"),
        how="inner",
        validate="one_to_one"
    )

    if merged.shape[0] != predictions_df.shape[0]:
        raise ValueError("Mismatch in alignment between prediction and ground truth rows")

    metrics_per_class = {}

    for cls in CLASSES:
        # BUGFIX: take the per-class columns from the merged frame so the
        # comparison is keyed on (filename, onset). The original indexed the
        # two input frames directly, silently assuming identical row order.
        y_pred = merged[f"{cls}_pred"].astype(int)
        y_true = merged[f"{cls}_true"].astype(int)

        # NOTE(review): the factor 50 rescales the per-segment rate; it
        # presumably reflects 50 segments per recording — confirm upstream.
        TP = ((y_pred == 1) & (y_true == 1)).mean() * 50
        FP = ((y_pred == 1) & (y_true == 0)).mean() * 50
        TN = ((y_pred == 0) & (y_true == 0)).mean() * 50
        FN = ((y_pred == 0) & (y_true == 1)).mean() * 50

        cost = (
            COST_MATRIX[cls]["TP"] * TP +
            COST_MATRIX[cls]["FP"] * FP +
            COST_MATRIX[cls]["TN"] * TN +
            COST_MATRIX[cls]["FN"] * FN
        )

        metrics_per_class[cls] = {
            "TP": TP, "FP": FP, "TN": TN, "FN": FN, "cost": cost
        }

    return sum(metrics_per_class[c]["cost"] for c in metrics_per_class), metrics_per_class
137
+
138
+
139
def aggregate_targets(arr: np.ndarray, f: int = 10) -> np.ndarray:
    """
    Collapse frame-level ground truths to segment level with a chunk-wise max.

    Parameters:
    ----------
    arr : np.ndarray
        Array of shape (N, D): N frames, D classes.

    f : int
        Number of consecutive frames pooled into one segment.

    Returns:
    -------
    np.ndarray
        Segment-level labels of shape (ceil(N/f), D).
    """
    n_frames, n_classes = arr.shape
    n_full = n_frames // f
    cut = n_full * f

    # Max-pool every complete group of f frames at once.
    pooled = arr[:cut].reshape(n_full, f, n_classes).max(axis=1)

    # A short trailing group (fewer than f frames) still yields one segment.
    if cut < n_frames:
        tail_segment = arr[cut:].max(axis=0, keepdims=True)
        pooled = np.vstack([pooled, tail_segment])

    return pooled
169
+
170
+
171
def get_ground_truth_df(filenames: Iterable[str], dataset_path: str) -> pd.DataFrame:
    """
    Loads and aggregates ground truth labels for an arbitrary list of files.

    Parameters:
    ----------
    filenames : Iterable[str]
        Filenames (e.g., from a subset of metadata.csv) to process.

    dataset_path : str
        Path to the dataset containing the 'labels/' folder with .npz files.

    Returns:
    -------
    pd.DataFrame
        DataFrame with columns: ["filename", "onset"] + CLASSES
    """
    records = []

    for fname in filenames:
        base = os.path.splitext(fname)[0]
        label_path = os.path.join(dataset_path, 'labels', f"{base}_labels.npz")
        assert os.path.exists(label_path), f"Missing label file: {label_path}"

        # Average per-class annotations over the last axis, then pool the
        # frame-level matrix into 1.2 s segments.
        y = np.load(label_path)
        class_matrix = np.stack([y[cls].mean(-1) for cls in CLASSES], axis=1)
        segments = aggregate_targets(class_matrix)

        for idx, segment in enumerate(segments):
            # Any nonzero mass within a segment marks the class as present.
            binarized = (segment > 0).astype(int).tolist()
            records.append([fname, round(idx * 1.2, 1)] + binarized)

    return pd.DataFrame(data=records, columns=["filename", "onset"] + CLASSES)
205
+
206
+
207
def get_segment_prediction_df(
    predictions: Dict[str, Dict[str, np.ndarray]],
    class_names: Optional[List[str]] = None
) -> pd.DataFrame:
    """
    Aggregates frame-level predictions into fixed-length segments for a set of files.

    Parameters:
    ----------
    predictions : Dict[str, Dict[str, np.ndarray]]
        Maps each filename to a dict of class-wise frame-level predictions,
        each a 1D array of shape (T,) over time.

    class_names : List[str], optional
        Classes to include in the output. Defaults to the keys of the first
        file's prediction dict.

    Returns:
    -------
    pd.DataFrame
        One row per segment with columns ["filename", "onset"] + class_names.
    """
    if class_names is None:
        first_file_preds = next(iter(predictions.values()))
        class_names = list(first_file_preds.keys())

    records = []

    for fname, per_class in predictions.items():
        # (T, num_classes) matrix in the requested class order.
        frame_matrix = np.stack([per_class[cls] for cls in class_names], axis=1)

        # Max-pool 10 frames per 1.2 s segment.
        segments = aggregate_targets(frame_matrix, f=10)

        for idx, segment in enumerate(segments):
            records.append([fname, round(idx * 1.2, 1)] + segment.tolist())

    return pd.DataFrame(records, columns=["filename", "onset"] + class_names)
246
+
247
+
248
+
249
+
250
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Compute total cost for environmental noise predictions.")

    parser.add_argument(
        "--dataset_path",
        type=str,
        required=True,
        help="Path to the root directory of the dataset (must contain 'audio_features/')."
    )

    parser.add_argument(
        "--ground_truth_csv",
        type=str,
        default=None,
        help="Path to the CSV file containing the ground truth labels."
    )

    parser.add_argument(
        "--predictions_csv",
        type=str,
        required=True,
        help="Path to the CSV file containing the predicted labels."
    )

    args = parser.parse_args()

    df_pred = pd.read_csv(args.predictions_csv)
    check_dataframe(df_pred, dataset_path=args.dataset_path)
    print("Predictions CSV formatted correctly.")

    # Cost can only be computed when ground truth is supplied; guarding the
    # whole computation prevents a NameError on df_gt when --ground_truth_csv
    # is omitted (format-check-only mode).
    if args.ground_truth_csv is not None:
        df_gt = pd.read_csv(args.ground_truth_csv)
        check_dataframe(df_gt, dataset_path=args.dataset_path)
        print("Ground truth CSV formatted correctly.")

        total, breakdown = total_cost(df_pred, df_gt)
        print("Total cost:", total)