Wuhuwill committed on
Commit
fa64139
·
verified ·
1 Parent(s): 365ecba

Upload ProDiff/Experiments/trajectory_exp_may_data_TKY_len3_ddpm_20250724-100624/code_snapshot/test.py with huggingface_hub

Browse files
ProDiff/Experiments/trajectory_exp_may_data_TKY_len3_ddpm_20250724-100624/code_snapshot/test.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import torch
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ import torch.nn.functional as F
6
+
7
+ from utils.metric import *
8
+ from dataset.data_util import MinMaxScaler
9
+ from utils.utils import mask_data_general, ddp_setup
10
+ # Diffusion model will be imported directly in the main script that calls this test function.
11
+ # from diffProModel.Diffusion import Diffusion # No, pass model as argument
12
+
13
def test_model(test_dataloader, diffusion_model, short_samples_model, config, epoch,
               prototypes, device, logger, exp_dir):
    """
    Test the unified Diffusion model (DDPM or DDIM) on the test dataset.

    Args:
        test_dataloader: DataLoader for test data, yielding (abs_time, lat, lng) batches.
        diffusion_model: The unified diffusion model (instance of diffProModel.Diffusion.Diffusion).
        short_samples_model: Trajectory transformer model for feature extraction.
        config: Configuration object (reads config.training.dis_gpu, config.sampling.*,
            config.debug, config.diffusion.num_diffusion_timesteps).
        epoch: Current epoch number (or identifier for the test run).
        prototypes: Prototype vectors (e.g., from TrajectoryTransformer or K-Means).
        device: Device to run the model on (already determined by the caller).
        logger: Logger object (may be None; all logging is guarded).
        exp_dir: Experiment directory path (pathlib.Path; results are saved under exp_dir/'results').

    Returns:
        On rank 0: dict with keys "mean_mtd" and "mean_mppe"; on other ranks: empty dict.
    """
    # Determine distributed status and local_rank first.
    # test_model itself does not (re-)initialize DDP: ddp_setup is expected to
    # have been called by the parent process (e.g., train_main) when distributed.
    distributed = config.training.dis_gpu
    local_rank = 0
    if distributed:
        # If DDP is active, LOCAL_RANK should have been set by the launcher environment.
        try:
            local_rank = int(os.environ.get('LOCAL_RANK', 0))
        except ValueError:
            if logger: logger.warning("LOCAL_RANK environment variable not a valid integer. Defaulting to 0.")
            local_rank = 0
    # The 'device' argument passed to this function should be the correct one to use.

    thresholds = list(range(1000, 11000, 1000))  # Thresholds for the TC (trajectory coverage) metric
    # Per-batch metric accumulators
    mtd_list, mppe_list, maepp_list, maeps_list, aptc_list, avg_aptc_list, max_td_list = [], [], [], [], [], [], []

    # Sampling parameters from config.sampling, with safe defaults
    sampling_type = getattr(config.sampling, 'type', 'ddpm')  # Default to ddpm if not specified
    ddim_steps = getattr(config.sampling, 'ddim_steps', 50)
    ddim_eta = getattr(config.sampling, 'ddim_eta', 0.0)
    debug_mode = getattr(config, 'debug', False)  # General debug flag

    if logger and local_rank == 0:  # Ensure logger operations happen on rank 0 if distributed
        logger.info(f"Testing with sampling_type: {sampling_type} for epoch {epoch}")
        if sampling_type == 'ddim':
            logger.info(f"DDIM steps: {ddim_steps}, DDIM eta: {ddim_eta}")

    diffusion_model.eval()  # Ensure diffusion model is in eval mode
    short_samples_model.eval()  # Ensure feature extractor is in eval mode

    pbar_desc = f"Epoch {epoch} Test Progress ({sampling_type.upper()})"
    for batch_idx, (abs_time, lat, lng) in enumerate(tqdm(test_dataloader, desc=pbar_desc, disable=(local_rank != 0))):

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Batch {batch_idx} - Input shapes: abs_time {abs_time.shape}, lat {lat.shape}, lng {lng.shape}")
            logger.info(f"Input data stats - abs_time: min={abs_time.min().item():.4f}, max={abs_time.max().item():.4f}, " +
                        f"lat: min={lat.min().item():.4f}, max={lat.max().item():.4f}, " +
                        f"lng: min={lng.min().item():.4f}, max={lng.max().item():.4f}")

        # Skip batches with corrupted (NaN) input rather than aborting the whole test run.
        if torch.isnan(abs_time).any() or torch.isnan(lat).any() or torch.isnan(lng).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in input data!")
            continue

        # Ground truth tensor (batch, traj_length, 3): used both for scaler fitting
        # and as test_x0 (start/end-point reference) for the diffusion model.
        testx_raw = torch.stack([abs_time, lat, lng], dim=-1).to(device)

        scaler = MinMaxScaler()
        scaler.fit(testx_raw)  # Fit scaler on raw data (before permute)
        testx_scaled = scaler.transform(testx_raw)  # Scale data to [0, 1]

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Scaler min: {scaler.min_val.flatten().cpu().numpy()}, max: {scaler.max_val.flatten().cpu().numpy()}")

        if torch.isnan(testx_scaled).any():
            if logger and local_rank == 0:
                logger.error(f"Batch {batch_idx} - NaN detected after scaling!")
                if torch.any(scaler.max_val == scaler.min_val):
                    logger.error("Division by zero in scaler possible: max_val equals min_val for some features.")
            continue

        # Permute for diffusion model input: (batch_size, num_features, traj_length)
        testx_scaled_permuted = testx_scaled.permute(0, 2, 1)

        # Masked input for conditioning (feature extraction by short_samples_model).
        # mask_data_general expects (batch_size, num_features, traj_length);
        # short_samples_model expects (batch_size, traj_length, num_features).
        masked_condition_permuted = mask_data_general(testx_scaled_permuted)
        masked_condition_for_ssm = masked_condition_permuted.permute(0, 2, 1)

        with torch.no_grad():
            _, query_features = short_samples_model(masked_condition_for_ssm)

        if torch.isnan(query_features).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in query_features!")
            continue
        if torch.isnan(prototypes).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in provided prototypes!")
            continue

        # Match query features with prototypes via cosine similarity + softmax attention.
        # NOTE: this must align with how matched_prototypes are generated during training.
        cos_sim = F.cosine_similarity(query_features.unsqueeze(1), prototypes.unsqueeze(0), dim=-1)
        if torch.isnan(cos_sim).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in cos_sim!")
            continue

        # Attention-weighted sum over prototypes, scaled by sqrt(d_k) as in training.
        d_k = query_features.size(-1)
        scaled_cos_sim = F.softmax(cos_sim / np.sqrt(d_k), dim=-1)
        matched_prototypes_for_diffusion = torch.matmul(scaled_cos_sim, prototypes).to(device)

        if torch.isnan(matched_prototypes_for_diffusion).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in matched_prototypes!")
            continue

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Sampling with type: {sampling_type}, DDIM steps: {ddim_steps}, eta: {ddim_eta}")
            logger.info(f"Input to diffusion model (testx_scaled_permuted) shape: {testx_scaled_permuted.shape}, "
                        f"masked condition (masked_condition_permuted) shape: {masked_condition_permuted.shape}, "
                        f"matched prototypes shape: {matched_prototypes_for_diffusion.shape}")

        try:
            pred_x0_scaled = diffusion_model.sample(
                test_x0=testx_scaled_permuted,       # Ground truth (scaled) for start/end points and reference
                attr=masked_condition_permuted,      # Masked data for conditional U-Net input (GuideNet attr)
                prototype=matched_prototypes_for_diffusion,  # Matched prototypes for GuideNet
                sampling_type=sampling_type,
                ddim_num_steps=ddim_steps,
                ddim_eta=ddim_eta
            )

            if torch.isnan(pred_x0_scaled).any():
                if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected in Diffusion model output!")
                continue

        except Exception as e:
            # Sampling failures are logged and skipped so one bad batch doesn't kill the run.
            if logger and local_rank == 0: logger.error(f"Exception during Diffusion model sampling: {str(e)}")
            import traceback
            if logger and local_rank == 0: logger.error(traceback.format_exc())
            continue

        # pred_x0_scaled is (batch_size, num_features, traj_length) -> back to (B, T, F)
        pred_x0_scaled_unpermuted = pred_x0_scaled.permute(0, 2, 1)

        if debug_mode and logger and local_rank == 0:
            logger.info(f"pred_x0_scaled_unpermuted stats before inverse_transform: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}")

        # The min-max scaler only inverts correctly for values in [0, 1]; clamp outliers.
        if (pred_x0_scaled_unpermuted < 0).any() or (pred_x0_scaled_unpermuted > 1).any():
            if logger and local_rank == 0:
                logger.warning(f"Batch {batch_idx} - Values outside [0,1] in pred_x0_scaled: min={pred_x0_scaled_unpermuted.min().item():.4f}, max={pred_x0_scaled_unpermuted.max().item():.4f}. Clamping.")
            pred_x0_scaled_unpermuted = torch.clamp(pred_x0_scaled_unpermuted, 0, 1)

        # Inverse transform back to the original data scale.
        pred_x0_final = scaler.inverse_transform(pred_x0_scaled_unpermuted)

        ground_truth_final = testx_raw.cpu()

        if torch.isnan(pred_x0_final).any() or torch.isnan(ground_truth_final).any():
            if logger and local_rank == 0: logger.error(f"Batch {batch_idx} - NaN detected after inverse transform!")
            continue

        # Move to CPU before converting to NumPy for metric calculation.
        pred_x0_np = pred_x0_final.cpu().numpy()
        ground_truth_np = ground_truth_final.numpy()

        if debug_mode and logger and local_rank == 0:
            logger.info(f"Shapes for metrics: pred_x0_np {pred_x0_np.shape}, ground_truth_np {ground_truth_np.shape}")
            logger.info(f"pred_x0_np stats: min={np.min(pred_x0_np):.4f}, max={np.max(pred_x0_np):.4f}")
            logger.info(f"ground_truth_np stats: min={np.min(ground_truth_np):.4f}, max={np.max(ground_truth_np):.4f}")

        try:
            mtd_list.append(mean_trajectory_deviation(pred_x0_np, ground_truth_np))
            mppe_list.append(mean_point_to_point_error(pred_x0_np, ground_truth_np))
            # Feature index 0 is the time channel (abs_time was stacked first).
            maepp_list.append(mean_absolute_error_per_point(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
            maeps_list.append(mean_absolute_error_per_sample(pred_x0_np[:, :, 0], ground_truth_np[:, :, 0]))
            aptc_result, avg_aptc_result = trajectory_coverage(pred_x0_np, ground_truth_np, thresholds)
            aptc_list.append(aptc_result)
            avg_aptc_list.append(avg_aptc_result)
            max_td_list.append(max_trajectory_deviation(pred_x0_np, ground_truth_np))
        except Exception as e:
            if logger and local_rank == 0: logger.error(f"Exception during metric calculation in batch {batch_idx}: {str(e)}")
            if debug_mode and logger and local_rank == 0: import traceback; logger.error(traceback.format_exc())
            continue

        # Use a distinct env var for this specific single-batch debug break.
        if debug_mode and batch_idx == 0 and os.environ.get('PROJECT_DEBUG_MODE', '0') == '1':
            if logger and local_rank == 0: logger.info("Project debug mode: Breaking after first test batch")
            break

    # Aggregate and log metrics (only on rank 0 if distributed)
    if local_rank == 0:
        mean_mtd = np.mean(mtd_list) if mtd_list else float('nan')
        mean_mppe = np.mean(mppe_list) if mppe_list else float('nan')
        mean_maepp = np.mean(maepp_list) if maepp_list else float('nan')
        mean_maeps = np.mean(maeps_list) if maeps_list else float('nan')
        mean_avg_aptc = np.mean(avg_aptc_list) if avg_aptc_list else float('nan')
        mean_max_td = np.max(max_td_list) if max_td_list else float('nan')  # MaxTD is max over all samples
        # Per-threshold mean TC, keyed as in the first batch's result dict.
        mean_aptc_thresholds = {k: np.mean([d[k] for d in aptc_list if k in d]) for k in aptc_list[0]} if aptc_list else {f'TC@{thr}': float('nan') for thr in thresholds}

        if logger:
            logger.info(f"--- Test Results for Epoch {epoch} ({sampling_type.upper()}) ---")
            logger.info(f"Mean MTD: {mean_mtd:.4f}")
            logger.info(f"Mean MPPE: {mean_mppe:.4f}")
            logger.info(f"Mean MAEPP (time): {mean_maepp:.4f}")
            logger.info(f"Mean MAEPS (time): {mean_maeps:.4f}")
            logger.info(f"Mean AVG_TC: {mean_avg_aptc:.4f}")
            logger.info(f"Overall MaxTD: {mean_max_td:.4f}")
            for threshold_val, tc_val in mean_aptc_thresholds.items():
                logger.info(f"Mean {threshold_val}: {tc_val:.4f}")
            if sampling_type == 'ddim':
                logger.info(f"DDIM sampling with {ddim_steps} steps, eta: {ddim_eta:.2f}")
            else:
                logger.info(f"DDPM sampling with {config.diffusion.num_diffusion_timesteps} steps")

        # Save results to .npy files, one file per metric, keyed by epoch.
        results_dir = exp_dir / 'results'
        os.makedirs(results_dir, exist_ok=True)
        sampling_prefix = f"{sampling_type.upper()}_"

        def save_metric_npy(metric_name, value, current_epoch):
            # Append {epoch: value} into a per-metric .npy dict, creating it if absent.
            file_path = results_dir / f"{sampling_prefix}Test_mean_{metric_name}.npy"
            if np.isnan(value): return  # Don't save if NaN
            if os.path.exists(file_path):
                try:
                    existing_data = np.load(file_path, allow_pickle=True).item()
                except Exception:  # Handle empty or corrupted file (was a bare except:)
                    existing_data = {}
                existing_data[current_epoch] = value
            else:
                existing_data = {current_epoch: value}
            np.save(file_path, existing_data)

        save_metric_npy('mtd', mean_mtd, epoch)
        save_metric_npy('mppe', mean_mppe, epoch)
        save_metric_npy('maepp', mean_maepp, epoch)
        save_metric_npy('maeps', mean_maeps, epoch)
        save_metric_npy('avg_aptc', mean_avg_aptc, epoch)
        save_metric_npy('max_td', mean_max_td, epoch)
        for threshold_key, tc_value in mean_aptc_thresholds.items():
            metric_key_name = threshold_key.replace('@', '_at_')  # Sanitize for filename
            save_metric_npy(f"tc_{metric_key_name}", tc_value, epoch)

        if logger: logger.info(f"Saved test metrics to {results_dir}")

    # Ensure all processes finish if in DDP, though testing is usually
    # single-process or rank 0 handles results.
    if torch.distributed.is_initialized():
        torch.distributed.barrier()  # Wait for all processes if any were involved

    return {  # Return main metrics, could be useful for main script
        "mean_mtd": mean_mtd,
        "mean_mppe": mean_mppe
    } if local_rank == 0 else {}