Wuhuwill committed
Commit 8105328 · verified · 1 parent: 386d0c8

Upload ProDiff/Experiments/trajectory_exp_may_data_TKY_len3_ddpm_20250724-100624/code_snapshot/main.py with huggingface_hub
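
Per the commit message, the file was pushed with the huggingface_hub client. A minimal sketch of such an upload via HfApi.upload_file follows; the repo_id is a placeholder assumption (the target repo is not shown in this commit), and authentication is assumed to come from `huggingface-cli login` or the HF_TOKEN environment variable:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up a saved token or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="main.py",  # local file to push
        path_in_repo="ProDiff/Experiments/trajectory_exp_may_data_TKY_len3_ddpm_20250724-100624/code_snapshot/main.py",
        repo_id="<user>/<repo>",  # placeholder; actual repo id not shown in this commit
        commit_message="Upload main.py with huggingface_hub",  # illustrative message
    )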

ProDiff/Experiments/trajectory_exp_may_data_TKY_len3_ddpm_20250724-100624/code_snapshot/main.py ADDED
@@ -0,0 +1,183 @@
+ import os
+ import torch
+ import datetime
+ import shutil
+ from pathlib import Path
+ import argparse
+ from types import SimpleNamespace
+ import sys
+ import numpy as np
+
+
+ from conf import config as config_module  # Corrected alias
+ from utils.logger import Logger, log_info
+ from utils.utils import set_seed, ddp_setup, destroy_process_group, get_data_paths
+ from dataset.data_util import TrajectoryDataset
+ from torch.utils.data import DataLoader
+
+
+ from train import train_main
+ from test import test_model
+
+ def setup_experiment_environment(base_exp_dir, exp_name_with_timestamp, config_to_save, files_to_copy=None):
+     """Sets up the experiment directory structure and saves essential files."""
+     exp_dir = base_exp_dir / exp_name_with_timestamp
+     results_dir = exp_dir / 'results'
+     models_dir = exp_dir / 'models'  # Unified models dir, not a timestamped sub-dir by default here
+     logs_dir = exp_dir / 'logs'
+     code_save_dir = exp_dir / 'code_snapshot'
+
+     os.makedirs(results_dir, exist_ok=True)
+     os.makedirs(models_dir, exist_ok=True)
+     os.makedirs(logs_dir, exist_ok=True)
+     os.makedirs(code_save_dir, exist_ok=True)
+
+     # Save the configuration used for this run
+     with open(exp_dir / 'config_used.txt', 'w') as f:
+         import json
+
+         # Convert SimpleNamespace trees to plain dicts for JSON serialization
+         def ns_to_dict(ns):
+             if isinstance(ns, SimpleNamespace):
+                 return {k: ns_to_dict(v) for k, v in ns.__dict__.items()}
+             elif isinstance(ns, dict):
+                 return {k: ns_to_dict(v) for k, v in ns.items()}
+             elif isinstance(ns, list):
+                 return [ns_to_dict(i) for i in ns]
+             return ns
+
+         config_dict = ns_to_dict(config_to_save)
+         json.dump(config_dict, f, indent=4)
+
+     # Copy essential code files into the snapshot directory
+     if files_to_copy:
+         for file_path_str in files_to_copy:
+             try:
+                 file_path = Path(file_path_str)
+                 if file_path.exists():
+                     shutil.copy(file_path, code_save_dir)
+                 else:
+                     print(f"Warning: File to copy not found: {file_path_str}")  # Use logger if available
+             except Exception as e:
+                 print(f"Warning: Could not copy file {file_path_str}: {e}")
+
+     return exp_dir, models_dir, logs_dir, results_dir
+
+ def main():
+     parser = argparse.ArgumentParser(description='Unified Trajectory Interpolation - Training with Periodic Validation')
+     parser.add_argument('--sampling_type', type=str, default='ddpm', choices=['ddpm', 'ddim'],
+                         help='Diffusion sampling type (ddpm or ddim) - influences periodic validation if DDIM is chosen, and experiment naming.')
+     parser.add_argument('--config_module_path', type=str, default='conf.config',
+                         help='Python module path for base configuration (e.g., conf.config)')
+     parser.add_argument('--exp_name', type=str, default='traj_interp_exp',
+                         help='Base name for the experiment directory')
+     parser.add_argument('--seed', type=int, default=42, help='Random seed')
+     parser.add_argument('--device_id', type=int, default=0, help='CUDA device ID to use')
+     parser.add_argument('--distributed', action='store_true', help='Enable distributed training (DDP)')
+
+     parser.add_argument('--ddim_steps', type=int, default=50, help='Number of DDIM sampling steps for periodic validation')
+     parser.add_argument('--ddim_eta', type=float, default=0.0,
+                         help='DDIM stochasticity parameter for periodic validation (0=deterministic, 1=DDPM-like)')
+
+     parser.add_argument('--debug', action='store_true', help='Enable debug mode for more detailed logs')
+
+     args = parser.parse_args()
+
+     # --- Basic Setup ---
+     if args.distributed:
+         ddp_setup(args.distributed)  # Sets LOCAL_RANK env var if not already set by launcher
+         local_rank = int(os.environ.get('LOCAL_RANK', 0))
+     else:
+         local_rank = 0
+
+     if not args.distributed or local_rank == 0:  # Set up the master process first, or if not distributed
+         print(f"Running on device: cuda:{args.device_id}" if torch.cuda.is_available() else "Running on CPU")
+
+     if torch.cuda.is_available():
+         torch.cuda.set_device(args.device_id if not args.distributed else local_rank)
+
+     set_seed(args.seed + local_rank)  # Ensure different seeds for different processes in DDP for some operations
+
+     # --- Load Configuration ---
+     try:
+         # Import the configuration module named by --config_module_path
+         # (defaults to conf.config) so the flag is actually honored
+         import importlib
+         config_module = importlib.import_module(args.config_module_path)
+         base_config_dict = config_module.load_config()
+     except Exception as e:
+         print(f"Error loading base configuration from {args.config_module_path}: {e}")
+         sys.exit(1)
+
+     # load_config() is expected to return a flat dict of section dicts
+     # (e.g. 'data', 'training', 'sampling'); only the top level is
+     # converted to attribute access here
+     cfg_ns = {k: SimpleNamespace(**v) for k, v in base_config_dict.items()}
+     config = SimpleNamespace(**cfg_ns)
+
+     # Update config with command-line arguments
+     config.debug = args.debug
+     config.training.dis_gpu = args.distributed
+     config.sampling.type = args.sampling_type
+     config.sampling.ddim_steps = args.ddim_steps
+     config.sampling.ddim_eta = args.ddim_eta
+     config.device_id = args.device_id  # Pass device_id for train_main
+     # Ensure other necessary fields exist in config (add defaults if not in config.py)
+     if not hasattr(config, 'model'): config.model = SimpleNamespace()
+     if not hasattr(config.model, 'loss_type'): config.model.loss_type = 'l1'  # Default
+     if not hasattr(config.training, 'learning_rate'): config.training.learning_rate = 2e-4
+     if not hasattr(config.training, 'warmup_epochs'): config.training.warmup_epochs = 10
+     if not hasattr(config.training, 'contrastive_margin'): config.training.contrastive_margin = 1.0
+     if not hasattr(config.training, 'use_amp'): config.training.use_amp = True
+     if not hasattr(config.training, 'kmeans_memory_size'): config.training.kmeans_memory_size = 10  # Batches
+     if not hasattr(config.training, 'ce_loss_weight'): config.training.ce_loss_weight = 0.1
+     if not hasattr(config.training, 'diffusion_loss_weight'): config.training.diffusion_loss_weight = 1.0
+     if not hasattr(config.training, 'contrastive_loss_weight'): config.training.contrastive_loss_weight = 1.0
+
+     # --- Setup Experiment Environment (only on rank 0 if DDP) ---
+     timestamp_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+     # Include sampling type in the experiment name for clarity
+     exp_name_ts = f"{args.exp_name}_{config.data.dataset}_len{config.data.traj_length}_{args.sampling_type}_{timestamp_str}"
+
+     # Placeholder paths for non-rank-0 processes; only rank 0 creates and
+     # writes to the real experiment directory below
+     exp_dir, models_save_dir, logs_dir, results_dir = Path("."), Path("."), Path("."), Path(".")
+     if local_rank == 0:
+         root_dir = Path(__file__).resolve().parent  # Project root
+         base_experiment_path = root_dir / "Experiments"  # Changed from "Backups"
+
+         files_to_copy_snapshot = [
+             'main.py', 'train.py', 'test.py', 'conf/config.py',
+             'diffProModel/Diffusion.py', 'diffProModel/protoTrans.py', 'diffProModel/loss.py',
+             'utils/utils.py', 'utils/logger.py', 'utils/metric.py', 'dataset/data_util.py'
+         ]
+         exp_dir, models_save_dir, logs_dir, results_dir = setup_experiment_environment(
+             base_experiment_path, exp_name_ts, config, files_to_copy_snapshot
+         )
+
+     # Logger setup (after exp_dir is known by all processes if DDP, or just for rank 0)
+     logger = None
+     if local_rank == 0:
+         log_file_path = logs_dir / f"log_{timestamp_str}.txt"
+         logger = Logger(
+             name=exp_name_ts,
+             log_path=log_file_path,
+             colorize=True,
+             level="debug" if args.debug else "info"
+         )
+         logger.info(f"Experiment directory: {exp_dir}")
+         log_info(config, logger)  # Log the configuration details
+         logger.info(f"Using sampling type for periodic validation: {args.sampling_type}")
+         if args.sampling_type == 'ddim':
+             logger.info(f"DDIM steps for validation: {args.ddim_steps}, eta for validation: {args.ddim_eta}")
+
+     # Barrier to ensure exp_dir is created by rank 0 before other ranks proceed if DDP
+     if args.distributed:
+         torch.distributed.barrier()
+
+     # --- Main Execution: Call Training (which includes periodic validation) ---
+     if logger and local_rank == 0:
+         logger.info("Starting training with periodic validation...")
+
+     train_main(config, logger, exp_dir, timestamp_str)
+
+     if args.distributed:
+         if torch.distributed.is_initialized():
+             destroy_process_group()
+
+     if local_rank == 0 and logger:
+         logger.info("Main script execution finished.")
+
+
+ if __name__ == "__main__":
+     main()
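
For reference, a usage sketch of this entry point. The flags and defaults come from the argparse setup above; the torchrun launcher and process count are assumptions based on the "Sets LOCAL_RANK env var if not already set by launcher" comment:

    # Single-GPU training with DDPM sampling for periodic validation (argparse defaults)
    python main.py --sampling_type ddpm --exp_name traj_interp_exp --device_id 0

    # DDIM validation with 50 deterministic sampling steps
    python main.py --sampling_type ddim --ddim_steps 50 --ddim_eta 0.0

    # Multi-GPU DDP; a torchrun-style launcher sets LOCAL_RANK for each process
    torchrun --nproc_per_node=4 main.py --distributed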